summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 17:12:05 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 17:12:05 +0000
commit9ec46d47bedefa10bdaaa8a587ddb1851ef396ec (patch)
treeba7545ee99b384a6fc3e5ea028ae4c643648d683
parentInitial commit. (diff)
downloadgolang-github-containers-buildah-9ec46d47bedefa10bdaaa8a587ddb1851ef396ec.tar.xz
golang-github-containers-buildah-9ec46d47bedefa10bdaaa8a587ddb1851ef396ec.zip
Adding upstream version 1.33.5+ds1.upstream/1.33.5+ds1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.cirrus.yml348
-rw-r--r--.github/ISSUE_TEMPLATE.md71
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md66
-rw-r--r--.github/renovate.json563
-rw-r--r--.github/workflows/check_cirrus_cron.yml20
-rw-r--r--.github/workflows/discussion_lock.yml20
-rw-r--r--.github/workflows/pr.yml23
-rw-r--r--.github/workflows/rerun_cirrus_cron.yml19
-rw-r--r--.github/workflows/stale.yml31
-rw-r--r--.gitignore14
-rw-r--r--.golangci.yml13
-rw-r--r--.packit.yaml54
-rw-r--r--CHANGELOG.md3071
-rw-r--r--CODE-OF-CONDUCT.md3
-rw-r--r--CONTRIBUTING.md177
-rw-r--r--LICENSE201
-rw-r--r--MAINTAINERS4
-rw-r--r--Makefile217
-rw-r--r--OWNERS28
-rw-r--r--README.md132
-rw-r--r--SECURITY.md3
-rw-r--r--add.go727
-rw-r--r--bind/mount.go304
-rw-r--r--bind/mount_unsupported.go13
-rw-r--r--bind/util.go27
-rwxr-xr-xbtrfs_installed_tag.sh7
-rwxr-xr-xbtrfs_tag.sh7
-rw-r--r--buildah.go551
-rw-r--r--buildah_test.go75
-rw-r--r--changelog.txt2970
-rw-r--r--chroot/pty_posix.go64
-rw-r--r--chroot/pty_ptmx.go47
-rw-r--r--chroot/pty_unsupported.go13
-rw-r--r--chroot/run_common.go831
-rw-r--r--chroot/run_freebsd.go269
-rw-r--r--chroot/run_linux.go711
-rw-r--r--chroot/run_test.go610
-rw-r--r--chroot/seccomp.go205
-rw-r--r--chroot/seccomp_freebsd.go15
-rw-r--r--chroot/seccomp_unsupported.go27
-rw-r--r--chroot/selinux.go24
-rw-r--r--chroot/selinux_unsupported.go20
-rw-r--r--chroot/unsupported.go16
-rw-r--r--cmd/buildah/addcopy.go284
-rw-r--r--cmd/buildah/build.go104
-rw-r--r--cmd/buildah/commit.go301
-rw-r--r--cmd/buildah/common.go248
-rw-r--r--cmd/buildah/common_test.go136
-rw-r--r--cmd/buildah/config.go443
-rw-r--r--cmd/buildah/containers.go344
-rw-r--r--cmd/buildah/containers_test.go145
-rw-r--r--cmd/buildah/dumpbolt.go74
-rw-r--r--cmd/buildah/from.go355
-rw-r--r--cmd/buildah/images.go348
-rw-r--r--cmd/buildah/images_test.go70
-rw-r--r--cmd/buildah/info.go102
-rw-r--r--cmd/buildah/inspect.go135
-rw-r--r--cmd/buildah/login.go73
-rw-r--r--cmd/buildah/logout.go59
-rw-r--r--cmd/buildah/main.go253
-rw-r--r--cmd/buildah/manifest.go956
-rw-r--r--cmd/buildah/mkcw.go76
-rw-r--r--cmd/buildah/mount.go143
-rw-r--r--cmd/buildah/passwd.go34
-rw-r--r--cmd/buildah/prune.go95
-rw-r--r--cmd/buildah/pull.go155
-rw-r--r--cmd/buildah/push.go271
-rw-r--r--cmd/buildah/rename.go59
-rw-r--r--cmd/buildah/rm.go92
-rw-r--r--cmd/buildah/rmi.go106
-rw-r--r--cmd/buildah/run.go199
-rw-r--r--cmd/buildah/source.go127
-rw-r--r--cmd/buildah/tag.go57
-rw-r--r--cmd/buildah/umount.go98
-rw-r--r--cmd/buildah/unshare.go150
-rw-r--r--cmd/buildah/unshare_unsupported.go22
-rw-r--r--cmd/buildah/version.go115
-rw-r--r--commit.go428
-rw-r--r--common.go88
-rw-r--r--config.go747
-rw-r--r--contrib/buildahimage/Containerfile113
-rw-r--r--contrib/buildahimage/README.md86
-rw-r--r--contrib/buildahimage/containers.conf2
-rw-r--r--contrib/cirrus/bors-ng.pngbin0 -> 80194 bytes
-rwxr-xr-xcontrib/cirrus/build.sh25
-rwxr-xr-xcontrib/cirrus/lib.sh342
-rwxr-xr-xcontrib/cirrus/logcollector.sh56
-rwxr-xr-xcontrib/cirrus/setup.sh109
-rwxr-xr-xcontrib/cirrus/test.sh90
-rw-r--r--contrib/cirrus/timestamp.awk20
-rw-r--r--contrib/completions/bash/buildah1257
-rw-r--r--contrib/docker/Dockerfile9
-rw-r--r--convertcw.go217
-rw-r--r--convertcw_test.go163
-rw-r--r--copier/copier.go1986
-rw-r--r--copier/copier_linux_test.go195
-rw-r--r--copier/copier_test.go1873
-rw-r--r--copier/copier_unix_test.go77
-rw-r--r--copier/hardlink_not_uint64.go15
-rw-r--r--copier/hardlink_uint64.go15
-rw-r--r--copier/hardlink_unix.go32
-rw-r--r--copier/hardlink_windows.go17
-rw-r--r--copier/mknod_int.go12
-rw-r--r--copier/mknod_uint64.go12
-rw-r--r--copier/syscall_unix.go92
-rw-r--r--copier/syscall_windows.go88
-rw-r--r--copier/xattrs.go101
-rw-r--r--copier/xattrs_test.go57
-rw-r--r--copier/xattrs_unsupported.go15
-rw-r--r--define/build.go336
-rw-r--r--define/isolation.go32
-rw-r--r--define/mount_freebsd.go17
-rw-r--r--define/mount_linux.go17
-rw-r--r--define/mount_unsupported.go17
-rw-r--r--define/namespace.go87
-rw-r--r--define/pull.go50
-rw-r--r--define/pull_test.go13
-rw-r--r--define/types.go311
-rw-r--r--define/types_test.go29
-rw-r--r--define/types_unix.go18
-rw-r--r--define/types_unsupported.go6
-rw-r--r--delete.go15
-rw-r--r--demos/README.md74
-rwxr-xr-xdemos/buildah-bud-demo.sh85
-rwxr-xr-xdemos/buildah-scratch-demo.sh100
-rwxr-xr-xdemos/buildah_multi_stage.sh114
-rwxr-xr-xdemos/docker-compatibility-demo.sh83
-rw-r--r--developmentplan.md13
-rw-r--r--digester.go269
-rw-r--r--digester_test.go306
-rw-r--r--docker/AUTHORS1788
-rw-r--r--docker/types.go258
-rw-r--r--docs/Makefile33
-rw-r--r--docs/buildah-add.1.md141
-rw-r--r--docs/buildah-build.1.md1316
-rw-r--r--docs/buildah-commit.1.md263
-rw-r--r--docs/buildah-config.1.md274
-rw-r--r--docs/buildah-containers.1.md123
-rw-r--r--docs/buildah-copy.1.md139
-rw-r--r--docs/buildah-from.1.md743
-rw-r--r--docs/buildah-images.1.md137
-rw-r--r--docs/buildah-info.1.md73
-rw-r--r--docs/buildah-inspect.1.md39
-rw-r--r--docs/buildah-login.1.md114
-rw-r--r--docs/buildah-logout.1.md60
-rw-r--r--docs/buildah-manifest-add.1.md109
-rw-r--r--docs/buildah-manifest-annotate.1.md66
-rw-r--r--docs/buildah-manifest-create.1.md62
-rw-r--r--docs/buildah-manifest-exists.1.md40
-rw-r--r--docs/buildah-manifest-inspect.1.md37
-rw-r--r--docs/buildah-manifest-push.1.md101
-rw-r--r--docs/buildah-manifest-remove.1.md27
-rw-r--r--docs/buildah-manifest-rm.1.md25
-rw-r--r--docs/buildah-manifest.1.md76
-rw-r--r--docs/buildah-mkcw.1.md78
-rw-r--r--docs/buildah-mount.1.md66
-rw-r--r--docs/buildah-prune.1.md33
-rw-r--r--docs/buildah-pull.1.md162
-rw-r--r--docs/buildah-push.1.md184
-rw-r--r--docs/buildah-rename.1.md19
-rw-r--r--docs/buildah-rm.1.md27
-rw-r--r--docs/buildah-rmi.1.md77
-rw-r--r--docs/buildah-run.1.md403
-rw-r--r--docs/buildah-source-add.1.md21
-rw-r--r--docs/buildah-source-create.1.md24
-rw-r--r--docs/buildah-source-pull.1.md32
-rw-r--r--docs/buildah-source-push.1.md31
-rw-r--r--docs/buildah-source.1.md31
-rw-r--r--docs/buildah-tag.1.md19
-rw-r--r--docs/buildah-umount.1.md27
-rw-r--r--docs/buildah-unshare.1.md63
-rw-r--r--docs/buildah-version.1.md31
-rw-r--r--docs/buildah.1.md207
-rw-r--r--docs/cni-examples/100-buildah-bridge.conf17
-rw-r--r--docs/cni-examples/200-loopback.conf5
-rw-r--r--docs/cni-examples/README.md37
-rw-r--r--docs/containertools/README.md113
-rw-r--r--docs/links/buildah-bud.11
-rw-r--r--docs/release-announcements/README.md27
-rw-r--r--docs/release-announcements/v0.12.md32
-rw-r--r--docs/release-announcements/v0.16.md45
-rw-r--r--docs/release-announcements/v1.1.md82
-rw-r--r--docs/release-announcements/v1.2.md81
-rw-r--r--docs/release-announcements/v1.3.md60
-rw-r--r--docs/release-announcements/v1.4.md82
-rw-r--r--docs/release-announcements/v1.5.md81
-rw-r--r--docs/samples/registries.conf28
-rw-r--r--docs/tutorials/01-intro.md284
-rw-r--r--docs/tutorials/02-registries-repositories.md134
-rw-r--r--docs/tutorials/03-on-build.md193
-rw-r--r--docs/tutorials/04-include-in-your-build-tool.md203
-rw-r--r--docs/tutorials/05-openshift-rootless-build.md578
-rw-r--r--docs/tutorials/README.md26
-rwxr-xr-xexamples/all-the-things.sh79
-rwxr-xr-xexamples/copy.sh105
-rwxr-xr-xexamples/lighttpd.sh18
-rw-r--r--go.mod145
-rw-r--r--go.sum616
-rw-r--r--hack/Dockerfile7
-rwxr-xr-xhack/apparmor_tag.sh4
-rwxr-xr-xhack/build_speed.sh128
-rwxr-xr-xhack/get_ci_vm.sh64
-rwxr-xr-xhack/libsubid_tag.sh29
-rwxr-xr-xhack/systemd_tag.sh7
-rwxr-xr-xhack/tree_status.sh14
-rwxr-xr-xhack/xref-helpmsgs-manpages333
-rw-r--r--image.go949
-rw-r--r--imagebuildah/build.go699
-rw-r--r--imagebuildah/executor.go1082
-rw-r--r--imagebuildah/stage_executor.go2250
-rw-r--r--imagebuildah/stage_executor_test.go100
-rw-r--r--imagebuildah/util.go22
-rw-r--r--import.go176
-rw-r--r--info.go190
-rw-r--r--install.md402
-rw-r--r--internal/config/convert.go121
-rw-r--r--internal/config/convert_test.go166
-rw-r--r--internal/config/executor.go45
-rw-r--r--internal/config/executor_test.go5
-rw-r--r--internal/config/override.go181
-rw-r--r--internal/mkcw/archive.go464
-rw-r--r--internal/mkcw/archive_test.go181
-rw-r--r--internal/mkcw/attest.go250
-rwxr-xr-xinternal/mkcw/embed/entrypoint.gzbin0 -> 405 bytes
-rw-r--r--internal/mkcw/embed/entrypoint.s16
-rw-r--r--internal/mkcw/entrypoint.go6
-rw-r--r--internal/mkcw/luks.go51
-rw-r--r--internal/mkcw/luks_test.go66
-rw-r--r--internal/mkcw/makefs.go38
-rw-r--r--internal/mkcw/types/attest.go47
-rw-r--r--internal/mkcw/types/workload.go34
-rw-r--r--internal/mkcw/workload.go223
-rw-r--r--internal/mkcw/workload_test.go62
-rw-r--r--internal/parse/parse.go79
-rw-r--r--internal/source/add.go133
-rw-r--r--internal/source/create.go70
-rw-r--r--internal/source/pull.go110
-rw-r--r--internal/source/push.go69
-rw-r--r--internal/source/source.go121
-rw-r--r--internal/tmpdir/tmpdir.go26
-rw-r--r--internal/tmpdir/tmpdir_test.go58
-rw-r--r--internal/types.go18
-rw-r--r--internal/util/util.go99
-rw-r--r--internal/volumes/volumes.go637
-rw-r--r--logos/buildah-logo-source.svg2888
-rw-r--r--logos/buildah-logo_large.pngbin0 -> 33351 bytes
-rw-r--r--logos/buildah-logo_large_transparent-bg.pngbin0 -> 30430 bytes
-rw-r--r--logos/buildah-logo_medium.pngbin0 -> 13321 bytes
-rw-r--r--logos/buildah-logo_medium_transparent-bg.pngbin0 -> 12332 bytes
-rw-r--r--logos/buildah-logo_reverse_large.pngbin0 -> 33269 bytes
-rw-r--r--logos/buildah-logo_reverse_medium.pngbin0 -> 13409 bytes
-rw-r--r--logos/buildah-logo_reverse_small.pngbin0 -> 7953 bytes
-rw-r--r--logos/buildah-logo_small.pngbin0 -> 7910 bytes
-rw-r--r--logos/buildah-logo_small_transparent-bg.pngbin0 -> 7207 bytes
-rw-r--r--logos/buildah-logomark_large.pngbin0 -> 21668 bytes
-rw-r--r--logos/buildah-logomark_large_transparent-bg.pngbin0 -> 19720 bytes
-rw-r--r--logos/buildah-logomark_medium.pngbin0 -> 11029 bytes
-rw-r--r--logos/buildah-logomark_medium_transparent-bg.pngbin0 -> 10081 bytes
-rw-r--r--logos/buildah-logomark_small.pngbin0 -> 4737 bytes
-rw-r--r--logos/buildah-logomark_small_transparent-bg.pngbin0 -> 4181 bytes
-rw-r--r--manifests/compat.go33
-rw-r--r--mount.go51
-rw-r--r--new.go355
-rw-r--r--new_test.go35
-rw-r--r--nix/default-arm64.nix85
-rw-r--r--nix/default.nix83
-rw-r--r--nix/nixpkgs.json10
-rw-r--r--nix/nixpkgs.nix9
-rw-r--r--pkg/blobcache/blobcache.go31
-rw-r--r--pkg/chrootuser/user.go116
-rw-r--r--pkg/chrootuser/user_basic.go32
-rw-r--r--pkg/chrootuser/user_test.go40
-rw-r--r--pkg/chrootuser/user_unix.go314
-rw-r--r--pkg/cli/build.go477
-rw-r--r--pkg/cli/common.go584
-rw-r--r--pkg/cli/common_test.go142
-rw-r--r--pkg/cli/exec_codes.go13
-rw-r--r--pkg/completion/completion.go23
-rw-r--r--pkg/dummy/dummy_test.go8
-rw-r--r--pkg/formats/formats.go166
-rw-r--r--pkg/formats/formats_test.go44
-rw-r--r--pkg/formats/templates.go82
-rw-r--r--pkg/jail/jail.go180
-rw-r--r--pkg/jail/jail_int32.go20
-rw-r--r--pkg/jail/jail_int64.go19
-rw-r--r--pkg/manifests/compat.go28
-rw-r--r--pkg/overlay/overlay.go242
-rw-r--r--pkg/overlay/overlay_freebsd.go31
-rw-r--r--pkg/overlay/overlay_linux.go80
-rw-r--r--pkg/parse/parse.go1198
-rw-r--r--pkg/parse/parse_test.go224
-rw-r--r--pkg/parse/parse_unix.go49
-rw-r--r--pkg/parse/parse_unsupported.go18
-rw-r--r--pkg/rusage/rusage.go48
-rw-r--r--pkg/rusage/rusage_test.go48
-rw-r--r--pkg/rusage/rusage_unix.go35
-rw-r--r--pkg/rusage/rusage_unsupported.go18
-rw-r--r--pkg/sshagent/sshagent.go254
-rw-r--r--pkg/sshagent/sshagent_test.go55
-rw-r--r--pkg/supplemented/compat.go26
-rw-r--r--pkg/umask/umask.go13
-rw-r--r--pkg/util/resource_unix.go38
-rw-r--r--pkg/util/resource_unix_test.go32
-rw-r--r--pkg/util/resource_windows.go16
-rw-r--r--pkg/util/test/test1/Containerfile1
-rw-r--r--pkg/util/test/test1/Dockerfile1
-rw-r--r--pkg/util/test/test2/Dockerfile1
-rw-r--r--pkg/util/uptime_darwin.go10
-rw-r--r--pkg/util/uptime_freebsd.go25
-rw-r--r--pkg/util/uptime_linux.go28
-rw-r--r--pkg/util/uptime_windows.go10
-rw-r--r--pkg/util/util.go82
-rw-r--r--pkg/util/util_test.go32
-rw-r--r--pkg/util/version_unix.go19
-rw-r--r--pkg/util/version_windows.go10
-rw-r--r--pkg/volumes/volumes.go13
-rw-r--r--pull.go100
-rw-r--r--push.go155
-rwxr-xr-xrelease.sh81
-rw-r--r--rpm/buildah.spec173
-rw-r--r--run.go209
-rw-r--r--run_common.go1961
-rw-r--r--run_freebsd.go584
-rw-r--r--run_linux.go1240
-rw-r--r--run_test.go84
-rw-r--r--run_unix.go43
-rw-r--r--run_unsupported.go29
-rw-r--r--seccomp.go36
-rw-r--r--seccomp_unsupported.go15
-rw-r--r--selinux.go42
-rw-r--r--selinux_unsupported.go18
-rw-r--r--tests/add.bats308
-rw-r--r--tests/authenticate.bats169
-rw-r--r--tests/basic.bats135
-rw-r--r--tests/blobcache.bats210
-rw-r--r--tests/bud.bats6529
-rw-r--r--tests/bud/add-checksum/Containerfile2
-rw-r--r--tests/bud/add-checksum/Containerfile.bad2
-rw-r--r--tests/bud/add-checksum/Containerfile.bad-checksum2
-rw-r--r--tests/bud/add-chmod/Dockerfile6
-rw-r--r--tests/bud/add-chmod/Dockerfile.bad6
-rw-r--r--tests/bud/add-chmod/Dockerfile.combined6
-rw-r--r--tests/bud/add-chmod/addchmod.txt1
-rw-r--r--tests/bud/add-chown/Dockerfile6
-rw-r--r--tests/bud/add-chown/Dockerfile.bad6
-rw-r--r--tests/bud/add-chown/addchown.txt1
-rw-r--r--tests/bud/add-create-absolute-path/Dockerfile3
-rw-r--r--tests/bud/add-create-absolute-path/distutils.cfg0
-rw-r--r--tests/bud/add-create-relative-path/Dockerfile3
-rw-r--r--tests/bud/add-create-relative-path/distutils.cfg0
-rw-r--r--tests/bud/add-file/Dockerfile8
-rw-r--r--tests/bud/add-file/file0
-rw-r--r--tests/bud/add-file/file20
-rw-r--r--tests/bud/add-run-dir/Dockerfile2
-rw-r--r--tests/bud/addtl-tags/Dockerfile2
-rw-r--r--tests/bud/all-platform/Containerfile.default-arg2
-rw-r--r--tests/bud/base-with-arg/Containerfile20
-rw-r--r--tests/bud/base-with-arg/Containerfile211
-rw-r--r--tests/bud/base-with-arg/Containerfilebad11
-rw-r--r--tests/bud/base-with-arg/first.args1
-rw-r--r--tests/bud/base-with-arg/second.args1
-rw-r--r--tests/bud/base-with-labels/Containerfile2
-rw-r--r--tests/bud/build-arg/Dockerfile3
-rw-r--r--tests/bud/build-arg/Dockerfile22
-rw-r--r--tests/bud/build-arg/Dockerfile315
-rw-r--r--tests/bud/build-arg/Dockerfile43
-rw-r--r--tests/bud/build-with-from/Containerfile4
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilebindfrom3
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilebindfromrelative4
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilebindfromwithemptyfrom4
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilebindfromwithoutsource4
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilebindfromwriteable3
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilebuildkitbase2
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilebuildkitbaserelative3
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilecachefrom8
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilecachefromimage5
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilecachemultiplefrom10
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilemultistagefrom6
-rw-r--r--tests/bud/buildkit-mount-from/Dockerfilemultistagefromcache11
-rw-r--r--tests/bud/buildkit-mount-from/Dockermultistagefrom6
-rw-r--r--tests/bud/buildkit-mount-from/hello1
-rw-r--r--tests/bud/buildkit-mount-from/hello21
-rw-r--r--tests/bud/buildkit-mount/Dockerfile4
-rw-r--r--tests/bud/buildkit-mount/Dockerfile24
-rw-r--r--tests/bud/buildkit-mount/Dockerfile34
-rw-r--r--tests/bud/buildkit-mount/Dockerfile44
-rw-r--r--tests/bud/buildkit-mount/Dockerfile64
-rw-r--r--tests/bud/buildkit-mount/Dockerfilecacheread4
-rw-r--r--tests/bud/buildkit-mount/Dockerfilecachereadwithoutz3
-rw-r--r--tests/bud/buildkit-mount/Dockerfilecachewrite4
-rw-r--r--tests/bud/buildkit-mount/Dockerfilecachewritesharing7
-rw-r--r--tests/bud/buildkit-mount/Dockerfilecachewritewithoutz3
-rw-r--r--tests/bud/buildkit-mount/Dockerfilemultiplemounts8
-rw-r--r--tests/bud/buildkit-mount/Dockerfiletmpfs4
-rw-r--r--tests/bud/buildkit-mount/Dockerfiletmpfscopyup4
-rw-r--r--tests/bud/buildkit-mount/input_file1
-rw-r--r--tests/bud/buildkit-mount/subdir/input_file1
-rw-r--r--tests/bud/cache-chown/Dockerfile.add12
-rw-r--r--tests/bud/cache-chown/Dockerfile.add22
-rw-r--r--tests/bud/cache-chown/Dockerfile.copy12
-rw-r--r--tests/bud/cache-chown/Dockerfile.copy22
-rw-r--r--tests/bud/cache-chown/Dockerfile.prev14
-rw-r--r--tests/bud/cache-chown/Dockerfile.prev24
-rw-r--r--tests/bud/cache-chown/Dockerfile.tar13
-rw-r--r--tests/bud/cache-chown/Dockerfile.tar23
-rw-r--r--tests/bud/cache-chown/Dockerfile.url12
-rw-r--r--tests/bud/cache-chown/Dockerfile.url22
-rw-r--r--tests/bud/cache-chown/testfile1
-rw-r--r--tests/bud/cache-chown/testfile.tar.gzbin0 -> 158 bytes
-rw-r--r--tests/bud/cache-format/Dockerfile3
-rw-r--r--tests/bud/cache-from/Containerfile3
-rw-r--r--tests/bud/cache-mount-locked/Containerfile21
-rw-r--r--tests/bud/cache-mount-locked/file1
-rw-r--r--tests/bud/cache-scratch/Dockerfile5
-rw-r--r--tests/bud/cache-scratch/Dockerfile.config8
-rw-r--r--tests/bud/cache-scratch/Dockerfile.different16
-rw-r--r--tests/bud/cache-scratch/Dockerfile.different26
-rw-r--r--tests/bud/cache-stages/Dockerfile.12
-rw-r--r--tests/bud/cache-stages/Dockerfile.25
-rw-r--r--tests/bud/cache-stages/Dockerfile.325
-rw-r--r--tests/bud/capabilities/Dockerfile3
-rw-r--r--tests/bud/check-race/Containerfile2
-rw-r--r--tests/bud/commit/name-path-changes/Dockerfile2
-rw-r--r--tests/bud/container-ignoresymlink/Dockerfile3
-rw-r--r--tests/bud/container-ignoresymlink/hello0
-rw-r--r--tests/bud/container-ignoresymlink/world0
-rw-r--r--tests/bud/containeranddockerfile/Containerfile1
-rw-r--r--tests/bud/containeranddockerfile/Dockerfile1
-rw-r--r--tests/bud/containerfile/Containerfile2
-rw-r--r--tests/bud/containerfile/Containerfile.in7
-rw-r--r--tests/bud/containerignore/.containerignore6
-rw-r--r--tests/bud/containerignore/.dockerignore2
-rw-r--r--tests/bud/containerignore/Dockerfile4
-rw-r--r--tests/bud/containerignore/Dockerfile.succeed3
-rw-r--r--tests/bud/containerignore/subdir/sub1.txt0
-rw-r--r--tests/bud/containerignore/subdir/sub2.txt0
-rw-r--r--tests/bud/containerignore/test1.txt1
-rw-r--r--tests/bud/containerignore/test2.txt1
-rw-r--r--tests/bud/context-escape-dir/testdir/Containerfile2
-rw-r--r--tests/bud/context-escape-dir/upperfile.txt3
-rw-r--r--tests/bud/context-from-stdin/Dockerfile5
-rw-r--r--tests/bud/copy-archive/Containerfile2
-rw-r--r--tests/bud/copy-chmod/Dockerfile6
-rw-r--r--tests/bud/copy-chmod/Dockerfile.bad7
-rw-r--r--tests/bud/copy-chmod/Dockerfile.combined6
-rw-r--r--tests/bud/copy-chmod/copychmod.txt1
-rw-r--r--tests/bud/copy-chown/Containerfile.chown_user8
-rw-r--r--tests/bud/copy-chown/Dockerfile6
-rw-r--r--tests/bud/copy-chown/Dockerfile.bad6
-rw-r--r--tests/bud/copy-chown/Dockerfile.bad24
-rw-r--r--tests/bud/copy-chown/copychown.txt1
-rw-r--r--tests/bud/copy-create-absolute-path/Dockerfile3
-rw-r--r--tests/bud/copy-create-absolute-path/distutils.cfg0
-rw-r--r--tests/bud/copy-create-relative-path/Dockerfile3
-rw-r--r--tests/bud/copy-create-relative-path/distutils.cfg0
-rw-r--r--tests/bud/copy-envvar/Containerfile3
-rw-r--r--tests/bud/copy-envvar/file-0.0.1.txt0
-rw-r--r--tests/bud/copy-from/Dockerfile4
-rw-r--r--tests/bud/copy-from/Dockerfile.bad2
-rw-r--r--tests/bud/copy-from/Dockerfile24
-rw-r--r--tests/bud/copy-from/Dockerfile2.bad6
-rw-r--r--tests/bud/copy-from/Dockerfile34
-rw-r--r--tests/bud/copy-from/Dockerfile44
-rw-r--r--tests/bud/copy-globs/Containerfile3
-rw-r--r--tests/bud/copy-globs/Containerfile.bad3
-rw-r--r--tests/bud/copy-globs/Containerfile.missing3
-rw-r--r--tests/bud/copy-globs/Dockerfile2
-rw-r--r--tests/bud/copy-globs/test1.txt0
-rw-r--r--tests/bud/copy-globs/test2.txt0
-rw-r--r--tests/bud/copy-multiple-files/Dockerfile3
-rw-r--r--tests/bud/copy-multiple-files/file0
-rw-r--r--tests/bud/copy-multiple-files/file20
-rw-r--r--tests/bud/copy-multistage-paths/Dockerfile.absolute4
-rw-r--r--tests/bud/copy-multistage-paths/Dockerfile.invalid_from4
-rw-r--r--tests/bud/copy-multistage-paths/Dockerfile.relative4
-rw-r--r--tests/bud/copy-root/Dockerfile2
-rw-r--r--tests/bud/copy-root/distutils.cfg0
-rw-r--r--tests/bud/copy-workdir/Dockerfile4
-rw-r--r--tests/bud/copy-workdir/Dockerfile.24
-rw-r--r--tests/bud/copy-workdir/file1.txt1
-rw-r--r--tests/bud/copy-workdir/file2.txt1
-rw-r--r--tests/bud/dest-final-slash/Dockerfile5
-rw-r--r--tests/bud/dest-symlink-dangling/Dockerfile6
-rw-r--r--tests/bud/dest-symlink/Dockerfile9
-rw-r--r--tests/bud/device/Dockerfile2
-rw-r--r--tests/bud/dns/Dockerfile2
-rw-r--r--tests/bud/dockerfile/Dockerfile1
-rw-r--r--tests/bud/dockerignore/.dockerignore6
-rw-r--r--tests/bud/dockerignore/Dockerfile4
-rw-r--r--tests/bud/dockerignore/Dockerfile.succeed3
-rw-r--r--tests/bud/dockerignore/subdir/sub1.txt0
-rw-r--r--tests/bud/dockerignore/subdir/sub2.txt0
-rw-r--r--tests/bud/dockerignore/test1.txt1
-rw-r--r--tests/bud/dockerignore/test2.txt1
-rw-r--r--tests/bud/dockerignore2/.dockerignore1
-rw-r--r--tests/bud/dockerignore2/Dockerfile2
-rw-r--r--tests/bud/dockerignore2/subdir/sub1.txt1
-rw-r--r--tests/bud/dockerignore2/subdir/subsubdir/subsub1.txt1
-rw-r--r--tests/bud/dockerignore3/.dockerignore10
-rw-r--r--tests/bud/dockerignore3/BUILD.md0
-rw-r--r--tests/bud/dockerignore3/COPYRIGHT0
-rw-r--r--tests/bud/dockerignore3/Dockerfile5
-rw-r--r--tests/bud/dockerignore3/LICENSE0
-rw-r--r--tests/bud/dockerignore3/README-secret.md0
-rw-r--r--tests/bud/dockerignore3/README.md0
-rw-r--r--tests/bud/dockerignore3/manifest16
-rw-r--r--tests/bud/dockerignore3/src/Makefile0
-rw-r--r--tests/bud/dockerignore3/src/cmd/Makefile0
-rw-r--r--tests/bud/dockerignore3/src/cmd/main.in0
-rw-r--r--tests/bud/dockerignore3/src/etc/foo.conf0
-rw-r--r--tests/bud/dockerignore3/src/etc/foo.conf.d/dropin.conf0
-rw-r--r--tests/bud/dockerignore3/src/lib/Makefile0
-rw-r--r--tests/bud/dockerignore3/src/lib/framework.in0
-rw-r--r--tests/bud/dockerignore3/test1.txt0
-rw-r--r--tests/bud/dockerignore3/test2.txt0
-rw-r--r--tests/bud/dockerignore3/test3.txt0
-rw-r--r--tests/bud/dockerignore4/BUILD.md0
-rw-r--r--tests/bud/dockerignore4/COPYRIGHT0
-rw-r--r--tests/bud/dockerignore4/Dockerfile.test5
-rw-r--r--tests/bud/dockerignore4/Dockerfile.test.dockerignore10
-rw-r--r--tests/bud/dockerignore4/LICENSE0
-rw-r--r--tests/bud/dockerignore4/README-secret.md0
-rw-r--r--tests/bud/dockerignore4/README.md0
-rw-r--r--tests/bud/dockerignore4/manifest16
-rw-r--r--tests/bud/dockerignore4/src/Makefile0
-rw-r--r--tests/bud/dockerignore4/src/cmd/Makefile0
-rw-r--r--tests/bud/dockerignore4/src/cmd/main.in0
-rw-r--r--tests/bud/dockerignore4/src/etc/foo.conf0
-rw-r--r--tests/bud/dockerignore4/src/etc/foo.conf.d/dropin.conf0
-rw-r--r--tests/bud/dockerignore4/src/lib/Makefile0
-rw-r--r--tests/bud/dockerignore4/src/lib/framework.in0
-rw-r--r--tests/bud/dockerignore4/test1.txt0
-rw-r--r--tests/bud/dockerignore4/test2.txt0
-rw-r--r--tests/bud/dockerignore4/test3.txt0
-rw-r--r--tests/bud/dockerignore6/Dockerfile4
-rw-r--r--tests/bud/dockerignore6/Dockerfile.dockerignore6
-rw-r--r--tests/bud/dockerignore6/Dockerfile.succeed3
-rw-r--r--tests/bud/dockerignore6/Dockerfile.succeed.dockerignore6
-rw-r--r--tests/bud/dockerignore6/subdir/sub1.txt0
-rw-r--r--tests/bud/dockerignore6/subdir/sub2.txt0
-rw-r--r--tests/bud/dockerignore6/test1.txt1
-rw-r--r--tests/bud/dockerignore6/test2.txt1
-rw-r--r--tests/bud/dupe-arg-env-name/Containerfile7
-rw-r--r--tests/bud/env/Dockerfile.check-env2
-rw-r--r--tests/bud/env/Dockerfile.env-from-image2
-rw-r--r--tests/bud/env/Dockerfile.env-precedence17
-rw-r--r--tests/bud/env/Dockerfile.env-same-file3
-rw-r--r--tests/bud/env/Dockerfile.special-chars3
-rw-r--r--tests/bud/exit42/Containerfile2
-rw-r--r--tests/bud/from-as/Dockerfile7
-rw-r--r--tests/bud/from-as/Dockerfile.skip19
-rw-r--r--tests/bud/from-invalid-registry/Containerfile3
-rw-r--r--tests/bud/from-multiple-files/Dockerfile1.alpine2
-rw-r--r--tests/bud/from-multiple-files/Dockerfile1.scratch2
-rw-r--r--tests/bud/from-multiple-files/Dockerfile2.glob2
-rw-r--r--tests/bud/from-multiple-files/Dockerfile2.nofrom1
-rw-r--r--tests/bud/from-multiple-files/Dockerfile2.withfrom2
-rw-r--r--tests/bud/from-scratch/Containerfile1
-rw-r--r--tests/bud/from-scratch/Containerfile24
-rw-r--r--tests/bud/from-scratch/Dockerfile1
-rw-r--r--tests/bud/from-with-arg/Containerfile7
-rw-r--r--tests/bud/group/Containerfile11
-rw-r--r--tests/bud/hardlink/Dockerfile3
-rw-r--r--tests/bud/healthcheck/Dockerfile3
-rw-r--r--tests/bud/heredoc/Containerfile59
-rw-r--r--tests/bud/heredoc/Containerfile.bash_file15
-rw-r--r--tests/bud/heredoc/Containerfile.verify_mount_leak17
-rw-r--r--tests/bud/http-context-containerfile/context.tarbin0 -> 10240 bytes
-rw-r--r--tests/bud/http-context-subdir/context.tarbin0 -> 10240 bytes
-rw-r--r--tests/bud/http-context/context.tarbin0 -> 10240 bytes
-rw-r--r--tests/bud/inline-network/Dockerfile12
-rw-r--r--tests/bud/inline-network/Dockerfile22
-rw-r--r--tests/bud/inline-network/Dockerfile33
-rw-r--r--tests/bud/inline-network/Dockerfile42
-rw-r--r--tests/bud/layers-squash/Dockerfile3
-rw-r--r--tests/bud/layers-squash/Dockerfile.hardlinks3
-rw-r--r--tests/bud/layers-squash/Dockerfile.multi-stage9
-rw-r--r--tests/bud/layers-squash/artifact1
-rw-r--r--tests/bud/leading-args/Dockerfile5
-rw-r--r--tests/bud/long-sleep/Dockerfile4
-rw-r--r--tests/bud/maintainer/Dockerfile2
-rw-r--r--tests/bud/mount/Dockerfile2
-rw-r--r--tests/bud/multi-stage-builds-small-as/Dockerfile.index5
-rw-r--r--tests/bud/multi-stage-builds-small-as/Dockerfile.mixed13
-rw-r--r--tests/bud/multi-stage-builds-small-as/Dockerfile.name5
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.arg6
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.arg_in_copy8
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.arg_in_stage7
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.extended13
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.index5
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.mixed13
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.name5
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.rebase8
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.reused5
-rw-r--r--tests/bud/multi-stage-builds/Dockerfile.reused26
-rw-r--r--tests/bud/multi-stage-only-base/Containerfile12
-rw-r--r--tests/bud/multi-stage-only-base/Containerfile22
-rw-r--r--tests/bud/multi-stage-only-base/Containerfile33
-rw-r--r--tests/bud/multiarch/Dockerfile10
-rw-r--r--tests/bud/multiarch/Dockerfile.built-in-args6
-rw-r--r--tests/bud/multiarch/Dockerfile.fail5
-rw-r--r--tests/bud/multiarch/Dockerfile.fail-multistage19
-rw-r--r--tests/bud/multiarch/Dockerfile.no-run8
-rw-r--r--tests/bud/namespaces/Containerfile12
-rw-r--r--tests/bud/network/Containerfile2
-rw-r--r--tests/bud/no-change/Dockerfile1
-rw-r--r--tests/bud/no-hostname/Containerfile3
-rw-r--r--tests/bud/no-hostname/Containerfile.noetc3
-rw-r--r--tests/bud/non-directory-in-path/non-directory1
-rw-r--r--tests/bud/onbuild/Dockerfile4
-rw-r--r--tests/bud/onbuild/Dockerfile13
-rw-r--r--tests/bud/onbuild/Dockerfile26
-rw-r--r--tests/bud/platform-sets-args/Containerfile6
-rw-r--r--tests/bud/preprocess/Decomposed.in7
-rw-r--r--tests/bud/preprocess/Error.in5
-rw-r--r--tests/bud/preprocess/common3
-rw-r--r--tests/bud/preprocess/install-base3
-rw-r--r--tests/bud/preserve-volumes/Dockerfile24
-rw-r--r--tests/bud/pull/Containerfile1
-rw-r--r--tests/bud/recurse/Dockerfile3
-rw-r--r--tests/bud/run-mounts/Dockerfile.secret2
-rw-r--r--tests/bud/run-mounts/Dockerfile.secret-access3
-rw-r--r--tests/bud/run-mounts/Dockerfile.secret-mode2
-rw-r--r--tests/bud/run-mounts/Dockerfile.secret-not-required2
-rw-r--r--tests/bud/run-mounts/Dockerfile.secret-options2
-rw-r--r--tests/bud/run-mounts/Dockerfile.secret-required2
-rw-r--r--tests/bud/run-mounts/Dockerfile.secret-required-false2
-rw-r--r--tests/bud/run-mounts/Dockerfile.secret-required-wo-value2
-rw-r--r--tests/bud/run-mounts/Dockerfile.ssh5
-rw-r--r--tests/bud/run-mounts/Dockerfile.ssh_access7
-rw-r--r--tests/bud/run-mounts/Dockerfile.ssh_options3
-rw-r--r--tests/bud/run-privd/Dockerfile2
-rw-r--r--tests/bud/run-scenarios/Dockerfile.args4
-rw-r--r--tests/bud/run-scenarios/Dockerfile.cmd-empty-run3
-rw-r--r--tests/bud/run-scenarios/Dockerfile.cmd-run3
-rw-r--r--tests/bud/run-scenarios/Dockerfile.entrypoint-cmd-empty-run4
-rw-r--r--tests/bud/run-scenarios/Dockerfile.entrypoint-cmd-run4
-rw-r--r--tests/bud/run-scenarios/Dockerfile.entrypoint-empty-run3
-rw-r--r--tests/bud/run-scenarios/Dockerfile.entrypoint-run3
-rw-r--r--tests/bud/run-scenarios/Dockerfile.multi-args5
-rw-r--r--tests/bud/run-scenarios/Dockerfile.noop-flags1
-rw-r--r--tests/bud/secret-relative/Dockerfile5
-rw-r--r--tests/bud/secret-relative/secret1.txt1
-rw-r--r--tests/bud/secret-relative/secret2.txt1
-rw-r--r--tests/bud/shell/Dockerfile3
-rw-r--r--tests/bud/shell/Dockerfile.build-shell-custom3
-rw-r--r--tests/bud/shell/Dockerfile.build-shell-default2
-rw-r--r--tests/bud/simple-multi-step/Containerfile4
-rw-r--r--tests/bud/stdio/Dockerfile7
-rw-r--r--tests/bud/supplemental-groups/Dockerfile3
-rw-r--r--tests/bud/symlink/Containerfile.add-tar-gz-with-link3
-rw-r--r--tests/bud/symlink/Containerfile.add-tar-with-link3
-rw-r--r--tests/bud/symlink/Dockerfile6
-rw-r--r--tests/bud/symlink/Dockerfile.absolute-dir-symlink6
-rw-r--r--tests/bud/symlink/Dockerfile.absolute-symlink6
-rw-r--r--tests/bud/symlink/Dockerfile.multiple-symlinks10
-rw-r--r--tests/bud/symlink/Dockerfile.relative-symlink8
-rw-r--r--tests/bud/symlink/Dockerfile.replace-symlink3
-rw-r--r--tests/bud/symlink/Dockerfile.symlink-points-to-itself3
-rw-r--r--tests/bud/symlink/tarball.tarbin0 -> 10240 bytes
-rw-r--r--tests/bud/symlink/tarball.tar.gzbin0 -> 218 bytes
l---------tests/bud/symlink/tarball_latest.tar1
l---------tests/bud/symlink/tarball_latest.tar.gz1
-rw-r--r--tests/bud/target/Dockerfile13
-rw-r--r--tests/bud/targetarch/Dockerfile3
-rw-r--r--tests/bud/terminal/Dockerfile2
-rw-r--r--tests/bud/unrecognized/Dockerfile2
-rw-r--r--tests/bud/use-args/Containerfile4
-rw-r--r--tests/bud/use-args/Containerfile.dest_nobrace6
-rw-r--r--tests/bud/use-args/Containerfile.destination6
-rw-r--r--tests/bud/use-layers/Dockerfile7
-rw-r--r--tests/bud/use-layers/Dockerfile.24
-rw-r--r--tests/bud/use-layers/Dockerfile.35
-rw-r--r--tests/bud/use-layers/Dockerfile.43
-rw-r--r--tests/bud/use-layers/Dockerfile.52
-rw-r--r--tests/bud/use-layers/Dockerfile.61
-rw-r--r--tests/bud/use-layers/Dockerfile.72
-rw-r--r--tests/bud/use-layers/Dockerfile.build-args4
-rw-r--r--tests/bud/use-layers/Dockerfile.dangling-symlink2
-rw-r--r--tests/bud/use-layers/Dockerfile.fail-case5
-rw-r--r--tests/bud/use-layers/Dockerfile.multistage-copy9
-rw-r--r--tests/bud/use-layers/Dockerfile.non-existent-registry5
-rw-r--r--tests/bud/verify-cleanup/Dockerfile24
-rw-r--r--tests/bud/verify-cleanup/hey1
-rw-r--r--tests/bud/verify-cleanup/secret1.txt1
-rw-r--r--tests/bud/volume-ownership/Dockerfile10
-rw-r--r--tests/bud/volume-perms/Dockerfile6
-rw-r--r--tests/bud/volume-symlink/Dockerfile6
-rw-r--r--tests/bud/volume-symlink/Dockerfile.no-symlink4
-rw-r--r--tests/bud/with-arg/Dockerfile4
-rw-r--r--tests/bud/with-arg/Dockerfile24
-rw-r--r--tests/bud/with-arg/Dockerfilefromarg16
-rw-r--r--tests/bud/workdir-symlink/Dockerfile6
-rw-r--r--tests/bud/workdir-symlink/Dockerfile-27
-rw-r--r--tests/bud/workdir-symlink/Dockerfile-310
-rw-r--r--tests/bud/workdir-user/Dockerfile6
-rw-r--r--tests/bud_overlay_leaks.bats18
-rw-r--r--tests/byid.bats104
-rw-r--r--tests/chroot.bats105
-rw-r--r--tests/commit.bats328
-rw-r--r--tests/config.bats433
-rw-r--r--tests/conformance/README.md39
-rw-r--r--tests/conformance/conformance_test.go3587
-rw-r--r--tests/conformance/selinux.go14
-rw-r--r--tests/conformance/selinux_unsupported.go7
-rw-r--r--tests/conformance/testdata/Dockerfile.add11
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_15
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_105
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_115
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_125
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_135
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_25
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_36
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_3_16
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_45
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_55
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_65
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_75
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_85
-rw-r--r--tests/conformance/testdata/Dockerfile.copyfrom_95
-rw-r--r--tests/conformance/testdata/Dockerfile.edgecases49
-rw-r--r--tests/conformance/testdata/Dockerfile.env23
-rw-r--r--tests/conformance/testdata/Dockerfile.exposedefault2
-rw-r--r--tests/conformance/testdata/Dockerfile.margs39
-rw-r--r--tests/conformance/testdata/Dockerfile.reusebase6
-rw-r--r--tests/conformance/testdata/Dockerfile.run.args5
-rw-r--r--tests/conformance/testdata/Dockerfile.shell3
-rw-r--r--tests/conformance/testdata/add/archive/Dockerfile.12
-rw-r--r--tests/conformance/testdata/add/archive/Dockerfile.22
-rw-r--r--tests/conformance/testdata/add/archive/Dockerfile.32
-rw-r--r--tests/conformance/testdata/add/archive/Dockerfile.42
-rw-r--r--tests/conformance/testdata/add/archive/Dockerfile.52
-rw-r--r--tests/conformance/testdata/add/archive/Dockerfile.62
-rw-r--r--tests/conformance/testdata/add/archive/Dockerfile.72
-rw-r--r--tests/conformance/testdata/add/archive/sub/subdirectory.tar.gzbin0 -> 195 bytes
-rw-r--r--tests/conformance/testdata/add/dir-not-dir/Dockerfile3
-rw-r--r--tests/conformance/testdata/add/dir-not-dir/test.tarbin0 -> 5632 bytes
-rw-r--r--tests/conformance/testdata/add/not-dir-dir/Dockerfile4
-rw-r--r--tests/conformance/testdata/add/not-dir-dir/test.tarbin0 -> 10240 bytes
-rw-r--r--tests/conformance/testdata/add/parent-clean/Dockerfile4
-rw-r--r--tests/conformance/testdata/add/parent-dangling/Dockerfile4
-rw-r--r--tests/conformance/testdata/add/parent-symlink/Dockerfile2
-rw-r--r--tests/conformance/testdata/add/parent-symlink/foobar.tarbin0 -> 10240 bytes
-rw-r--r--tests/conformance/testdata/add/populated-dir-not-dir/Dockerfile5
-rw-r--r--tests/conformance/testdata/add/populated-dir-not-dir/test.tarbin0 -> 10240 bytes
-rw-r--r--tests/conformance/testdata/copy/Dockerfile3
-rw-r--r--tests/conformance/testdata/copy/script2
-rw-r--r--tests/conformance/testdata/copyblahblub/Dockerfile4
-rw-r--r--tests/conformance/testdata/copyblahblub/Dockerfile24
-rw-r--r--tests/conformance/testdata/copyblahblub/Dockerfile38
-rw-r--r--tests/conformance/testdata/copyblahblub/firstdir/seconddir/dir-a/file-a1
-rw-r--r--tests/conformance/testdata/copyblahblub/firstdir/seconddir/dir-b/file-b1
-rw-r--r--tests/conformance/testdata/copychown/Dockerfile11
-rw-r--r--tests/conformance/testdata/copychown/script2
-rw-r--r--tests/conformance/testdata/copychown/script22
-rw-r--r--tests/conformance/testdata/copydir/Dockerfile3
-rw-r--r--tests/conformance/testdata/copydir/dir/file0
-rw-r--r--tests/conformance/testdata/copyempty/.script2
-rw-r--r--tests/conformance/testdata/copyempty/Dockerfile2
-rw-r--r--tests/conformance/testdata/copyempty/Dockerfile22
-rw-r--r--tests/conformance/testdata/copyempty/script12
-rw-r--r--tests/conformance/testdata/copyempty/script22
-rw-r--r--tests/conformance/testdata/copyrename/Dockerfile3
-rw-r--r--tests/conformance/testdata/copyrename/file12
-rw-r--r--tests/conformance/testdata/copysymlink/Dockerfile2
-rw-r--r--tests/conformance/testdata/copysymlink/Dockerfile27
l---------tests/conformance/testdata/copysymlink/file-link.tar.gz1
-rw-r--r--tests/conformance/testdata/copysymlink/file.tar.gz1
-rw-r--r--tests/conformance/testdata/dir/Dockerfile4
-rw-r--r--tests/conformance/testdata/dir/file0
-rw-r--r--tests/conformance/testdata/dir/subdir/file20
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/.dockerignore7
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/Dockerfile8
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file1
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating-other/.dockerignore8
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating-other/Dockerfile8
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating-other/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file21
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating/.dockerignore8
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating/Dockerfile8
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/alternating/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file1
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/nothing-dot/.dockerignore1
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/nothing-dot/Dockerfile5
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/nothing-slash/.dockerignore1
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/nothing-slash/Dockerfile5
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subdir-file/.dockerignore2
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subdir-file/Dockerfile2
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subdir-file/folder1/file11
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subdir-file/folder1/file21
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/.dockerignore2
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/Dockerfile2
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/folder1/file11
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/folder1/file21
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/.dockerignore2
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/Dockerfile5
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/folder/subfolder/file1
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/.dockerignore2
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/Dockerfile5
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/folder/file1
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/.dockerignore2
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/Dockerfile5
-rw-r--r--tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/folder/file1
-rw-r--r--tests/conformance/testdata/dockerignore/empty/.dockerignore1
-rw-r--r--tests/conformance/testdata/dockerignore/empty/Dockerfile2
-rw-r--r--tests/conformance/testdata/dockerignore/empty/test1.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-skip/.dockerignore2
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-skip/Dockerfile2
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-skip/volume/data/oneline.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-1/.dockerignore2
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-1/Dockerfile2
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub1.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub2.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub3.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-2/.dockerignore3
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-2/Dockerfile2
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub1.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub2.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub3.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/integration1/.dockerignore6
-rw-r--r--tests/conformance/testdata/dockerignore/integration1/Dockerfile4
-rw-r--r--tests/conformance/testdata/dockerignore/integration1/subdir/sub1.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/integration1/subdir/sub2.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/integration1/test1.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/integration1/test2.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/integration2/.dockerignore1
-rw-r--r--tests/conformance/testdata/dockerignore/integration2/Dockerfile2
-rw-r--r--tests/conformance/testdata/dockerignore/integration2/subdir/sub1.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/integration2/subdir/subsubdir/subsub1.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/.dockerignore10
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/BUILD.md0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/COPYRIGHT0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/Dockerfile5
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/LICENSE0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/README-secret.md0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/README.md0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/manifest16
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/src/Makefile0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/src/cmd/Makefile0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/src/cmd/main.in0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/src/etc/foo.conf0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/src/etc/foo.conf.d/dropin.conf0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/src/lib/Makefile0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/src/lib/framework.in0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/test1.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/test2.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/integration3/test3.txt0
-rw-r--r--tests/conformance/testdata/dockerignore/minimal_test/.dockerignore2
-rw-r--r--tests/conformance/testdata/dockerignore/minimal_test/Dockerfile3
-rw-r--r--tests/conformance/testdata/dockerignore/minimal_test/stuff/huge/usr/bin/file10
-rw-r--r--tests/conformance/testdata/dockerignore/minimal_test/stuff/huge/usr/bin/file20
-rw-r--r--tests/conformance/testdata/dockerignore/populated/.dotfile-a.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/populated/file-a.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/populated/file-b.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/populated/file-c.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/populated/subdir-b/.dotfile-b.txt1
l---------tests/conformance/testdata/dockerignore/populated/subdir-c/file-b.txt1
l---------tests/conformance/testdata/dockerignore/populated/subdir-c/file-c.txt1
l---------tests/conformance/testdata/dockerignore/populated/subdir-e/file-a.txt1
l---------tests/conformance/testdata/dockerignore/populated/subdir-e/file-b.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/populated/subdir-e/file-n.txt1
l---------tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-a.txt1
l---------tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-n.txt1
-rw-r--r--tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-o.txt1
l---------tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/subdir-g/subdir-b1
-rw-r--r--tests/conformance/testdata/env/precedence/Dockerfile7
-rw-r--r--tests/conformance/testdata/heredoc/Dockerfile.heredoc_copy23
-rw-r--r--tests/conformance/testdata/heredoc/file1
-rw-r--r--tests/conformance/testdata/mount/Dockerfile2
-rw-r--r--tests/conformance/testdata/mount/file1
-rw-r--r--tests/conformance/testdata/mount/file21
-rw-r--r--tests/conformance/testdata/overlapdirwithoutslash/Dockerfile2
-rw-r--r--tests/conformance/testdata/overlapdirwithoutslash/existing/etc/file-in-existing-dir0
-rw-r--r--tests/conformance/testdata/overlapdirwithslash/Dockerfile2
-rw-r--r--tests/conformance/testdata/overlapdirwithslash/existing/etc/file-in-existing-dir0
-rw-r--r--tests/conformance/testdata/replace/symlink-with-directory/Dockerfile3
-rw-r--r--tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.23
-rw-r--r--tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.33
-rw-r--r--tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.43
-rw-r--r--tests/conformance/testdata/replace/symlink-with-directory/tree1/directory/file-in-directory0
l---------tests/conformance/testdata/replace/symlink-with-directory/tree1/maybe-directory1
-rw-r--r--tests/conformance/testdata/replace/symlink-with-directory/tree2/maybe-directory/file-in-maybe-directory0
-rw-r--r--tests/conformance/testdata/subdir/subdir/Dockerfile2
-rw-r--r--tests/conformance/testdata/tar-g/Dockerfile2
-rwxr-xr-xtests/conformance/testdata/tar-g/content.sh7
-rw-r--r--tests/conformance/testdata/tar-g/content.tar.gzbin0 -> 343 bytes
-rw-r--r--tests/conformance/testdata/tar-g/content.txt1
-rw-r--r--tests/conformance/testdata/transientmount/Dockerfile3
-rw-r--r--tests/conformance/testdata/transientmount/Dockerfile.env23
-rw-r--r--tests/conformance/testdata/transientmount/file0
-rw-r--r--tests/conformance/testdata/transientmount/subdir/file20
-rw-r--r--tests/conformance/testdata/volume/Dockerfile7
-rw-r--r--tests/conformance/testdata/volume/file1
-rw-r--r--tests/conformance/testdata/volume/file21
-rw-r--r--tests/conformance/testdata/volumerun/Dockerfile7
-rw-r--r--tests/conformance/testdata/volumerun/file1
-rw-r--r--tests/conformance/testdata/volumerun/file21
-rw-r--r--tests/conformance/testdata/volumerun/file41
-rw-r--r--tests/conformance/testdata/wildcard/Dockerfile3
-rw-r--r--tests/conformance/testdata/wildcard/dir2/file.a1
-rw-r--r--tests/conformance/testdata/wildcard/dir2/file.b1
-rw-r--r--tests/conformance/testdata/wildcard/dir2/file.c1
-rw-r--r--tests/conformance/testdata/wildcard/dir2/file2.b1
-rw-r--r--tests/containers.bats82
-rw-r--r--tests/containers.conf65
-rw-r--r--tests/containers_conf.bats140
-rw-r--r--tests/copy.bats539
-rw-r--r--tests/copy/copy.go160
-rw-r--r--tests/deny.json7
-rw-r--r--tests/digest.bats74
-rw-r--r--tests/digest/README.md28
l---------tests/digest/make-v2s11
l---------tests/digest/make-v2s1-with-dups1
l---------tests/digest/make-v2s21
-rwxr-xr-xtests/digest/make-v2sN180
-rw-r--r--tests/docker.json6
-rw-r--r--tests/e2e/buildah_suite_test.go325
-rw-r--r--tests/e2e/inspect_test.go92
-rw-r--r--tests/formats.bats70
-rw-r--r--tests/from.bats670
-rw-r--r--tests/git-daemon/release-1.11-rhel.tar.gzbin0 -> 10573 bytes
-rw-r--r--tests/git-daemon/repo-with-containerfile-on-old-commit.tar.gzbin0 -> 10924 bytes
-rw-r--r--tests/git-daemon/repo.tar.gzbin0 -> 10530 bytes
-rw-r--r--tests/git-daemon/subdirectory.tar.gzbin0 -> 12108 bytes
-rw-r--r--tests/help.bats95
-rw-r--r--tests/helpers.bash760
-rwxr-xr-xtests/helpers.bash.t87
-rw-r--r--tests/history.bats152
-rw-r--r--tests/images.bats272
-rw-r--r--tests/imgtype/imgtype.go259
-rw-r--r--tests/info.bats29
-rw-r--r--tests/inspect.bats138
-rw-r--r--tests/lists.bats229
-rw-r--r--tests/loglevel.bats28
-rw-r--r--tests/mkcw.bats97
-rw-r--r--tests/mount.bats65
-rw-r--r--tests/namespaces.bats522
-rw-r--r--tests/overlay.bats96
-rw-r--r--tests/policy.json7
-rw-r--r--tests/pull.bats405
-rw-r--r--tests/push.bats226
-rw-r--r--tests/registries.bats32
-rw-r--r--tests/registries.conf26
-rw-r--r--tests/registries.conf.block25
-rw-r--r--tests/registries.conf.hub25
-rw-r--r--tests/rename.bats37
-rw-r--r--tests/rm.bats71
-rw-r--r--tests/rmi.bats248
-rw-r--r--tests/run.bats954
-rw-r--r--tests/selinux.bats86
-rw-r--r--tests/serve/serve.go73
-rw-r--r--tests/sign.bats97
-rw-r--r--tests/source.bats156
-rw-r--r--tests/squash.bats170
-rw-r--r--tests/ssh.bats77
-rw-r--r--tests/subscriptions.bats86
-rw-r--r--tests/tag.bats42
-rwxr-xr-xtests/test_buildah_authentication.sh238
-rwxr-xr-xtests/test_buildah_baseline.sh244
-rwxr-xr-xtests/test_buildah_build_rpm.sh124
-rw-r--r--tests/test_buildah_rpm.sh115
-rwxr-xr-xtests/test_runner.sh22
-rw-r--r--tests/testreport/testreport.go449
-rw-r--r--tests/testreport/types/types.go10
-rw-r--r--tests/tools/Makefile31
-rw-r--r--tests/tools/go.mod187
-rw-r--r--tests/tools/go.sum1629
-rw-r--r--tests/tools/tools.go13
-rw-r--r--tests/tutorial.bats25
-rw-r--r--tests/tutorial/tutorial.go65
-rw-r--r--tests/umount.bats68
-rwxr-xr-xtests/validate/buildahimages-are-sane74
-rwxr-xr-xtests/validate/pr-should-include-tests88
-rwxr-xr-xtests/validate/pr-should-include-tests.t131
-rwxr-xr-xtests/validate/whitespace.sh13
-rw-r--r--troubleshooting.md158
-rw-r--r--unmount.go17
-rw-r--r--util.go228
-rw-r--r--util/types.go20
-rw-r--r--util/util.go475
-rw-r--r--util/util_linux.go20
-rw-r--r--util/util_test.go113
-rw-r--r--util/util_unix.go17
-rw-r--r--util/util_unsupported.go8
-rw-r--r--util/util_windows.go16
983 files changed, 89236 insertions, 0 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
new file mode 100644
index 0000000..ac12d66
--- /dev/null
+++ b/.cirrus.yml
@@ -0,0 +1,348 @@
+---
+
+# Main collection of env. vars to set for all tasks and scripts.
+env:
+ ####
+ #### Global variables used for all tasks
+ ####
+ # Name of the ultimate destination branch for this CI run, PR or post-merge.
+ DEST_BRANCH: "main"
+ GOPATH: "/var/tmp/go"
+ GOSRC: "${GOPATH}/src/github.com/containers/buildah"
+ # Overrides default location (/tmp/cirrus) for repo clone
+ CIRRUS_WORKING_DIR: "${GOSRC}"
+ # Shell used to execute all script commands
+ CIRRUS_SHELL: "/bin/bash"
+ # Automation script path relative to $CIRRUS_WORKING_DIR)
+ SCRIPT_BASE: "./contrib/cirrus"
+ # No need to go crazy, but grab enough to cover most PRs
+ CIRRUS_CLONE_DEPTH: 50
+ # Unless set by in_podman.sh, default to operating outside of a podman container
+ IN_PODMAN: 'false'
+ # root or rootless
+ PRIV_NAME: root
+
+ ####
+ #### Cache-image names to test with
+ ####
+ # GCE project where images live
+ IMAGE_PROJECT: "libpod-218412"
+ FEDORA_NAME: "fedora-39β"
+ PRIOR_FEDORA_NAME: "fedora-38"
+ DEBIAN_NAME: "debian-13"
+
+ # Image identifiers
+ IMAGE_SUFFIX: "c20231004t194547z-f39f38d13"
+ FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
+ PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
+ DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
+
+ IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
+
+ ####
+ #### Command variables to help avoid duplication
+ ####
+ # Command to prefix every output line with a timestamp
+ # (can't do inline awk script, Cirrus-CI or YAML mangles quoting)
+ _TIMESTAMP: 'awk -f ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk'
+
+gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e38e1c66a31ca4c71ddc7e0139d47d075c36dd6d3fd7]
+
+# Default timeout for each task
+timeout_in: 120m
+
+# Default VM to use unless set or modified by task
+gce_instance: &standardvm
+ image_project: "${IMAGE_PROJECT}"
+ zone: "us-central1-c" # Required by Cirrus for the time being
+ cpu: 2
+ memory: "4Gb"
+ disk: 200 # Gigabytes, do not set less than 200 per obscure GCE docs re: I/O performance
+ image_name: "${FEDORA_CACHE_IMAGE_NAME}"
+
+
+# Update metadata on VM images referenced by this repository state
+meta_task:
+ name: "VM img. keepalive"
+ alias: meta
+
+ container:
+ image: "quay.io/libpod/imgts:latest"
+ cpu: 1
+ memory: 1
+
+ env:
+ # Space-separated list of images used by this repository state
+ IMGNAMES: |-
+ ${FEDORA_CACHE_IMAGE_NAME}
+ ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
+ ${DEBIAN_CACHE_IMAGE_NAME}
+ build-push-${IMAGE_SUFFIX}
+ BUILDID: "${CIRRUS_BUILD_ID}"
+ REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
+ GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14]
+ GCPNAME: ENCRYPTED[8509e6a681b859479ce6aa275bd3c4ac82de5beec6df6057925afc4cd85b7ef2e879066ae8baaa2d453b82958e434578]
+ GCPPROJECT: ENCRYPTED[cc09b62d0ec6746a3df685e663ad25d9d5af95ef5fd843c96f3d0ec9d7f065dc63216b9c685c9f43a776a1d403991494]
+
+ clone_script: 'true'
+ script: '/usr/local/bin/entrypoint.sh'
+
+
+smoke_task:
+ alias: 'smoke'
+ name: "Smoke Test"
+
+ gce_instance:
+ memory: "12Gb"
+
+ # Don't bother running on branches (including cron), or for tags.
+ only_if: $CIRRUS_PR != ''
+
+ timeout_in: 30m
+
+ setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+ build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
+ validate_test_script: '${SCRIPT_BASE}/test.sh validate |& ${_TIMESTAMP}'
+
+ binary_artifacts:
+ path: ./bin/*
+
+# Check that all included go modules from other sources match
+# # what is expected in `vendor/modules.txt` vs `go.mod`.
+vendor_task:
+ name: "Test Vendoring"
+ alias: vendor
+
+ env:
+ CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah"
+ GOPATH: "/var/tmp/go"
+ GOSRC: "/var/tmp/go/src/github.com/containers/buildah"
+
+ # Runs within Cirrus's "community cluster"
+ container:
+ image: docker.io/library/golang:latest
+ cpu: 1
+ memory: 1
+
+ timeout_in: 5m
+
+ vendor_script:
+ - 'make vendor'
+ - './hack/tree_status.sh'
+
+
+# Confirm cross-compile ALL architectures on a Mac OS-X VM.
+cross_build_task:
+ name: "Cross Compile"
+ alias: cross_build
+ only_if: >-
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+
+ osx_instance:
+ image: ghcr.io/cirruslabs/macos-ventura-base:latest
+
+ script:
+ - brew update
+ - brew install go
+ - brew install go-md2man
+ - brew install gpgme
+ - go version
+ - make cross CGO_ENABLED=0
+
+ binary_artifacts:
+ path: ./bin/*
+
+
+unit_task:
+ name: 'Unit tests w/ $STORAGE_DRIVER'
+ alias: unit
+ only_if: &not_build_docs >-
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
+ $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*'
+ depends_on: &smoke_vendor_cross
+ - smoke
+ - vendor
+ - cross_build
+
+ timeout_in: 1h
+
+ matrix:
+ - env:
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ STORAGE_DRIVER: 'overlay'
+
+ setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+ build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
+ unit_test_script: '${SCRIPT_BASE}/test.sh unit |& ${_TIMESTAMP}'
+
+ binary_artifacts:
+ path: ./bin/*
+
+
+conformance_task:
+ name: 'Build Conformance w/ $STORAGE_DRIVER'
+ alias: conformance
+ only_if: *not_build_docs
+ depends_on: *smoke_vendor_cross
+
+ gce_instance:
+ image_name: "${DEBIAN_CACHE_IMAGE_NAME}"
+
+ timeout_in: 65m
+
+ matrix:
+ - env:
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ STORAGE_DRIVER: 'overlay'
+
+ setup_script: '${SCRIPT_BASE}/setup.sh conformance |& ${_TIMESTAMP}'
+ conformance_test_script: '${SCRIPT_BASE}/test.sh conformance |& ${_TIMESTAMP}'
+
+
+integration_task:
+ name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
+ alias: integration
+ only_if: *not_build_docs
+ depends_on: *smoke_vendor_cross
+
+ matrix:
+ # VFS
+ - env:
+ DISTRO_NV: "${FEDORA_NAME}"
+ IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+ IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'vfs'
+ - env:
+ DISTRO_NV: "${DEBIAN_NAME}"
+ IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'vfs'
+ # OVERLAY
+ - env:
+ DISTRO_NV: "${FEDORA_NAME}"
+ IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
+ - env:
+ DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+ IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
+ - env:
+ DISTRO_NV: "${DEBIAN_NAME}"
+ IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
+
+ gce_instance:
+ image_name: "$IMAGE_NAME"
+
+ # Separate scripts for separate outputs, makes debugging easier.
+ setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+ build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
+ integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}'
+
+ binary_artifacts:
+ path: ./bin/*
+
+ always: &standardlogs
+ audit_log_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh audit'
+ df_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh df'
+ journal_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh journal'
+ podman_system_info_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh podman'
+ buildah_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh buildah_version'
+ buildah_info_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh buildah_info'
+ package_versions_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh packages'
+ golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang'
+
+integration_rootless_task:
+ name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER"
+ alias: integration_rootless
+ only_if: *not_build_docs
+ depends_on: *smoke_vendor_cross
+
+ matrix:
+ # Running rootless tests on overlay
+ # OVERLAY
+ - env:
+ DISTRO_NV: "${FEDORA_NAME}"
+ IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
+ PRIV_NAME: rootless
+ - env:
+ DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+ IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
+ PRIV_NAME: rootless
+ - env:
+ DISTRO_NV: "${DEBIAN_NAME}"
+ IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
+ STORAGE_DRIVER: 'overlay'
+ PRIV_NAME: rootless
+
+ gce_instance:
+ image_name: "$IMAGE_NAME"
+
+ # Separate scripts for separate outputs, makes debugging easier.
+ setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+ build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
+ integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}'
+
+ binary_artifacts:
+ path: ./bin/*
+
+ always:
+ <<: *standardlogs
+
+in_podman_task:
+ name: "Containerized Integration"
+ alias: in_podman
+ only_if: *not_build_docs
+ depends_on: *smoke_vendor_cross
+
+ env:
+ # This is key, cause the scripts to re-execute themselves inside a container.
+ IN_PODMAN: 'true'
+ BUILDAH_ISOLATION: 'chroot'
+ STORAGE_DRIVER: 'vfs'
+
+ # Separate scripts for separate outputs, makes debugging easier.
+ setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+ build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
+ integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}'
+
+ binary_artifacts:
+ path: ./bin/*
+
+ always:
+ <<: *standardlogs
+
+
+# Status aggregator for all tests. This task simply ensures a defined
+# set of tasks all passed, and allows confirming that based on the status
+# of this task.
+success_task:
+ name: "Total Success"
+ alias: success
+
+ depends_on:
+ - meta
+ - smoke
+ - unit
+ - conformance
+ - vendor
+ - cross_build
+ - integration
+ - in_podman
+
+ container:
+ image: "quay.io/libpod/alpine:latest"
+ cpu: 1
+ memory: 1
+
+ env:
+ CIRRUS_SHELL: direct # execute command directly
+
+ clone_script: mkdir -p $CIRRUS_WORKING_DIR
+ script: /bin/true
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000..a458262
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,71 @@
+<!--
+If you are reporting a new issue, make sure that we do not have any duplicates
+already open. You can ensure this by searching the issue list for this
+repository. If there is a duplicate, please close your issue and add a comment
+to the existing issue instead.
+
+If you suspect your issue is a bug, please edit your issue description to
+include the BUG REPORT INFORMATION shown below. If you fail to provide this
+information within 7 days, we cannot debug your issue and will close it. We
+will, however, reopen it if you later provide the information.
+
+---------------------------------------------------
+BUG REPORT INFORMATION
+---------------------------------------------------
+Use the commands below to provide key information from your environment:
+You do NOT have to include this information if this is a FEATURE REQUEST
+-->
+
+**Description**
+
+<!--
+Briefly describe the problem you are having in a few paragraphs.
+-->
+
+**Steps to reproduce the issue:**
+1.
+2.
+3.
+
+
+**Describe the results you received:**
+
+
+**Describe the results you expected:**
+
+
+**Output of `rpm -q buildah` or `apt list buildah`:**
+
+```
+(paste your output here)
+```
+
+**Output of `buildah version`:**
+
+```
+(paste your output here)
+```
+
+**Output of `podman version` if reporting a `podman build` issue:**
+
+```
+(paste your output here)
+```
+
+**Output of `cat /etc/*release`:**
+
+```
+(paste your output here)
+```
+
+**Output of `uname -a`:**
+
+```
+(paste your output here)
+```
+
+**Output of `cat /etc/containers/storage.conf`:**
+
+```
+(paste your output here)
+```
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..25fd64e
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,66 @@
+<!--
+Thanks for sending a pull request!
+
+Please make sure you've read and understood our contributing guidelines
+(https://github.com/containers/buildah/blob/main/CONTRIBUTING.md) as well as ensuring
+that all your commits are signed with `git commit -s`.
+-->
+
+#### What type of PR is this?
+
+<!--
+Please label this pull request according to what type of issue you are
+addressing, especially if this is a release targeted pull request.
+
+Uncomment only one `/kind <>` line, hit enter to put that in a new line, and
+remove leading whitespace from that line:
+-->
+
+> /kind api-change
+> /kind bug
+> /kind cleanup
+> /kind deprecation
+> /kind design
+> /kind documentation
+> /kind failing-test
+> /kind feature
+> /kind flake
+> /kind other
+
+#### What this PR does / why we need it:
+
+#### How to verify it
+
+#### Which issue(s) this PR fixes:
+
+<!--
+Automatically closes linked issue when PR is merged.
+Uncomment the following comment block and include the issue
+number or None on one line.
+Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`, or `None`.
+-->
+
+<!--
+Fixes #
+or
+None
+-->
+
+#### Special notes for your reviewer:
+
+#### Does this PR introduce a user-facing change?
+
+<!--
+If no, just write `None` in the release-note block below. If yes, a release note
+is required: Enter your extended release note in the block below. If the PR
+requires additional action from users switching to the new release, include the
+string "action required".
+
+For more information on release notes please follow the kubernetes model:
+https://git.k8s.io/community/contributors/guide/release-notes.md
+-->
+
+```release-note
+
+```
+
diff --git a/.github/renovate.json5 b/.github/renovate.json5
new file mode 100644
index 0000000..64d26e3
--- /dev/null
+++ b/.github/renovate.json5
@@ -0,0 +1,63 @@
+/*
+ Renovate is a service similar to GitHub Dependabot, but with
+ (fantastically) more configuration options. So many options
+ in fact, if you're new I recommend glossing over this cheat-sheet
+ prior to the official documentation:
+
+ https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet
+
+ Configuration Update/Change Procedure:
+ 1. Make changes
+ 2. Manually validate changes (from repo-root):
+
+ podman run -it \
+ -v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
+ docker.io/renovate/renovate:latest \
+ renovate-config-validator
+ 3. Commit.
+
+ Configuration Reference:
+ https://docs.renovatebot.com/configuration-options/
+
+ Monitoring Dashboard:
+ https://app.renovatebot.com/dashboard#github/containers
+
+ Note: The Renovate bot will create/manage it's business on
+ branches named 'renovate/*'. Otherwise, and by
+ default, the only the copy of this file that matters
+ is the one on the `main` branch. No other branches
+ will be monitored or touched in any way.
+*/
+
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+
+ /*************************************************
+ ****** Global/general configuration options *****
+ *************************************************/
+
+ // Re-use predefined sets of configuration options to DRY
+ "extends": [
+ // https://github.com/containers/automation/blob/main/renovate/defaults.json5
+ "github>containers/automation//renovate/defaults.json5"
+ ],
+
+ // Permit automatic rebasing when base-branch changes by more than
+ // one commit.
+ "rebaseWhen": "behind-base-branch",
+
+ /*************************************************
+ *** Repository-specific configuration options ***
+ *************************************************/
+
+ // Don't leave dep. update. PRs "hanging", assign them to people.
+ "assignees": ["containers/buildah-maintainers"],
+
+ "ignorePaths": [
+ "**/vendor/**",
+ "**/docs/**",
+ "**/examples/**",
+ "**/tests/**"
+ ],
+
+}
diff --git a/.github/workflows/check_cirrus_cron.yml b/.github/workflows/check_cirrus_cron.yml
new file mode 100644
index 0000000..7dcd3f3
--- /dev/null
+++ b/.github/workflows/check_cirrus_cron.yml
@@ -0,0 +1,20 @@
+---
+
+# See also:
+# https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml
+
+on:
+ # Note: This only applies to the default branch.
+ schedule:
+ # N/B: This should correspond to a period slightly after
+ # the last job finishes running. See job defs. at:
+ # https://cirrus-ci.com/settings/repository/6706677464432640
+ - cron: '03 03 * * 1-5'
+ # Debug: Allow triggering job manually in github-actions WebUI
+ workflow_dispatch: {}
+
+jobs:
+ # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+ call_cron_failures:
+ uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
+ secrets: inherit
diff --git a/.github/workflows/discussion_lock.yml b/.github/workflows/discussion_lock.yml
new file mode 100644
index 0000000..a03b18a
--- /dev/null
+++ b/.github/workflows/discussion_lock.yml
@@ -0,0 +1,20 @@
+---
+
+# See also:
+# https://github.com/containers/podman/blob/main/.github/workflows/discussion_lock.yml
+
+on:
+ schedule:
+ - cron: '0 0 * * *'
+ # Debug: Allow triggering job manually in github-actions WebUI
+ workflow_dispatch: {}
+
+jobs:
+ # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+ closed_issue_discussion_lock:
+ uses: containers/podman/.github/workflows/discussion_lock.yml@main
+ secrets: inherit
+ permissions:
+ contents: read
+ issues: write
+ pull-requests: write
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
new file mode 100644
index 0000000..b3e9b34
--- /dev/null
+++ b/.github/workflows/pr.yml
@@ -0,0 +1,23 @@
+name: validate
+on:
+ pull_request:
+
+jobs:
+ commit:
+ runs-on: ubuntu-22.04
+ # Only check commits on pull requests.
+ if: github.event_name == 'pull_request'
+ steps:
+ - name: get pr commits
+ id: 'get-pr-commits'
+ uses: tim-actions/get-pr-commits@3efc1387ead42029a0d488ab98f24b7452dc3cde # v1.3.0
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+
+ - if: contains(github.head_ref, 'renovate/') != true
+ name: check subject line length
+ uses: tim-actions/commit-message-checker-with-regex@094fc16ff83d04e2ec73edb5eaf6aa267db33791 # v0.3.2
+ with:
+ commits: ${{ steps.get-pr-commits.outputs.commits }}
+ pattern: '^.{0,72}(\n.*)*$'
+ error: 'Subject too long (max 72)'
diff --git a/.github/workflows/rerun_cirrus_cron.yml b/.github/workflows/rerun_cirrus_cron.yml
new file mode 100644
index 0000000..d3ee64c
--- /dev/null
+++ b/.github/workflows/rerun_cirrus_cron.yml
@@ -0,0 +1,19 @@
+---
+
+# See also: https://github.com/containers/podman/blob/main/.github/workflows/rerun_cirrus_cron.yml
+
+on:
+ # Note: This only applies to the default branch.
+ schedule:
+ # N/B: This should correspond to a period slightly after
+ # the last job finishes running. See job defs. at:
+ # https://cirrus-ci.com/settings/repository/6706677464432640
+ - cron: '01 01 * * 1-5'
+ # Debug: Allow triggering job manually in github-actions WebUI
+ workflow_dispatch: {}
+
+jobs:
+ # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+ call_cron_rerun:
+ uses: containers/podman/.github/workflows/rerun_cirrus_cron.yml@main
+ secrets: inherit
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000..14f36fc
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,31 @@
+name: Mark stale issues and pull requests
+
+# Please refer to https://github.com/actions/stale/blob/master/action.yml
+# to see all config knobs of the stale action.
+
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+permissions:
+ contents: read
+
+jobs:
+ stale:
+
+ permissions:
+ issues: write # for actions/stale to close stale issues
+ pull-requests: write # for actions/stale to close stale PRs
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/stale@v8
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
+ stale-pr-message: 'A friendly reminder that this PR had no activity for 30 days.'
+ stale-issue-label: 'stale-issue'
+ stale-pr-label: 'stale-pr'
+ days-before-stale: 30
+ days-before-close: 365
+ remove-stale-when-updated: true
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e8dee79
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
+docs/buildah*.1
+docs/*.5
+/bin
+/buildah
+/imgtype
+/build/
+/tests/tools/build
+Dockerfile*
+!/tests/bud/*/Dockerfile*
+!/tests/conformance/**/Dockerfile*
+*.swp
+/result/
+internal/mkcw/embed/entrypoint.o
+internal/mkcw/embed/entrypoint
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..af0b10c
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,13 @@
+---
+run:
+ build-tags:
+ - apparmor
+ - seccomp
+ - selinux
+ # Don't exceed number of threads available when running under CI
+ concurrency: 4
+linters:
+ enable:
+ - revive
+ - unconvert
+ - unparam
diff --git a/.packit.yaml b/.packit.yaml
new file mode 100644
index 0000000..76b297c
--- /dev/null
+++ b/.packit.yaml
@@ -0,0 +1,54 @@
+---
+# See the documentation for more information:
+# https://packit.dev/docs/configuration/
+
+specfile_path: rpm/buildah.spec
+upstream_tag_template: v{version}
+
+srpm_build_deps:
+ - make
+
+jobs:
+ - job: copr_build
+ trigger: pull_request
+ notifications:
+ failure_comment:
+ message: "Ephemeral COPR build failed. @containers/packit-build please check."
+ enable_net: true
+ targets:
+ - fedora-all-x86_64
+ - fedora-all-aarch64
+ - fedora-eln-x86_64
+ - fedora-eln-aarch64
+ - centos-stream+epel-next-8-x86_64
+ - centos-stream+epel-next-8-aarch64
+ - centos-stream+epel-next-9-x86_64
+ - centos-stream+epel-next-9-aarch64
+ additional_repos:
+ - "copr://rhcontainerbot/podman-next"
+
+ # Run on commit to main branch
+ - job: copr_build
+ trigger: commit
+ notifications:
+ failure_comment:
+ message: "podman-next COPR build failed. @containers/packit-build please check."
+ owner: rhcontainerbot
+ project: podman-next
+ enable_net: true
+
+ - job: propose_downstream
+ trigger: release
+ update_release: false
+ dist_git_branches:
+ - fedora-all
+
+ - job: koji_build
+ trigger: commit
+ dist_git_branches:
+ - fedora-all
+
+ - job: bodhi_update
+ trigger: commit
+ dist_git_branches:
+ - fedora-branched # rawhide updates are created automatically
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..4958128
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,3071 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Changelog
+
+## v1.33.5 (2024-02-01)
+
+ Bump c/common to v0.57.4, moby/buildkit v0.5.12
+
+## v1.33.4 (2024-01-30)
+
+ Bump c/image to v5.29.2 and c/common to v0.57.3
+
+## v1.33.3 (2024-01-18)
+
+ Bump c/common to 0.57.2 and c/image to 5.29.1
+
+## v1.33.2 (2023-11-22)
+
+ Update minimum to golang 1.20
+ fix(deps): update module github.com/fsouza/go-dockerclient to v1.10.0
+ fix(deps): update module github.com/moby/buildkit to v0.12.3
+ Bump to v1.33.2-dev
+
+## v1.33.1 (2023-11-18)
+
+ fix(deps): update module github.com/moby/buildkit to v0.11.4 [security]
+ test,heredoc: use fedora instead of docker.io/library/python:latest
+ Bump to v1.33.1-dev
+
+## v1.33.0 (2023-11-17)
+
+ Never omit layers for emptyLayer instructions when squashing/cwing
+ Add OverrideChanges and OverrideConfig to CommitOptions
+ buildah: add heredoc support for RUN, COPY and ADD
+ vendor: bump imagebuilder to v1.2.6-0.20231110114814-35a50d57f722
+ conformance tests: archive the context directory as 0:0 (#5171)
+ blobcacheinfo,test: blobs must be resued when pushing across registry
+ Bump c/storage v1.51.0, c/image v5.29.0, c/common v0.57.0
+ pkg/util.MirrorToTempFileIfPathIsDescriptor(): don't leak an fd
+ StageExecutor.Execute: force a commit for --unsetenv, too
+ Increase a copier+chroot test timeout
+ Add support for --compat-auth-file in login/logout
+ Update existing tests for error message change
+ Update c/image and c/common to latest
+ fix(deps): update module github.com/containerd/containerd to v1.7.9
+ build: downgrade to go 1.20
+ Add godoc for pkg/parse.GetTempDir
+ conformance tests: use go-dockerclient for BuildKit builds
+ Make TEE types case-insensitive
+ fix(deps): update module golang.org/x/crypto to v0.15.0
+ Tweak some help descriptions
+ Stop using DefaultNetworkSysctl and use containers.conf only
+ Implement ADD checksum flag #5135
+ vendor of openshift/imagebuilder #5135
+ Pass secrets from the host down to internal podman containers
+ Update cirrus and version of golang
+ image: replace GetStoreImage with ResolveReference
+ vendor: bump c/image to 373c52a9466f
+ pkg/parse.Platform(): minor simplification
+ createConfigsAndManifests: clear history before cw-specific logic
+ Use a constant definition instead of "scratch"
+ conformance: use require.NoErrorf() more
+ fix(deps): update module golang.org/x/term to v0.14.0
+ fix(deps): update module golang.org/x/sync to v0.5.0
+ fix(deps): update module github.com/spf13/cobra to v1.8.0
+ fix(deps): update module golang.org/x/sys to v0.14.0
+ fix(deps): update github.com/containers/common digest to 8354404
+ fix(deps): update module github.com/opencontainers/runc to v1.1.10
+ fix(deps): update github.com/containers/luksy digest to b5a7f79
+ Log the platform for build errors during multi-platform builds
+ Use mask definitions from containers/common
+ Vendor in latest containers/common
+ fix(deps): update module github.com/containerd/containerd to v1.7.8
+ fix(deps): update module go.etcd.io/bbolt to v1.3.8
+ container.conf: support attributed string slices
+ fix(deps): update module sigs.k8s.io/yaml to v1.4.0
+ Use cutil.StringInSlice rather then contains
+ Add --no-hostname option to buildah containers
+ vendor c/common: appendable containers.conf strings, Part 1
+ fix(deps): update module github.com/onsi/gomega to v1.28.1
+ chroot.setupChrootBindMounts: pay more attention to flags
+ chore(deps): update dependency containers/automation_images to v20231004
+ Vendor containers/common
+ chore(deps): update module golang.org/x/net to v0.17.0 [security]
+ run: use internal.GetTempDir with os.MkdirTemp
+ fix(deps): update module github.com/containerd/containerd to v1.7.7
+ imagebuildah,multi-stage: do not remove base images
+ gitignore: add mkcw binary
+ mkcw: remove entrypoint binaries
+ fix(deps): update module golang.org/x/crypto to v0.14.0
+ fix(deps): update module golang.org/x/sys to v0.13.0
+ fix(deps): update module golang.org/x/sync to v0.4.0
+ Update some comments related to confidential workload
+ Use the parent's image ID in the config that we pass to imagebuilder
+ fix(deps): update github.com/containers/common digest to 8892536
+ fix(deps): update github.com/containers/luksy digest to 6df88cb
+ bug: Ensure the mount type is always BindMount by default
+ Protocol can be specified with --port. Ex. --port 514/udp
+ fix(deps): update module github.com/onsi/gomega to v1.28.0
+ build,config: add support for --unsetlabel
+ tests/bud: add tests
+ [CI:BUILD] Packit: tag @containers/packit-build on copr build failures
+ stage_executor: allow images without layers
+ vendor of containers/common
+ Removing selinux_tag.sh as no longer needed after 580356f [NO NEW TESTS NEEDED]
+ add/copy: make sure we handle relative path names correctly
+ fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc5
+ Bump to v1.33.0-dev
+ imagebuildah: consider ignorefile with --build-context
+
+## v1.32.0 (2023-09-14)
+
+ GetTmpDir is not using ImageCopyTmpdir correctly
+ Run codespell on code
+ Bump vendor containers/(common, storage, image)
+ Cirrus: Remove multi-arch buildah image builds
+ fix(deps): update module github.com/containerd/containerd to v1.7.6
+ Split GetTempDir from internal/util
+ Move most of internal/parse to internal/volumes
+ copier: remove libimage dependency via util package
+ Add some docs for `build --cw`, `commit --cw`, and `mkcw`
+ Add `buildah mkcw`, add `--cw` to `buildah commit` and `buildah build`
+ Make sure that pathnames picked up from the environment are absolute
+ fix(deps): update module github.com/cyphar/filepath-securejoin to v0.2.4
+ fix(deps): update module github.com/docker/docker to v24.0.6+incompatible
+ Don't try to look up names when committing images
+ fix(deps): update module golang.org/x/crypto to v0.13.0
+ docs: use valid github repo
+ fix(deps): update module golang.org/x/sys to v0.12.0
+ vendor containers/common@12405381ff45
+ push: --force-compression should be true with --compression-format
+ Update module github.com/containerd/containerd to v1.7.5
+ [skip-ci] Update tim-actions/commit-message-checker-with-regex action to v0.3.2
+ docs: add reference to oci-hooks
+ Support passing of ULimits as -1 to mean max
+ GHA: Attempt to fix discussion_lock workflow
+ Fixing the owner of the storage.conf.
+ pkg/chrootuser: Ignore comments when parsing /etc/group on FreeBSD
+ Use buildah repo rather then podman repo
+ GHA: Closed issue/PR comment-lock test
+ fix(deps): update module github.com/containers/storage to v1.49.0
+ chore(deps): update dependency containers/automation_images to v20230816
+ Replace troff code with markdown in buildah-{copy,add}.1.md
+ [CI:BUILD] rpm: spdx compatible license field
+ executor: build-arg warnings must honor global args
+ fix(deps): update module github.com/containers/ocicrypt to v1.1.8
+ chroot: `setSeccomp` add support for `ArchPARISC(64)` and `ArchRISCV64`
+ make,cross: restore loong64
+ Clear CommonBuildOpts when loading Builder status
+ buildah/push/manifest-push: add support for --force-compression
+ vendor: bump c/common to v0.55.1-0.20230811093040-524b4d5c12f9
+ chore(deps): update dependency containers/automation_images to v20230809
+ [CI:BUILD] RPM: fix buildtags
+ fix(deps): update module github.com/opencontainers/runc to v1.1.9
+ chore(deps): update dependency ubuntu to v22
+ chore(deps): update dependency containers/automation_images to v20230807
+ [CI:BUILD] Packit: add fedora-eln targets
+ [CI:BUILD] RPM: build docs with vendored go-md2man
+ packit: Build PRs into default packit COPRs
+ Update install.md
+ Update install.md changes current Debian stable version name
+ fix(deps): update module golang.org/x/term to v0.11.0
+ fix(deps): update module golang.org/x/crypto to v0.12.0
+ tests: fix layer-label tests
+ buildah: add --layer-label for setting labels on layers
+ Cirrus: container/rootless env. var. passthrough
+ Cirrus: Remove duplicate env. var. definitions
+ fix(deps): update github.com/containers/storage digest to c3da76f
+ Add a missing .Close() call on an ImageSource
+ Create only a reference when that's all we need
+ Add a missing .Close() call on an ImageDestination
+ CI:BUILD] RPM: define gobuild macro for rhel/centos stream
+ manifest/push: add support for --add-compression
+ manifest/inspect: add support for tls-verify and authfile
+ vendor: bump c/common to v0.55.1-0.20230727095721-647ed1d4d79a
+ vendor: bump c/image to v5.26.1-0.20230726142307-8c387a14f4ac
+ fix(deps): update module github.com/containerd/containerd to v1.7.3
+ fix(deps): update module github.com/onsi/gomega to v1.27.10
+ fix(deps): update module github.com/docker/docker to v24.0.5+incompatible
+ fix(deps): update module github.com/containers/image/v5 to v5.26.1
+ fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0
+ Update vendor of containers/(storage,image,common)
+ fix(deps): update module github.com/opencontainers/runc to v1.1.8
+ [CI:BUILD] Packit: remove pre-sync action
+ fix(deps): update module github.com/containers/common to v0.55.2
+ [CI:BUILD] Packit: downstream task script needs GOPATH
+ Vendor in containers/(common, image, storage)
+ fix(deps): update module golang.org/x/term to v0.10.0
+ [CI:BUILD] Packit: fix pre-sync action for downstream tasks
+ contrib/buildahimage: set config correctly for rootless build user
+ fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc4
+ Bump to v1.32.0-dev
+ Update debian install instructions
+ pkg/overlay: add limited support for FreeBSD
+
+## v1.31.0 (2023-06-30)
+
+ Bump c/common to 0.55.1 and c/image to 5.26.1
+ Bump c/image to 5.26.0 and c/common to 0.54.0
+ vendor: update c/{common,image,storage} to latest
+ chore: pkg imported more than once
+ buildah: add pasta(1) support
+ use slirp4netns package from c/common
+ update c/common to latest
+ add hostname to /etc/hosts when running with host network
+ vendor: update c/common to latest
+ [CI:BUILD] Packit: add jobs for downstream Fedora package builds
+ fix(deps): update module golang.org/x/sync to v0.3.0
+ fix(deps): update module golang.org/x/crypto to v0.10.0
+ Add smoke tests for encryption CLI helpers
+ fix(deps): update module golang.org/x/term to v0.9.0
+ fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0-rc.3
+ Remove device mapper support
+ Remove use of deprecated tar.TypeRegA
+ Update tooling to support newer golangci-lint
+ Make cli.EncryptConfig,DecryptConfig, GetFormat public
+ Don't decrypt images by default
+ fix(deps): update module github.com/onsi/gomega to v1.27.8
+ fix(deps): update github.com/containers/storage digest to 3f3fb2f
+ Renovate: Don't touch fragile test stuffs
+ [CI:DOCS] Update comment to remove ambiguity
+ fix(deps): update github.com/containers/image/v5 digest to abe5133
+ fix(deps): update module github.com/sirupsen/logrus to v1.9.3
+ fix(deps): update module github.com/containerd/containerd to v1.7.2
+ Explicitly ref. quay images for CI
+ At startup, log the effective capabilities for debugging
+ parse: use GetTempDir from internal utils
+ GetTmpDir: honor image_copy_tmp_dir from containers.conf
+ docs/Makefile: don't show sed invocations
+ CI: Support testing w/ podman-next COPR packages
+ intermediate-images inherit-label test: make it debuggable
+ fix(deps): update github.com/containers/common digest to 462ccdd
+ Add a warning to `--secret` docs
+ vendor: bump c/storage to v1.46.2-0.20230526114421-55ee2d19292f
+ executor: apply label to only final stage
+ remove registry.centos.org
+ Go back to setting SysProcAttr.Pdeathsig for child processes
+ Fix auth.json path (validated on Fedora 38) wq Signed-off-by: Andreas Mack <andreas.mack@gmail.com>
+ fix(deps): update module github.com/stretchr/testify to v1.8.3
+ CI: fix test broken by renovatebot
+ chore(deps): update quay.io/libpod/testimage docker tag to v20221018
+ fix(deps): update module github.com/onsi/gomega to v1.27.7
+ test: use debian instead of docker.io/library/debian:testing-slim
+ vendor: bump logrus to 1.9.2
+ [skip-ci] Update tim-actions/get-pr-commits action to v1.3.0
+ Revert "Proof of concept: nightly dependency treadmill"
+ fix(deps): update module github.com/sirupsen/logrus to v1.9.1
+ vendor in containers/(common,storage,image)
+ fix(deps): update module github.com/docker/distribution to v2.8.2+incompatible
+ run: drop Pdeathsig
+ chroot: lock thread before setPdeathsig
+ tests: add a case for required=false
+ fix(deps): update module github.com/openshift/imagebuilder to v1.2.5
+ build: validate volumes on backend
+ secret: accept required flag w/o value
+ fix(deps): update module github.com/containerd/containerd to v1.7.1
+ fix(deps): update module golang.org/x/crypto to v0.9.0
+ Update the demos README file to fix minor typos
+ fix(deps): update module golang.org/x/sync to v0.2.0
+ fix(deps): update module golang.org/x/term to v0.8.0
+ manifest, push: use source as destination if not specified
+ run,mount: remove path only if they didnt pre-exist
+ Cirrus: Fix meta task failing to find commit
+ parse: filter edge-case for podman-remote
+ fix(deps): update module github.com/opencontainers/runc to v1.1.7
+ fix(deps): update module github.com/docker/docker to v23.0.5+incompatible
+ build: --platform must accept only arch
+ fix(deps): update module github.com/containers/common to v0.53.0
+ makefile: increase conformance timeout
+ Cap suffixDigitsModulo to a 9-digits suffix.
+ Rename conflict to suffixDigitsModulo
+ fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0-rc.2
+ fix(deps): update module github.com/opencontainers/runc to v1.1.6
+ chore(deps): update centos docker tag to v8
+ Clarify the need for qemu-user-static package
+ chore(deps): update quay.io/centos/centos docker tag to v8
+ Renovate: Ensure test/tools/go.mod is managed
+ Revert "buildah image should not enable fuse-overlayfs for rootful mode"
+ Bump to v1.31.0-dev
+ parse: add support for relabel bind mount option
+
+## v1.30.0 (2023-04-06)
+
+ fix(deps): update module github.com/opencontainers/runc to v1.1.5
+ fix(deps): update module github.com/fsouza/go-dockerclient to v1.9.7
+ buildah image should not enable fuse-overlayfs for rootful mode
+ stage_executor: inline network add default string
+ fix(deps): update module github.com/containers/common to v0.51.2
+ chore(deps): update dependency containers/automation_images to v20230330
+ fix(deps): update module github.com/docker/docker to v23.0.2+incompatible
+ chore(deps): update dependency containers/automation_images to v20230320
+ fix(deps): update module github.com/onsi/gomega to v1.27.6
+ fix(deps): update github.com/opencontainers/runtime-tools digest to e931285
+ [skip-ci] Update actions/stale action to v8
+ test: don't allow to override io.buildah.version
+ executor: only apply label on the final stage
+ Update docs/buildah-build.1.md
+ update build instruction for Ubuntu
+ code review
+ build: accept arguments from file with --build-arg-file
+ run_linux: Update heuristic for mounting /sys
+ [CI:BUILD] Packit: Enable Copr builds on PR and commit to main
+ fix(deps): update module github.com/fsouza/go-dockerclient to v1.9.6
+ Update to Go 1.18
+ Disable dependabot in favor of renovate
+ chore(deps): update dependency containers/automation_images to v20230314
+ Fix requiring tests on Makefile changes
+ Vendor in latest containers/(storage, common, image)
+ imagebuildah: set len(short_image_id) to 12
+ Re-enable conformance tests
+ Skip conformance test failures with Docker 23.0.1
+ Cirrus: Replace Ubuntu -> Debian SID
+ run: add support for inline --network in RUN stmt
+ vendor: bump imagebuilder to a3c3f8358ca31b1e4daa6
+ stage_executor: attempt to push cache only when cacheKey is valid
+ Add "ifnewer" as option in help message for pull command
+ build: document behaviour of buildah's distributed cache
+ fix(deps): update module golang.org/x/term to v0.6.0
+ Add default list of capabilities required to run buildah in a container
+ executor,copy: honor default ARG value while eval stage
+ sshagent: use ExtendedAgent instead of Agent
+ tests/bud: remove unwated test
+ executor: do not warn on builtin default args
+ executor: don't warn about unused TARGETARCH,TARGETOS,TARGETPLATFORM
+ Fix tutorial for rootless mode
+ Vendor in latest containers/(common, storage, image)
+ Ignore the base image's base image annotations
+ fix(deps): update module github.com/fsouza/go-dockerclient to v1.9.5
+ build(deps): bump github.com/containers/storage from 1.45.3 to 1.45.4
+ Vendor in latest containers/common
+ docs/tutorials/04: add defaults for Run()
+ imagebuildah.StageExecutor: suppress bogus "Pushing cache []:..."
+ executor: also add stage with no children to cleanupStages
+ [CI:BUILD] copr: fix el8 builds
+ Fix documentation on which Capabilities are allowed by default
+ Skip subject-length validation for renovate PRs
+ Temporarily hard-skip bud-multiple-platform-values test
+ fix(deps): update github.com/openshift/imagebuilder digest to 86828bf
+ build(deps): bump github.com/containerd/containerd from 1.6.16 to 1.6.17
+ build(deps): bump tim-actions/get-pr-commits from 1.1.0 to 1.2.0
+ build(deps): bump github.com/containers/image/v5 from 5.24.0 to 5.24.1
+ [skip-ci] Update tim-actions/get-pr-commits digest to 55b867b
+ build(deps): bump github.com/opencontainers/selinux
+ build(deps): bump golang.org/x/crypto from 0.5.0 to 0.6.0
+ Add renovate configuration
+ Run codespell on codebase
+ login: support interspersed args for password
+ conformance: use scratch for minimal test
+ pkg/parse: expose public CleanCacheMount API
+ build(deps): bump go.etcd.io/bbolt from 1.3.6 to 1.3.7
+ build(deps): bump github.com/containerd/containerd from 1.6.15 to 1.6.16
+ docs: specify order preference for FROM
+ Bump to v1.30.0-dev
+
+## v1.29.0 (2023-01-25)
+
+ tests: improve build-with-network-test
+ Bump c/storagev1.45.3, c/imagev5.24.0, c/commonv0.51.0
+ build(deps): bump github.com/onsi/gomega from 1.25.0 to 1.26.0
+ Flake 3710 has been closed. Reenable the test.
+ [CI:DOCS] Fix two diversity issues in a tutorial
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.9.2 to 1.9.3
+ vendor in latests containers/(storage, common, image)
+ fix bud-multiple-platform-with-base-as-default-arg flake
+ stage_executor: while mounting stages use freshly built stage
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.9.0 to 1.9.2
+ build(deps): bump github.com/onsi/gomega from 1.24.2 to 1.25.0
+ vendor in latests containers/(storage, common, image, ocicyrpt)
+ [Itests: change the runtime-flag test for crun
+ [CI:DOCS] README: drop sudo
+ Fix multi-arch manifest-list build timeouts
+ Cirrus: Update VM Images
+ bud: Consolidate multiple synthetic LABEL instructions
+ build, secret: allow realtive mountpoints wrt to work dir
+ fixed squash documentation
+ build(deps): bump github.com/containerd/containerd from 1.6.14 to 1.6.15
+ Correct minor comment
+ Vendor in latest containers/(common, image, storage)
+ system tests: remove unhelpful assertions
+ buildah: add prune command and expose CleanCacheMount API
+ vendor: bump c/storage to a747b27
+ Add support for --group-add to buildah from
+ build(deps): bump actions/stale from 6 to 7
+ Add documentation for buildah build --pull=missing
+ build(deps): bump github.com/containerd/containerd from 1.6.12 to 1.6.14
+ build(deps): bump github.com/docker/docker
+ parse: default ignorefile must not point to symlink outside context
+ buildah: wrap network setup errors
+ build, mount: allow realtive mountpoints wrt to work dir
+ Update to F37 CI VM Images, re-enable prior-fedora
+ Update vendor or containers/(image, storage, common)
+ build(deps): bump golang.org/x/crypto from 0.3.0 to 0.4.0
+ Update contact information
+ build(deps): bump golang.org/x/term from 0.2.0 to 0.3.0
+ Replace io/ioutil calls with os calls
+ [skip-ci] GHA/Cirrus-cron: Fix execution order
+ Vendor in containers/common
+ build(deps): bump golang.org/x/sys from 0.2.0 to 0.3.0
+ remote-cache: support multiple sources and destinations
+ Update c/storage after https://github.com/containers/storage/pull/1436
+ util.SortMounts(): make the returned order more stable
+ version: Bump to 1.29.0-dev
+ [CI:BUILD] Cirrus: Migrate OSX task to M1
+ Update vendor of containers/(common, storage, image)
+ mount=type=cache: seperate cache parent on host for each user
+ Fix installation instructions for Gentoo Linux
+ build(deps): bump github.com/containerd/containerd from 1.6.9 to 1.6.10
+ GHA: Reuse both cirrus rerun and check workflows
+ Vendor in latest containers/(common,image,storage)
+ build(deps): bump github.com/onsi/gomega from 1.24.0 to 1.24.1
+ copier.Put(): clear up os/syscall mode bit confusion
+ build(deps): bump golang.org/x/sys from 0.1.0 to 0.2.0
+ Use TypeBind consistently to name bind/nullfs mounts
+ Add no-new-privileges flag
+ Update vendor of containers/(common, image, storage)
+ imagebuildah:build with --all-platforms must honor args for base images
+ codespell code
+ Expand args and env when using --all-platforms
+ build(deps): bump github.com/onsi/gomega from 1.23.0 to 1.24.0
+ GHA: Simplify Cirrus-Cron check slightly
+ Stop using ubi8
+ remove unnecessary (hence misleading) rmi
+ chroot: fix mounting of ro bind mounts
+ executor: honor default ARG value while eval base name
+ userns: add arbitrary steps/stage to --userns=auto test
+ Don't set allow.mount in the vnet jail on Freebsd
+ copier: Preserve file flags when copying archives on FreeBSD
+ Remove quiet flag, so that it works in podman-remote
+ test: fix preserve rootfs with --mount for podman-remote
+ test: fix prune logic for cache-from after adding content summary
+ vendor in latest containers/(storage, common, image)
+ Fix RUN --mount=type=bind,from=<stage> not preserving rootfs of stage
+ Define and use a safe, reliable test image
+ Fix word missing in Container Tools Guide
+ Makefile: Use $(MAKE) to start sub-makes in install.tools
+ imagebuildah: pull cache from remote repo after adding content summary
+ Makefile: Fix install on FreeBSD
+ Ensure the cache volume locks are unlocked on all paths
+ Vendor in latest containers/(common,storage)
+ Simplify the interface of GetCacheMount and getCacheMount
+ Fix cache locks with multiple mounts
+ Remove calls to Lockfile.Locked()
+ Maintain cache mount locks as lock objects instead of paths
+ test: cleaning cache must not clean lockfiles
+ run: honor lockfiles for multiple --mount instruction
+ mount,cache: lockfiles must not be part of users cache content
+ Update vendor containers/(common,image,storage)
+ [CI:BUILD] copr: buildah rpm should depend on containers-common-extra
+ pr-should-include-tests: allow specfile, golangci
+ build(deps): bump dawidd6/action-send-mail from 3.7.0 to 3.7.1
+ build(deps): bump github.com/docker/docker
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.8.3 to 1.9.0
+ Update vendor containers/(common,image,storage)
+ build(deps): bump actions/upload-artifact from 2 to 3
+ build(deps): bump actions/checkout from 2 to 3
+ build(deps): bump actions/stale from 1 to 6
+ build(deps): bump dawidd6/action-send-mail from 2.2.2 to 3.7.0
+ build(deps): bump tim-actions/get-pr-commits from 1.1.0 to 1.2.0
+ sshagent: LockOSThread before setting SocketLabel
+ Update tests for error message changes
+ Update c/image after https://github.com/containers/image/pull/1299
+ Fix ident for dependabot gha block
+ build(deps): bump github.com/containers/ocicrypt from 1.1.5 to 1.1.6
+ Fix man pages to match latest cobra settings
+ build(deps): bump github.com/spf13/cobra from 1.5.0 to 1.6.0
+ build(deps): bump github.com/onsi/gomega from 1.20.2 to 1.22.1
+ test: retrofit 'bud with undefined build arg directory'
+ imagebuildah: warnOnUnsetBuildArgs while processing stages from executor
+ Update contrib/buildahimage/Containerfile
+ Cirrus CI add flavor parameter
+ Correction - `FLAVOR` not `FLAVOUR`
+ Changed build argument from `RELEASE` to `FLAVOUR`
+ Combine buildahimage Containerfiles
+ bud.bats refactoring: $TEST_SCRATCH_DIR, part 2 of 2
+ bud.bats refactoring: $TEST_SCRATCH_DIR, part 1 of 2
+ System test cleanup: document, clarify, fix
+ test: removing unneeded/expensive COPY
+ test: warning behaviour for unset/set TARGETOS,TARGETARCH,TARGETPLATFORM
+ Bump to v1.28.1-dev
+
+## v1.28.0 (2022-09-30)
+
+ Update vendor containers/(common,image)
+ [CI:DOCS] Add quay-description update reminder
+ vendor: bump c/common to v0.49.2-0.20220929111928-2d1b45ae2423
+ build(deps): bump github.com/opencontainers/selinux
+ Vendor in latest containers/storage
+ Changing shell list operators from `;` to `&&`
+ Fix buildahimage container.conf permissions regression
+ Set sysctls from containers.conf
+ refactor: stop using Normalize directly from containerd package
+ config,builder: process variant while populating image spec
+ Proof of concept: nightly dependency treadmill
+ Run codespell on code
+ Check for unset build args after TARGET args
+ pkg/cli: improve completion test
+ vendor in latest containers/(common,storage,image)
+ copier: work around freebsd bug for "mkdir /"
+ vendor: update c/image
+ test: run in the host cgroup namespace
+ vendor: update c/storage
+ vendor: update c/common
+ cmd: check for user UID instead of privileges
+ run,build: conflict --isolation=chroot and --network
+ Fix broken dns test (from merge collision)
+ Fix stutters
+ Fix broken command completion
+ buildah bud --network=none should have no network
+ build: support --skip-unused-stages for multi-stage builds
+ Prevent use of --dns* options with --net=none
+ buildah: make --cache-ttl=0s equivalent to --no-cache
+ parse: make processing flags in --mount order agnostic
+ Minor test fix for podman-remote
+ build: honor <Containerfile>.containerignore as ignore file
+ Update install.md: Debian 11 (Bullseye) is stable
+ build(deps): bump github.com/docker/docker
+ Use constants from containers/common for finding seccomp.json
+ Don't call os.Exit(1) from manifest exist
+ manifest: add support for buildah manifest exists
+ Buildah should ignore /etc/crio/seccomp.json
+ chroot: Fix cross build break
+ chroot: Move isDevNull to run_common.go
+ chroot: Fix setRlimit build on FreeBSD
+ chroot: Move parseRLimits and setRlimits to run_common.go
+ chroot: Fix runUsingChrootExecMain on FreeBSD
+ chroot: Move runUsingChrootExecMain to run_common.go
+ chroot: Factor out Linux-specific unshare options from runUsingChroot
+ chroot: Move runUsingChroot to run_common.go
+ chroot: Move RunUsingChroot and runUsingChrootMain to run_common.go
+ chroot: Factor out /dev/ptmx pty implementation
+ chroot: Add FreeBSD support for run with chroot isolation
+ build(deps): bump github.com/docker/go-units from 0.4.0 to 0.5.0
+ Replace k8s.gcr.io/pause in tests with registry.k8s.io/pause
+ build(deps): bump github.com/onsi/gomega from 1.20.0 to 1.20.1
+ Cirrus: use image with fewer downloaded dependencies
+ build(deps): bump github.com/opencontainers/runc from 1.1.3 to 1.1.4
+ run: add container gid to additional groups
+ buildah: support for --retry and --retry-delay for push/pull failures
+ Makefile: always call $(GO) instead of `go`
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.8.2 to 1.8.3
+ test: use `T.TempDir` to create temporary test directory
+ mount,cache: enable SElinux shared content label option by default
+ commit: use race-free RemoveNames instead of SetNames
+ Drop util/util.Cause()
+ cmd/buildah: add "manifest create --amend"
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.8.1 to 1.8.2
+ docs: specify git protocol is not supported for github hosted repo
+ Scrub user and group names from layer diffs
+ build(deps): bump github.com/containerd/containerd from 1.6.6 to 1.6.8
+ version: bump to 1.28.0-dev
+
+## v1.27.0 (2022-08-01)
+
+ build: support filtering cache by duration using `--cache-ttl`.
+ build: support building from commit when using git repo as build context.
+ build: clean up git repos correctly when using subdirs.
+ build: add support for distributing cache to remote sources using `--cache-to` and `--cache-from`.
+ imagebuildah: optimize cache hits for `COPY` and `ADD` instructions.
+ build: support OCI hooks for ephemeral build containers.
+ build: add support for `--userns=auto`.
+ copier: add NoOverwriteNonDirDir option.
+ add initial support for building images using Buildah on FreeBSD.
+ multistage: this now skips the computing of unwanted stages to improve performance.
+ multiarch: support splitting build logs for `--platform` using `--logsplit`.
+ build: add support for building images where the base image has no history.
+ commit: allow disabling image history with `--omit-history`.
+ build: add support for renaming a device in rootless setups.
+ build: now supports additionalBuildContext in builds via the `--build-context` option.
+ build: `--output` produces artifacts even if the build container is not committed.
+ build: now accepts `-cpp-flag`, allowing users to pass in CPP flags when processing a Containerfile with C Preprocessor-like syntax.
+ build: now accepts a branch and a subdirectory when the build context is a git repository.
+ build: output now shows a progress bar while pushing and pulling images
+ build: now errors out if the path to Containerfile is a directory.
+ build: support building container images on environments that are rootless and without any valid login sessions.
+ fix: `--output` now generates artifacts even if the entire build is cached.
+ fix: `--output` generates artifacts only for the target stage in multi-stage builds.
+ fix,add: now fails on a bad HTTP response instead of writing to container
+ fix,squash: never use build cache when computing the last step of the last stage
+ fix,build,run: allow reusing secret more than once in different RUN steps
+ fix: compatibility with Docker build by making its --label and --annotate options set empty labels and annotations when given a name but no `=` or label value.
+
+## v1.26.0 (2022-05-04)
+
+ imagebuildah,build: move deepcopy of args before we spawn goroutine
+ Vendor in containers/storage v1.40.2
+ buildah.BuilderOptions.DefaultEnv is ignored, so mark it as deprecated
+ help output: get more consistent about option usage text
+ Handle OS version and features flags
+ buildah build: --annotation and --label should remove values
+ buildah build: add a --env
+ buildah: deep copy options.Args before performing concurrent build/stage
+ test: inline platform and builtinargs behaviour
+ vendor: bump imagebuilder to master/009dbc6
+ build: automatically set correct TARGETPLATFORM where expected
+ build(deps): bump github.com/fsouza/go-dockerclient
+ Vendor in containers/(common, storage, image)
+ imagebuildah, executor: process arg variables while populating baseMap
+ buildkit: add support for custom build output with --output
+ Cirrus: Update CI VMs to F36
+ fix staticcheck linter warning for deprecated function
+ Fix docs build on FreeBSD
+ build(deps): bump github.com/containernetworking/cni from 1.0.1 to 1.1.0
+ copier.unwrapError(): update for Go 1.16
+ copier.PutOptions: add StripSetuidBit/StripSetgidBit/StripStickyBit
+ copier.Put(): write to read-only directories
+ build(deps): bump github.com/cpuguy83/go-md2man/v2 in /tests/tools
+ Rename $TESTSDIR (the plural one), step 4 of 3
+ Rename $TESTSDIR (the plural one), step 3 of 3
+ Rename $TESTSDIR (the plural one), step 2 of 3
+ Rename $TESTSDIR (the plural one), step 1 of 3
+ build(deps): bump github.com/containerd/containerd from 1.6.2 to 1.6.3
+ Ed's periodic test cleanup
+ using consistent lowercase 'invalid' word in returned err msg
+ Update vendor of containers/(common,storage,image)
+ use etchosts package from c/common
+ run: set actual hostname in /etc/hostname to match docker parity
+ update c/common to latest main
+ Update vendor of containers/(common,storage,image)
+ Stop littering
+ manifest-create: allow creating manifest list from local image
+ Update vendor of storage,common,image
+ Bump golang.org/x/crypto to 7b82a4e
+ Initialize network backend before first pull
+ oci spec: change special mount points for namespaces
+ tests/helpers.bash: assert handle corner cases correctly
+ buildah: actually use containers.conf settings
+ integration tests: learn to start a dummy registry
+ Fix error check to work on Podman
+ buildah build should accept at most one arg
+ tests: reduce concurrency for flaky bud-multiple-platform-no-run
+ vendor in latest containers/common,image,storage
+ manifest-add: allow override arch,variant while adding image
+ Remove a stray `\` from .containerenv
+ Vendor in latest opencontainers/selinux v1.10.1
+ build, commit: allow removing default identity labels
+ Create shorter names for containers based on image IDs
+ test: skip rootless on cgroupv2 in root env
+ fix hang when oci runtime fails
+ Set permissions for GitHub actions
+ copier test: use correct UID/GID in test archives
+ run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM
+ Bump back to v1.26.0-dev
+ build(deps): bump github.com/opencontainers/runc from 1.1.0 to 1.1.1
+ Included the URL to check the SHA
+
+## v1.25.1 (2022-03-30)
+
+ buildah: create WORKDIR with USER permissions
+ vendor: update github.com/openshift/imagebuilder
+ copier: attempt to open the dir before adding it
+ Updated dependabot to get updates for GitHub actions.
+ Switch most calls to filepath.Walk to filepath.WalkDir
+ build: allow --no-cache and --layers so build cache can be overridden
+ build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0
+ Bump to v1.26.0-dev
+ build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+
+## v1.25.0 (2022-03-25)
+
+ install: drop RHEL/CentOS 7 doc
+ build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5
+ Bump c/storage to v1.39.0 in main
+ Add a test for CVE-2022-27651
+ build(deps): bump github.com/docker/docker
+ Bump github.com/prometheus/client_golang to v1.11.1
+ [CI:DOCS] man pages: sort flags, and keep them that way
+ build(deps): bump github.com/containerd/containerd from 1.6.1 to 1.6.2
+ Don't pollute
+ network setup: increase timeout to 4 minutes
+ do not set the inheritable capabilities
+ build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+ build(deps): bump github.com/containers/ocicrypt from 1.1.2 to 1.1.3
+ parse: convert exposed GetVolumes to internal only
+ buildkit: mount=type=cache support locking external cache store
+ .in support: improve error message when cpp is not installed
+ buildah image: install cpp
+ build(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1
+ build(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0
+ build(deps): bump github.com/docker/docker
+ Add --no-hosts flag to eliminate use of /etc/hosts within containers
+ test: remove skips for rootless users
+ test: unshare mount/umount if test is_rootless
+ tests/copy: read correct containers.conf
+ build(deps): bump github.com/docker/distribution
+ cirrus: add separate task and matrix for rootless
+ tests: skip tests for rootless which need unshare
+ buildah: test rootless integration
+ vendor: bump c/storage to main/93ce26691863
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.9 to 1.7.10
+ tests/copy: initialize the network, too
+ [CI:DOCS] remove references to Kubic for CentOS and Ubuntu
+ build(deps): bump github.com/containerd/containerd from 1.6.0 to 1.6.1
+ use c/image/pkg/blobcache
+ vendor c/image/v5@v5.20.0
+ add: ensure the context directory is an absolute path
+ executor: docker builds must inherit healthconfig from base if any
+ docs: Remove Containerfile and containerignore
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.8 to 1.7.9
+ helpers.bash: Use correct syntax
+ speed up combination-namespaces test
+ build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+ Bump back to 1.25.0-dev
+ build(deps): bump github.com/containerd/containerd from 1.5.9 to 1.6.0
+
+## v1.24.2 (2022-02-16)
+
+ Increase subuid/subgid to 65535
+ history: only add proxy vars to history if specified
+ run_linux: use --systemd-cgroup
+ buildah: new global option --cgroup-manager
+ Makefile: build with systemd when available
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8
+ Bump c/common to v0.47.4
+ Cirrus: Use updated VM images
+ conformance: add a few "replace-directory-with-symlink" tests
+ Bump back to v1.25.0-dev
+
+## v1.24.1 (2022-02-03)
+
+ executor: Add support for inline --platform within Dockerfile
+ caps: fix buildah run --cap-add=all
+ Update vendor of openshift/imagebuilder
+ Bump version of containers/image and containers/common
+ Update vendor of containers/common
+ System tests: fix accidental vandalism of source dir
+ build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2
+ imagebuildah.BuildDockerfiles(): create the jobs semaphore
+ build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1
+ overlay: always honor mountProgram
+ overlay: move mount program invocation to separate function
+ overlay: move mount program lookup to separate function
+ Bump to v1.25.0-dev [NO TESTS NEEDED]
+
+## v1.24.0 (2022-01-26)
+
+ Update vendor of containers/common
+ build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+ Github-workflow: Report both failures and errors.
+ build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0
+ Update docs/buildah-build.1.md
+ [CI:DOCS] Fix typos and improve language
+ buildah bud --network add support for custom networks
+ Make pull commands be consistent
+ docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing
+ build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0
+ Vendor in latest containers/image
+ Run codespell on code
+ .github/dependabot.yml: add tests/tools go.mod
+ CI: rm git-validation, add GHA job to validate PRs
+ tests/tools: bump go-md2man to v2.0.1
+ tests/tools/Makefile: simplify
+ tests/tools: bump onsi/ginkgo to v1.16.5
+ vendor: bump c/common and others
+ mount: add support for custom upper and workdir with overlay mounts
+ linux: fix lookup for runtime
+ overlay: add MountWithOptions to API which extends support for advanced overlay
+ Allow processing of SystemContext from FlagSet
+ .golangci.yml: enable unparam linter
+ util/resolveName: rm bool return
+ tests/tools: bump golangci-lint
+ .gitignore: fixups
+ all: fix capabilities.NewPid deprecation warnings
+ bind/mount.go: fix linter comment
+ all: fix gosimple warning S1039
+ tests/e2e/buildah_suite_test.go: fix gosimple warnings
+ imagebuildah/executor.go: fix gosimple warning
+ util.go: fix gosimple warning
+ build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0
+ Enable git-daemon tests
+ Allow processing of id options from FlagSet
+ Cirrus: Re-order tasks for more parallelism
+ Cirrus: Freshen VM images
+ Fix platform handling for empty os/arch values
+ Allow processing of network options from FlagSet
+ Fix permissions on secrets directory
+ Update containers/image and containers/common
+ bud.bats: use a local git daemon for the git protocol test
+ Allow processing of common options from FlagSet
+ Cirrus: Run int. tests in parallel with unit
+ vendor c/common
+ Fix default CNI paths
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7
+ multi-stage: enable mounting stages across each other with selinux enabled
+ executor: Share selinux label of first stage with other stages in a build
+ buildkit: add from field to bind and cache mounts so images can be used as source
+ Use config.ProxyEnv from containers/common
+ use libnetwork from c/common for networking
+ setup the netns in the buildah parent process
+ build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6
+ build: fix libsubid test
+ Allow callers to replace the ContainerSuffix
+ parse: allow parsing anomaly non-human value for memory control group
+ .cirrus: remove static_build from ci
+ stage_executor: re-use all possible layers from cache for squashed builds
+ build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0
+ Allow rootless buildah to set resource limits on cgroup V2
+ build(deps): bump github.com/docker/docker
+ tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification
+ build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3
+ Wire logger through to config
+ copier.Put: check for is-not-a-directory using lstat, not stat
+ Turn on rootless cgroupv2 tests
+ Grab all of the containers.conf settings for namespaces.
+ image: set MediaType in OCI manifests
+ copier: RemoveAll possibly-directories
+ Simple README fix
+ images: accept multiple filter with logical AND
+ build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1
+ Update vendor of container/storage
+ build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0
+ build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0
+ Make LocalIP public function so Podman can use it
+ Fix UnsetEnv for buildah bud
+ Tests should rely only on static/unchanging images
+ run: ensure that stdio pipes are labeled correctly
+ build(deps): bump github.com/docker/docker
+ Cirrus: Bump up to Fedora 35 & Ubuntu 21.10
+ chroot: don't use the generate default seccomp filter for unit tests
+ build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8
+ ssh-agent: Increase timeout before we explicitly close connection
+ docs/tutorials: update
+ Clarify that manifest defaults to localhost as the registry name
+ "config": remove a stray bit of debug output
+ "commit": fix a flag typo
+ Fix an error message: unlocking vs locking
+ Expand the godoc for CommonBuildOptions.Secrets
+ chroot: accept an "rw" option
+ Add --unsetenv option to buildah commit and build
+ define.TempDirForURL(): show CombinedOutput when a command fails
+ config: support the variant field
+ rootless: do not bind mount /sys if not needed
+ Fix tutorial to specify command on buildah run line
+ build: history should not contain ARG values
+ docs: Use guaranteed path for go-md2man
+ run: honor --network=none from builder if nothing specified
+ networkpolicy: Should be enabled instead of default when explicitly set
+ Add support for env var secret sources
+ build(deps): bump github.com/docker/docker
+ fix: another non-portable shebang
+ Rootless containers users should use additional groups
+ Support overlayfs path contains colon
+ Report ignorefile location when no content added
+ Add support for host.containers.internal in the /etc/hosts
+ build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5
+ imagebuildah: fix nil deref
+ buildkit: add support for mount=type=cache
+ Default secret mode to 400
+ [CI:DOCS] Include manifest example usage
+ docs: update buildah-from, buildah-pull 'platform' option compatibility notes
+ docs: update buildah-build 'platform' option compatibility notes
+ De-dockerize the man page as much as possible
+ [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st
+ docs: Fix and Update Containerfile man page with supported mount types
+ mount: add tmpcopyup to tmpfs mount option
+ buildkit: Add support for --mount=type=tmpfs
+ build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1
+ Fix command doc links in README.md
+ build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1
+ build: Add support for buildkit like --mount=type=bind
+ Bump containerd to v1.5.7
+ build(deps): bump github.com/docker/docker
+ tests: stop pulling php, composer
+ Fix .containerignore link file
+ Cirrus: Fix defunct package metadata breaking cache
+ build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0
+ buildah build: add --all-platforms
+ Add man page for Containerfile and .containerignore
+ Plumb the remote logger throughout Buildah
+ Replace fmt.Sprintf("%d", x) with strconv.Itoa(x)
+ Run: Cleanup run directory after every RUN step
+ build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0
+ Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation
+ Makefile: check for `-race` using `-mod=vendor`
+ imagebuildah: fix an attempt to write to a nil map
+ push: support to specify the compression format
+ conformance: allow test cases to specify dockerUseBuildKit
+ build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0
+ build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1
+ unmarshalConvertedConfig(): handle zstd compression
+ tests/copy/copy: wire up compression options
+ Update to github.com/vbauerster/mpb v7.1.5
+ Add flouthoc to OWNERS
+ build: Add additional step nodes when labels are modified
+ Makefile: turn on race detection whenever it's available
+ conformance: add more tests for exclusion short-circuiting
+ Update VM Images + Drop prior-ubuntu testing
+ Bump to v1.24.0-dev
+
+## v1.23.0 (2021-09-13)
+
+ Vendor in containers/common v0.44.0
+ build(deps): bump github.com/containers/storage from 1.35.0 to 1.36.0
+ Update 05-openshift-rootless-build.md
+ build(deps): bump github.com/opencontainers/selinux from 1.8.4 to 1.8.5
+ .cirrus.yml: run cross_build_task on Big Sur
+ Makefile: update cross targets
+ Add support for rootless overlay mounts
+ Cirrus: Increase unit-test timeout
+ Docs: Clarify rmi w/ manifest/index use
+ build: mirror --authfile to filesystem if pointing to FD instead of file
+ Fix build with .git url with branch
+ manifest: rm should remove only manifests not referenced images.
+ vendor: bump c/common to v0.43.3-0.20210902095222-a7acc160fb25
+ Avoid rehashing and noop compression writer
+ corrected man page section; .conf file to mention its man page
+ copy: add --max-parallel-downloads to tune that copy option
+ copier.Get(): try to avoid descending into directories
+ tag: Support tagging manifest list instead of resolving to images
+ Install new manpages to correct sections
+ conformance: tighten up exception specifications
+ Add support for libsubid
+ Add epoch time field to buildah images
+ Fix ownership of /home/build/.local/share/containers
+ build(deps): bump github.com/containers/image/v5 from 5.15.2 to 5.16.0
+ Rename bud to build, while keeping an alias for bud.
+ Replace golang.org/x/crypto/ssh/terminal with golang.org/x/term
+ build(deps): bump github.com/opencontainers/runc from 1.0.1 to 1.0.2
+ build(deps): bump github.com/onsi/gomega from 1.15.0 to 1.16.0
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.3 to 1.7.4
+ build(deps): bump github.com/containers/common from 0.43.1 to 0.43.2
+ Move DiscoverContainerfile to pkg/util directory
+ build(deps): bump github.com/containers/image/v5 from 5.15.1 to 5.15.2
+ Remove some references to Docker
+ build(deps): bump github.com/containers/image/v5 from 5.15.0 to 5.15.1
+ imagebuildah: handle --manifest directly
+ build(deps): bump github.com/containers/common from 0.42.1 to 0.43.1
+ build(deps): bump github.com/opencontainers/selinux from 1.8.3 to 1.8.4
+ executor: make sure imageMap is updated with terminatedStage
+ tests/serve/serve.go: use a kernel-assigned port
+ Bump go for vendor-in-container from 1.13 to 1.16
+ imagebuildah: move multiple-platform building internal
+ Adds GenerateStructure helper function to support rootfs-overlay.
+ Run codespell to fix spelling
+ Implement SSH RUN mount
+ build(deps): bump github.com/onsi/gomega from 1.14.0 to 1.15.0
+ Fix resolv.conf content with run --net=private
+ run: fix nil deref using the option's logger
+ build(deps): bump github.com/containerd/containerd from 1.5.1 to 1.5.5
+ make vendor-in-container
+ bud: teach --platform to take a list
+ set base-image annotations
+ build(deps): bump github.com/opencontainers/selinux from 1.8.2 to 1.8.3
+ [CI:DOCS] Fix CHANGELOG.md
+ Bump to v1.23.0-dev [NO TESTS NEEDED]
+ Accept repositories on login/logout
+
+## v1.22.0 (2021-08-02)
+ c/image, c/storage, c/common vendor before Podman 3.3 release
+ WIP: tests: new assert()
+ Proposed patch for 3399 (shadowutils)
+ Fix handling of --restore shadow-utils
+ build(deps): bump github.com/containers/image/v5 from 5.13.2 to 5.14.0
+ runtime-flag (debug) test: handle old & new runc
+ build(deps): bump github.com/containers/storage from 1.32.6 to 1.33.0
+ Allow dst and destination for target in secret mounts
+ Multi-arch: Always push updated version-tagged img
+ Add a few tests on cgroups V2
+ imagebuildah.stageExecutor.prepare(): remove pseudonym check
+ refine dangling filter
+ Chown with environment variables not set should fail
+ Just restore protections of shadow-utils
+ build(deps): bump github.com/opencontainers/runc from 1.0.0 to 1.0.1
+ Remove specific kernel version number requirement from install.md
+ Multi-arch image workflow: Make steps generic
+ chroot: fix environment value leakage to intermediate processes
+ Update nix pin with `make nixpkgs`
+ buildah source - create and manage source images
+ Update cirrus-cron notification GH workflow
+ Reuse code from containers/common/pkg/parse
+ Cirrus: Freshen VM images
+ build(deps): bump github.com/containers/storage from 1.32.5 to 1.32.6
+ Fix excludes exception beginning with / or ./
+ Fix syntax for --manifest example
+ build(deps): bump github.com/onsi/gomega from 1.13.0 to 1.14.0
+ vendor containers/common@main
+ Cirrus: Drop dependence on fedora-minimal
+ Adjust conformance-test error-message regex
+ Workaround appearance of differing debug messages
+ Cirrus: Install docker from package cache
+ build(deps): bump github.com/containers/ocicrypt from 1.1.1 to 1.1.2
+ Switch rusagelogfile to use options.Out
+ build(deps): bump github.com/containers/storage from 1.32.4 to 1.32.5
+ Turn stdio back to blocking when command finishes
+ Add support for default network creation
+ Cirrus: Updates for master->main rename
+ Change references from master to main
+ Add `--env` and `--workingdir` flags to run command
+ build(deps): bump github.com/opencontainers/runc
+ [CI:DOCS] buildah bud: spelling --ignore-file requires parameter
+ [CI:DOCS] push/pull: clarify supported transports
+ Remove unused function arguments
+ Create mountOptions for mount command flags
+ Extract version command implementation to function
+ Add --json flags to `mount` and `version` commands
+ build(deps): bump github.com/containers/storage from 1.32.2 to 1.32.3
+ build(deps): bump github.com/containers/common from 0.40.0 to 0.40.1
+ copier.Put(): set xattrs after ownership
+ buildah add/copy: spelling
+ build(deps): bump github.com/containers/common from 0.39.0 to 0.40.0
+ buildah copy and buildah add should support .containerignore
+ Remove unused util.StartsWithValidTransport
+ Fix documentation of the --format option of buildah push
+ Don't use alltransports.ParseImageName with known transports
+ build(deps): bump github.com/containers/image/v5 from 5.13.0 to 5.13.1
+ man pages: clarify `rmi` removes dangling parents
+ tests: make it easier to override the location of the copy helper
+ build(deps): bump github.com/containers/image/v5 from 5.12.0 to 5.13.0
+ [CI:DOCS] Fix links to c/image master branch
+ imagebuildah: use the specified logger for logging preprocessing warnings
+ Fix copy into workdir for a single file
+ Fix docs links due to branch rename
+ Update nix pin with `make nixpkgs`
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.2 to 1.7.3
+ build(deps): bump github.com/opencontainers/selinux from 1.8.1 to 1.8.2
+ build(deps): bump go.etcd.io/bbolt from 1.3.5 to 1.3.6
+ build(deps): bump github.com/containers/storage from 1.32.1 to 1.32.2
+ build(deps): bump github.com/mattn/go-shellwords from 1.0.11 to 1.0.12
+ build(deps): bump github.com/onsi/ginkgo from 1.16.3 to 1.16.4
+ fix(docs): typo
+ Move to v1.22.0-dev
+ Fix handling of auth.json file while in a user namespace
+ Add rusage-logfile flag to optionally send rusage to a file
+ imagebuildah: redo step logging
+ build(deps): bump github.com/onsi/ginkgo from 1.16.2 to 1.16.3
+ build(deps): bump github.com/containers/storage from 1.32.0 to 1.32.1
+ Add volumes to make running buildah within a container easier
+ build(deps): bump github.com/onsi/gomega from 1.12.0 to 1.13.0
+ Add and use a "copy" helper instead of podman load/save
+ Bump github.com/containers/common from 0.38.4 to 0.39.0
+ containerImageRef/containerImageSource: don't buffer uncompressed layers
+ containerImageRef(): squashed images have no parent images
+ Sync. workflow across skopeo, buildah, and podman
+ Bump github.com/containers/storage from 1.31.1 to 1.31.2
+ Bump github.com/opencontainers/runc from 1.0.0-rc94 to 1.0.0-rc95
+ Bump to v1.21.1-dev [NO TESTS NEEDED]
+
+## v1.21.0 (2021-05-19)
+ Don't blow up if cpp detects errors
+ Vendor in containers/common v0.38.4
+ Remove 'buildah run --security-opt' from completion
+ update c/common
+ Fix handling of --default-mounts-file
+ update vendor of containers/storage v1.31.1
+ Bump github.com/containers/storage from 1.30.3 to 1.31.0
+ Send logrus messages back to caller when building
+ github: Fix bad repo. ref in workflow config
+ Check earlier for bad image tags name
+ buildah bud: fix containers/podman/issues/10307
+ Bump github.com/containers/storage from 1.30.1 to 1.30.3
+ Cirrus: Support [CI:DOCS] test skipping
+ Notification email for cirrus-cron build failures
+ Bump github.com/opencontainers/runc from 1.0.0-rc93 to 1.0.0-rc94
+ Fix race condition
+ Fix copy race while walking paths
+ Preserve ownership of lower directory when doing an overlay mount
+ Bump github.com/onsi/gomega from 1.11.0 to 1.12.0
+ Update nix pin with `make nixpkgs`
+ codespell cleanup
+ Multi-arch github-action workflow unification
+ Bump github.com/containers/image/v5 from 5.11.1 to 5.12.0
+ Bump github.com/onsi/ginkgo from 1.16.1 to 1.16.2
+ imagebuildah: ignore signatures when tagging images
+ update to latest libimage
+ Bump github.com/containers/common from 0.37.0 to 0.37.1
+ Bump github.com/containers/storage from 1.30.0 to 1.30.1
+ Upgrade to GitHub-native Dependabot
+ Document location of auth.json file if XDG_RUNTIME_DIR is not set
+ run.bats: fix flake in run-user test
+ Cirrus: Update F34beta -> F34
+ pr-should-include-tests: try to make work in buildah
+ runUsingRuntime: when relaying error from the runtime, mention that
+ Run(): avoid Mkdir() into the rootfs
+ imagebuildah: replace archive with chrootarchive
+ imagebuildah.StageExecutor.volumeCacheSaveVFS(): set up bind mounts
+ conformance: use :Z with transient mounts when SELinux is enabled
+ bud.bats: fix a bats warning
+ imagebuildah: create volume directories when using overlays
+ imagebuildah: drop resolveSymlink()
+ namespaces test - refactoring and cleanup
+ Refactor 'idmapping' system test
+ Cirrus: Update Ubuntu images to 21.04
+ Tiny fixes in bud system tests
+ Add compatibility wrappers for removed packages
+ Fix expected message at pulling image
+ Fix system tests of 'bud' subcommand
+ [CI:DOCS] Update steps for CentOS runc users
+ Add support for secret mounts
+ Add buildah manifest rm command
+ restore push/pull and util API
+ [CI:DOCS] Remove older distro docs
+ Rename rhel secrets to subscriptions
+ vendor in openshift/imagebuilder
+ Remove buildah bud --loglevel ...
+ use new containers/common/libimage package
+ Fix copier when using globs
+ Test namespace flags of 'bud' subcommand
+ Add system test of 'bud' subcommand
+ Output names of multiple tags in buildah bud
+ push to docker test: don't get fooled by podman
+ copier: add Remove()
+ build(deps): bump github.com/containers/image/v5 from 5.10.5 to 5.11.1
+ Restore log timestamps
+ Add system test of 'buildah help' with a tiny fix
+ tests: copy.bats: fix infinite hang
+ Do not force hard code to crun in rootless mode
+ build(deps): bump github.com/openshift/imagebuilder from 1.2.0 to 1.2.1
+ build(deps): bump github.com/containers/ocicrypt from 1.1.0 to 1.1.1
+ build(deps): bump github.com/containers/common from 0.35.4 to 0.36.0
+ Fix arg missing warning in bud
+ Check without flag in 'from --cgroup-parent' test
+ Minor fixes to Buildah as a library tutorial documentation
+ Add system test of 'buildah version' for packaged buildah
+ Add a few system tests of 'buildah from'
+ Log the final error with %+v at logging level "trace"
+ copier: add GetOptions.NoCrossDevice
+ Update nix pin with `make nixpkgs`
+ Bump to v1.20.2-dev
+
+## v1.20.1 (2021-04-13)
+ Run container with isolation type set at 'from'
+ bats helpers.bash - minor refactoring
+ Bump containers/storage vendor to v1.29.0
+ build(deps): bump github.com/onsi/ginkgo from 1.16.0 to 1.16.1
+ Cirrus: Update VMs w/ F34beta
+ CLI add/copy: add a --from option
+ build(deps): bump github.com/onsi/ginkgo from 1.15.2 to 1.16.0
+ Add authentication system tests for 'commit' and 'bud'
+ fix local image lookup for custom platform
+ Double-check existence of OCI runtimes
+ Cirrus: Make use of shared get_ci_vm container
+ Add system tests of "buildah run"
+ Update nix pin with `make nixpkgs`
+ Remove some stuttering on returns errors
+ Setup alias for --tty to --terminal
+ Add conformance tests for COPY /...
+ Put a few more minutes on the clock for the CI conformance test
+ Add a conformance test for COPY --from $symlink
+ Add conformance tests for COPY ""
+ Check for symlink in builtin volume
+ Sort all mounts by destination directory
+ System-test cleanup
+ Export parse.Platform string to be used by podman-remote
+ blobcache: fix sequencing error
+ build(deps): bump github.com/containers/common from 0.35.3 to 0.35.4
+ Fix URL in demos/buildah_multi_stage.sh
+ Add a few system tests
+ [NO TESTS NEEDED] Use --recurse-modules when building git context
+ Bump to v1.20.1-dev
+
+## v1.20.0 (2021-03-25)
+ * vendor in containers/storage v1.28.1
+ * build(deps): bump github.com/containers/common from 0.35.2 to 0.35.3
+ * tests: prefetch: use buildah, not podman, for pulls
+ * Use faster way to check image tag existence during multi-arch build
+ * Add information about multi-arch images to the Readme
+ * COPY --chown: expand the conformance test
+ * pkg/chrootuser: use a bufio.Scanner
+ * [CI:DOCS] Fix rootful typo in docs
+ * build(deps): bump github.com/onsi/ginkgo from 1.15.1 to 1.15.2
+ * Add documentation and testing for .containerignore
+ * build(deps): bump github.com/sirupsen/logrus from 1.8.0 to 1.8.1
+ * build(deps): bump github.com/hashicorp/go-multierror from 1.1.0 to 1.1.1
+ * Lookup Containerfile if user specifies a directory
+ * Add Tag format placeholder to docs
+ * copier: ignore sockets
+ * image: propagate errors from extractRootfs
+ * Remove system test of 'buildah containers -a'
+ * Clarify userns options are usable only as root in man pages
+ * Fix system test of 'containers -a'
+ * Remove duplicated code in addcopy
+ * build(deps): bump github.com/onsi/ginkgo from 1.15.0 to 1.15.1
+ * build(deps): bump github.com/onsi/gomega from 1.10.5 to 1.11.0
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.1 to 1.7.2
+ * Update multi-arch buildah build setup with new logic
+ * Update nix pin with `make nixpkgs`
+ * overlay.bats: fix the "overlay source permissions" test
+ * imagebuildah: use overlay for volumes when using overlay
+ * Make PolicyMap and PullPolicy names align
+ * copier: add GetOptions.IgnoreUnreadable
+ * Check local image to match system context
+ * fix: Containerfiles - smaller set of userns u/gids
+ * Set upperdir permissions based on source
+ * Shrink the vendoring size of pkc/cli
+ * Clarify image name match failure message
+ * ADD/COPY: create the destination directory first, chroot to it
+ * copier.GetOptions: add NoDerefSymLinks
+ * copier: add an Eval function
+ * Update system test for 'from --cap-add/drop'
+ * copier: fix a renaming bug
+ * copier: return child process stderr if we can't JSON decode the response
+ * Add some system tests
+ * build(deps): bump github.com/containers/storage from 1.26.0 to 1.27.0
+ * complement add/copy --chmod documentation
+ * buildah login and logout, do not need to enter user namespace
+ * Add multi-arch image build
+ * chmod/chown added/fixed in bash completions
+ * OWNERS: add @lsm5
+ * buildah add/copy --chmod dockerfile implementation
+ * bump github.com/openshift/imagebuilder from 1.1.8 to 1.2.0
+ * buildah add/copy --chmod cli implementation for files and urls
+ * Make sure we set the buildah version label
+ * Isolation strings, should match user input
+ * [CI:DOCS] buildah-from.md: remove dup arch,os
+ * build(deps): bump github.com/containers/image/v5 from 5.10.2 to 5.10.3
+ * Cirrus: Temp. disable prior-fedora (F32) testing
+ * pr-should-include-tests: recognized "renamed" tests
+ * build(deps): bump github.com/sirupsen/logrus from 1.7.0 to 1.8.0
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.0 to 1.7.1
+ * build(deps): bump github.com/containers/common from 0.34.2 to 0.35.0
+ * Fix reaping of stages with no instructions
+ * add stale bot
+ * Add base image name to comment
+ * build(deps): bump github.com/spf13/cobra from 1.1.1 to 1.1.3
+ * Don't fail copy to emptydir
+ * buildah: use volatile containers
+ * vendor: update containers/storage
+ * Eliminate the use of containers/building import in pkg subdirs
+ * Add more support for removing config
+ * Improve messages about --cache-from not being supported
+ * Revert patch to allow COPY/ADD of empty dirs.
+ * Don't fail copy to emptydir
+ * Fix tutorial for rootless mode
+ * Fix caching layers with build args
+ * Vendor in containers/image v5.10.2
+ * build(deps): bump github.com/containers/common from 0.34.0 to 0.34.2
+ * build(deps): bump github.com/onsi/ginkgo from 1.14.2 to 1.15.0
+ * 'make validate': require PRs to include tests
+ * build(deps): bump github.com/onsi/gomega from 1.10.4 to 1.10.5
+ * build(deps): bump github.com/containers/storage from 1.24.5 to 1.25.0
+ * Use chown function for U volume flag from containers/common repository
+ * --iidfile: print hash prefix
+ * bump containernetworking/cni to v0.8.1 - fix for CVE-2021-20206
+ * run: fix check for host pid namespace
+ * Finish plumbing for buildah bud --manifest
+ * buildah manifest add localimage should work
+ * Stop testing directory permissions with latest docker
+ * Fix build arg check
+ * build(deps): bump github.com/containers/ocicrypt from 1.0.3 to 1.1.0
+ * [ci:docs] Fix man page for buildah push
+ * Update nix pin with `make nixpkgs`
+ * Bump to containers/image v5.10.1
+ * Rebuild layer if a change in ARG is detected
+ * Bump golang.org/x/crypto to the latest
+ * Add Ashley and Urvashi to Approvers
+ * local image lookup by digest
+ * Use build-arg ENV val from local environment if set
+ * Pick default OCI Runtime from containers.conf
+ * Added required devel packages
+ * Cirrus: Native OSX Build
+ * Cirrus: Two minor cleanup items
+ * Workaround for RHEL gating test failure
+ * build(deps): bump github.com/stretchr/testify from 1.6.1 to 1.7.0
+ * build(deps): bump github.com/mattn/go-shellwords from 1.0.10 to 1.0.11
+ * Reset upstream branch to dev version
+ * If destination does not exist, do not throw error
+
+## v1.19.0 (2021-01-08)
+ Update vendor of containers/storage and containers/common
+ Buildah inspect should be able to inspect manifests
+ Make buildah push support pushing manifests lists and digests
+ Fix handling of TMPDIR environment variable
+ Add support for --manifest flags
+ Upper directory should match mode of destination directory
+ Only grab the OS, Arch if the user actually specified them
+ Use --arch and --os and --variant options to select architecture and os
+ Cirrus: Track libseccomp and golang version
+ copier.PutOptions: add an "IgnoreDevices" flag
+ fix: `rmi --prune` when parent image is in store.
+ build(deps): bump github.com/containers/storage from 1.24.3 to 1.24.4
+ build(deps): bump github.com/containers/common from 0.31.1 to 0.31.2
+ Allow users to specify stdin into containers
+ Drop log message on failure to mount on /sys file systems to info
+ Spelling
+ SELinux no longer requires a tag.
+ build(deps): bump github.com/opencontainers/selinux from 1.6.0 to 1.8.0
+ build(deps): bump github.com/containers/common from 0.31.0 to 0.31.1
+ Update nix pin with `make nixpkgs`
+ Switch references of /var/run -> /run
+ Allow FROM to be overridden with from option
+ copier: don't assume we can chroot() on Unixy systems
+ copier: add PutOptions.NoOverwriteDirNonDir, Get/PutOptions.Rename
+ copier: handle replacing directories with not-directories
+ copier: Put: skip entries with zero-length names
+ build(deps): bump github.com/containers/storage from 1.24.2 to 1.24.3
+ Add U volume flag to chown source volumes
+ Turn off PRIOR_UBUNTU Test until vm is updated
+ pkg, cli: rootless uses correct isolation
+ build(deps): bump github.com/onsi/gomega from 1.10.3 to 1.10.4
+ update installation doc to reflect current status
+ Move away from using docker.io
+ enable short-name aliasing
+ build(deps): bump github.com/containers/storage from 1.24.1 to 1.24.2
+ build(deps): bump github.com/containers/common from 0.30.0 to 0.31.0
+ Throw errors when using bogus --network flags
+ pkg/supplemented test: replace our null blobinfocache
+ build(deps): bump github.com/containers/common from 0.29.0 to 0.30.0
+ inserts forgotten quotation mark
+ Not prefer use local image create/add manifest
+ Add container information to .containerenv
+ Add --ignorefile flag to use alternate .dockerignore flags
+ Add a source debug build
+ Fix crash on invalid filter commands
+ build(deps): bump github.com/containers/common from 0.27.0 to 0.29.0
+ Switch to using containers/common pkg's
+ fix: non-portable shebang #2812
+ Remove copy/paste errors that leaked `Podman` into man pages.
+ Add suggests cpp to spec file
+ Apply suggestions from code review
+ update docs for debian testing and unstable
+ imagebuildah: disable pseudo-terminals for RUN
+ Compute diffID for mapped-layer at creating image source
+ intermediateImageExists: ignore images whose history we can't read
+ Bump to v1.19.0-dev
+ build(deps): bump github.com/containers/common from 0.26.3 to 0.27.0
+
+## v1.18.0 (2020-11-16)
+ Fix testing error caused by simultaneous merge
+ Vendor in containers/storage v1.24.0
+ short-names aliasing
+ Add --policy flag to buildah pull
+ Stop overwrapping and stuttering
+ copier.Get(): ignore ENOTSUP/ENOSYS when listing xattrs
+ Run: don't forcibly disable UTS namespaces in rootless mode
+ test: ensure non-directory in a Dockerfile path is handled correctly
+ Add a few tests for `pull` command
+ Fix buildah config --cmd to handle array
+ build(deps): bump github.com/containers/storage from 1.23.8 to 1.23.9
+ Fix NPE when Dockerfile path contains non-directory entries
+ Update buildah bud man page from podman build man page
+ Move declaration of decryption-keys to common cli
+ Run: correctly call copier.Mkdir
+ util: digging UID/GID out of os.FileInfo should work on Unix
+ imagebuildah.getImageTypeAndHistoryAndDiffIDs: cache results
+ Verify userns-uid-map and userns-gid-map input
+ Use CPP, CC and flags in dep check scripts
+ Avoid overriding LDFLAGS in Makefile
+ ADD: handle --chown on URLs
+ Update nix pin with `make nixpkgs`
+ (*Builder).Run: MkdirAll: handle EEXIST error
+ copier: try to force loading of nsswitch modules before chroot()
+ fix MkdirAll usage
+ build(deps): bump github.com/containers/common from 0.26.2 to 0.26.3
+ build(deps): bump github.com/containers/storage from 1.23.7 to 1.23.8
+ Use osusergo build tag for static build
+ imagebuildah: cache should take image format into account
+ Bump to v1.18.0-dev
+
+## v1.17.0 (2020-10-29)
+ Handle cases where other tools mount/unmount containers
+ overlay.MountReadOnly: support RO overlay mounts
+ overlay: use fusermount for rootless umounts
+ overlay: fix umount
+ Switch default log level of Buildah to Warn. Users need to see these messages
+ Drop error messages about OCI/Docker format to Warning level
+ build(deps): bump github.com/containers/common from 0.26.0 to 0.26.2
+ tests/testreport: adjust for API break in storage v1.23.6
+ build(deps): bump github.com/containers/storage from 1.23.5 to 1.23.7
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.6.5 to 1.6.6
+ copier: put: ignore Typeflag="g"
+ Use curl to get repo file (fix #2714)
+ build(deps): bump github.com/containers/common from 0.25.0 to 0.26.0
+ build(deps): bump github.com/spf13/cobra from 1.0.0 to 1.1.1
+ Remove docs that refer to bors, since we're not using it
+ Buildah bud should not use stdin by default
+ bump containerd, docker, and golang.org/x/sys
+ Makefile: cross: remove windows.386 target
+ copier.copierHandlerPut: don't check length when there are errors
+ Stop excessive wrapping
+ CI: require that conformance tests pass
+ bump(github.com/openshift/imagebuilder) to v1.1.8
+ Skip tlsVerify insecure BUILD_REGISTRY_SOURCES
+ Fix build path wrong https://github.com/containers/podman/issues/7993
+ refactor pullpolicy to avoid deps
+ build(deps): bump github.com/containers/common from 0.24.0 to 0.25.0
+ CI: run gating tasks with a lot more memory
+ ADD and COPY: descend into excluded directories, sometimes
+ copier: add more context to a couple of error messages
+ copier: check an error earlier
+ copier: log stderr output as debug on success
+ Update nix pin with `make nixpkgs`
+ Set directory ownership when copied with ID mapping
+ build(deps): bump github.com/sirupsen/logrus from 1.6.0 to 1.7.0
+ build(deps): bump github.com/containers/common from 0.23.0 to 0.24.0
+ Cirrus: Remove bors artifacts
+ Sort build flag definitions alphabetically
+ ADD: only expand archives at the right time
+ Remove configuration for bors
+ Shell Completion for podman build flags
+ Bump c/common to v0.24.0
+ New CI check: xref --help vs man pages
+ CI: re-enable several linters
+ Move --userns-uid-map/--userns-gid-map description into buildah man page
+ add: preserve ownerships and permissions on ADDed archives
+ Makefile: tweak the cross-compile target
+ Bump containers/common to v0.23.0
+ chroot: create bind mount targets 0755 instead of 0700
+ Change call to Split() to safer SplitN()
+ chroot: fix handling of errno seccomp rules
+ build(deps): bump github.com/containers/image/v5 from 5.5.2 to 5.6.0
+ Add In Progress section to contributing
+ integration tests: make sure tests run in ${topdir}/tests
+ Run(): ignore containers.conf's environment configuration
+ Warn when setting healthcheck in OCI format
+ Cirrus: Skip git-validate on branches
+ tools: update git-validation to the latest commit
+ tools: update golangci-lint to v1.18.0
+ Add a few tests of push command
+ Add(): fix handling of relative paths with no ContextDir
+ build(deps): bump github.com/containers/common from 0.21.0 to 0.22.0
+ Lint: Use same linters as podman
+ Validate: reference HEAD
+ Fix buildah mount to display container names not ids
+ Update nix pin with `make nixpkgs`
+ Add missing --format option in buildah from man page
+ Fix up code based on codespell
+ build(deps): bump github.com/openshift/imagebuilder from 1.1.6 to 1.1.7
+ build(deps): bump github.com/containers/storage from 1.23.4 to 1.23.5
+ Improve buildah completions
+ Cirrus: Fix validate commit epoch
+ Fix bash completion of manifest flags
+ Uniform some man pages
+ Update Buildah Tutorial to address BZ1867426
+ Update bash completion of `manifest add` sub command
+ copier.Get(): hard link targets shouldn't be relative paths
+ build(deps): bump github.com/onsi/gomega from 1.10.1 to 1.10.2
+ Pass timestamp down to history lines
+ Timestamp gets updated every time you inspect an image
+ bud.bats: use absolute paths in newly-added tests
+ contrib/cirrus/lib.sh: don't use CN for the hostname
+ tests: Add some tests
+ Update `manifest add` man page
+ Extend flags of `manifest add`
+ build(deps): bump github.com/containers/storage from 1.23.3 to 1.23.4
+ build(deps): bump github.com/onsi/ginkgo from 1.14.0 to 1.14.1
+ Bump to v1.17.0-dev
+ CI: expand cross-compile checks
+
+## v1.16.0 (2020-09-03)
+ fix build on 32bit arches
+ containerImageRef.NewImageSource(): don't always force timestamps
+ Add fuse module warning to image readme
+ Heed our retry delay option values when retrying commit/pull/push
+ Switch to containers/common for seccomp
+ Use --timestamp rather then --omit-timestamp
+ docs: remove outdated notice
+ docs: remove outdated notice
+ build-using-dockerfile: add a hidden --log-rusage flag
+ build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
+ Discard ReportWriter if user sets options.Quiet
+ build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3
+ Fix ownership of content copied using COPY --from
+ newTarDigester: zero out timestamps in tar headers
+ Update nix pin with `make nixpkgs`
+ bud.bats: correct .dockerignore integration tests
+ Use pipes for copying
+ run: include stdout in error message
+ run: use the correct error for errors.Wrapf
+ copier: un-export internal types
+ copier: add Mkdir()
+ in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE
+ docs/buildah-commit.md: tweak some wording, add a --rm example
+ imagebuildah: don’t blank out destination names when COPYing
+ Replace retry functions with common/pkg/retry
+ StageExecutor.historyMatches: compare timestamps using .Equal
+ Update vendor of containers/common
+ Fix errors found in coverity scan
+ Change namespace handling flags to better match podman commands
+ conformance testing: ignore buildah.BuilderIdentityAnnotation labels
+ Vendor in containers/storage v1.23.0
+ Add buildah.IsContainer interface
+ Avoid feeding run_buildah to pipe
+ fix(buildahimage): add xz dependency in buildah image
+ Bump github.com/containers/common from 0.15.2 to 0.18.0
+ Howto for rootless image building from OpenShift
+ Add --omit-timestamp flag to buildah bud
+ Update nix pin with `make nixpkgs`
+ Shutdown storage on failures
+ Handle COPY --from when an argument is used
+ Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0
+ Cirrus: Use newly built VM images
+ Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92
+ Enhance the .dockerignore man pages
+ conformance: add a test for COPY from subdirectory
+ fix bug in manifest inspect
+ Add documentation for .dockerignore
+ Add BuilderIdentityAnnotation to identify buildah version
+ DOC: Add quay.io/containers/buildah image to README.md
+ Update buildahimages readme
+ fix spelling mistake in "info" command result display
+ Don't bind /etc/host and /etc/resolv.conf if network is not present
+ blobcache: avoid an unnecessary NewImage()
+ Build static binary with `buildGoModule`
+ copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit
+ tarFilterer: handle multiple archives
+ Fix a race we hit during conformance tests
+ Rework conformance testing
+ Update 02-registries-repositories.md
+ test-unit: invoke cmd/buildah tests with --flags
+ parse: fix a type mismatch in a test
+ Fix compilation of tests/testreport/testreport
+ build.sh: log the version of Go that we're using
+ test-unit: increase the test timeout to 40/45 minutes
+ Add the "copier" package
+ Fix & add notes regarding problematic language in codebase
+ Add dependency on github.com/stretchr/testify/require
+ CompositeDigester: add the ability to filter tar streams
+ BATS tests: make more robust
+ vendor golang.org/x/text@v0.3.3
+ Switch golang 1.12 to golang 1.13
+ imagebuildah: wait for stages that might not have even started yet
+ chroot, run: not fail on bind mounts from /sys
+ chroot: do not use setgroups if it is blocked
+ Set engine env from containers.conf
+ imagebuildah: return the right stage's image as the "final" image
+ Fix a help string
+ Deduplicate environment variables
+ switch containers/libpod to containers/podman
+ Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
+ Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0
+ Mask out /sys/dev to prevent information leak
+ linux: skip errors from the runtime kill
+ Mask over the /sys/fs/selinux in mask branch
+ Add VFS additional image store to container
+ tests: add auth tests
+ Allow "readonly" as alias to "ro" in mount options
+ Ignore OS X specific consistency mount option
+ Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0
+ Bump github.com/containers/common from 0.14.0 to 0.15.2
+ Rootless Buildah should default to IsolationOCIRootless
+ imagebuildah: fix inheriting multi-stage builds
+ Make imagebuildah.BuildOptions.Architecture/OS optional
+ Make imagebuildah.BuildOptions.Jobs optional
+ Resolve a possible race in imagebuildah.Executor.startStage()
+ Switch scripts to use containers.conf
+ Bump openshift/imagebuilder to v1.1.6
+ Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5
+ buildah, bud: support --jobs=N for parallel execution
+ executor: refactor build code inside new function
+ Add bud regression tests
+ Cirrus: Fix missing htpasswd in registry img
+ docs: clarify the 'triples' format
+ CHANGELOG.md: Fix markdown formatting
+ Add nix derivation for static builds
+ Bump to v1.16.0-dev
+ version centos7 for compatible
+
+## v1.15.0 (2020-06-17)
+ Bump github.com/containers/common from 0.12.0 to 0.13.1
+ Bump github.com/containers/storage from 1.20.1 to 1.20.2
+ Bump github.com/seccomp/containers-golang from 0.4.1 to 0.5.0
+ Bump github.com/stretchr/testify from 1.6.0 to 1.6.1
+ Bump github.com/opencontainers/runc from 1.0.0-rc9 to 1.0.0-rc90
+ Add CVE-2020-10696 to CHANGELOG.md and changelog.txt
+ Bump github.com/stretchr/testify from 1.5.1 to 1.6.0
+ Bump github.com/onsi/ginkgo from 1.12.2 to 1.12.3
+ Vendor in containers/common v0.12.0
+ fix lighttpd example
+ Vendor in new go.etcd.io/bbolt
+ Bump github.com/onsi/ginkgo from 1.12.1 to 1.12.2
+ Bump imagebuilder for ARG fix
+ Bump github.com/containers/common from 0.11.2 to 0.11.4
+ remove dependency on openshift struct
+ Warn on unset build arguments
+ vendor: update seccomp/containers-golang to v0.4.1
+ Amended docs
+ Updated docs
+ clean up comments
+ update exit code for tests
+ Implement commit for encryption
+ implementation of encrypt/decrypt push/pull/bud/from
+ fix resolve docker image name as transport
+ Bump github.com/opencontainers/go-digest from 1.0.0-rc1 to 1.0.0
+ Bump github.com/onsi/ginkgo from 1.12.0 to 1.12.1
+ Bump github.com/containers/storage from 1.19.1 to 1.19.2
+ Bump github.com/containers/image/v5 from 5.4.3 to 5.4.4
+ Add preliminary profiling support to the CLI
+ Bump github.com/containers/common from 0.10.0 to 0.11.2
+ Evaluate symlinks in build context directory
+ fix error info about get signatures for containerImageSource
+ Add Security Policy
+ Cirrus: Fixes from review feedback
+ Bump github.com/containers/storage from 1.19.0 to 1.19.1
+ Bump github.com/sirupsen/logrus from 1.5.0 to 1.6.0
+ imagebuildah: stages shouldn't count as their base images
+ Update containers/common v0.10.0
+ Bump github.com/fsouza/go-dockerclient from 1.6.4 to 1.6.5
+ Add registry to buildahimage Dockerfiles
+ Cirrus: Use pre-installed VM packages + F32
+ Cirrus: Re-enable all distro versions
+ Cirrus: Update to F31 + Use cache images
+ golangci-lint: Disable gosimple
+ Lower number of golangci-lint threads
+ Fix permissions on containers.conf
+ Don't force tests to use runc
+ Bump github.com/containers/common from 0.9.1 to 0.9.5
+ Return exit code from failed containers
+ Bump github.com/containers/storage from 1.18.2 to 1.19.0
+ Bump github.com/containers/common from 0.9.0 to 0.9.1
+ cgroup_manager should be under [engine]
+ Use c/common/pkg/auth in login/logout
+ Cirrus: Temporarily disable Ubuntu 19 testing
+ Add containers.conf to stablebyhand build
+ Update gitignore to exclude test Dockerfiles
+ Bump github.com/fsouza/go-dockerclient from 1.6.3 to 1.6.4
+ Bump github.com/containers/common from 0.8.1 to 0.9.0
+ Bump back to v1.15.0-dev
+ Remove warning for systemd inside of container
+
+## v1.14.8 (2020-04-09)
+ Run (make vendor)
+ Run (make -C tests/tools vendor)
+ Run (go mod tidy) before (go mod vendor) again
+ Fix (make vendor)
+ Bump validation
+ Bump back to v1.15.0-dev
+
+## v1.14.7 (2020-04-07)
+ Bump github.com/containers/image/v5 from 5.3.1 to 5.4.3
+ make vendor: run `tidy` after `vendor`
+ Do not skip the directory when the ignore pattern matches
+ Bump github.com/containers/common from 0.7.0 to 0.8.1
+ Downgrade sirupsen/logrus from 1.4.2
+ Fix errorf conventions
+ dockerignore tests : remove symlinks, rework
+ Bump back to v1.15.0-dev
+
+## v1.14.6 (2020-04-02)
+ bud.bats - cleanup, refactoring
+ vendor in latest containers/storage 1.18.0 and containers/common v0.7.0
+ Bump github.com/spf13/cobra from 0.0.6 to 0.0.7
+ Bump github.com/containers/storage from 1.16.5 to 1.17.0
+ Bump github.com/containers/image/v5 from 5.2.1 to 5.3.1
+ Fix Amazon install step
+ Bump back to v1.15.0-dev
+ Fix bud-build-arg-cache test
+ Make image history work correctly with new args handling
+ Don't add args to the RUN environment from the Builder
+ Update github.com/openshift/imagebuilder to v1.1.4
+ Add .swp files to .gitignore
+
+## v1.14.5 (2020-03-26)
+ revert #2246 FIPS mode change
+ Bump back to v1.15.0-dev
+ image with dup layers: we now have one on quay
+ digest test : make more robust
+
+## v1.14.4 (2020-03-25)
+ Fix fips-mode check for RHEL8 boxes
+ Fix potential CVE in tarfile w/ symlink (Edit 02-Jun-2020: Addresses CVE-2020-10696)
+ Fix .dockerignore with globs and ! commands
+ update install steps for Amazon Linux 2
+ Bump github.com/openshift/imagebuilder from 1.1.2 to 1.1.3
+ Add comment for RUN command in volume ownership test
+ Run stat command directly for volume ownership test
+ vendor in containers/common v0.6.1
+ Cleanup go.sum
+ Bump back to v1.15.0-dev
+
+## v1.14.3 (2020-03-17)
+ Update containers/storage to v1.16.5
+ Bump github.com/containers/storage from 1.16.2 to 1.16.4
+ Bump github.com/openshift/imagebuilder from 1.1.1 to 1.1.2
+ Update github.com/openshift/imagebuilder vendoring
+ Update unshare man page to fix script example
+ Fix compilation errors on non linux platforms
+ Bump containers/common and opencontainers/selinux versions
+ Add tests for volume ownership
+ Preserve volume uid and gid through subsequent commands
+ Fix FORWARD_NULL errors found by Coverity
+ Bump github.com/containers/storage from 1.16.1 to 1.16.2
+ Fix errors found by codespell
+ Bump back to v1.15.0-dev
+ Add Pull Request Template
+
+## v1.14.2 (2020-03-03)
+ Add Buildah pull request template
+ Bump to containers/storage v1.16.1
+ run_linux: fix tight loop if file is not pollable
+ Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3
+ Bump github.com/containers/common from 0.4.1 to 0.4.2
+ Bump back to v1.15.0-dev
+ Add Containerfile to build a versioned stable image on quay.io
+
+## v1.14.1 (2020-02-27)
+ Search for local runtime per values in containers.conf
+ Set correct ownership on working directory
+ BATS : in teardown, umount stale mounts
+ Bump github.com/spf13/cobra from 0.0.5 to 0.0.6
+ Bump github.com/fsouza/go-dockerclient from 1.6.1 to 1.6.3
+ Bump github.com/stretchr/testify from 1.4.0 to 1.5.1
+ Replace unix with syscall to allow vendoring into libpod
+ Update to containers/common v0.4.1
+ Improve remote manifest retrieval
+ Fix minor spelling errors in containertools README
+ Clear the right variable in buildahimage
+ Correct a couple of incorrect format specifiers
+ Update to containers/common v0.3.0
+ manifest push --format: force an image type, not a list type
+ run: adjust the order in which elements are added to $PATH
+ getDateAndDigestAndSize(): handle creation time not being set
+ Bump github.com/containers/common from 0.2.0 to 0.2.1
+ include installation steps for CentOS 8 and Stream
+ include installation steps for CentOS7 and forks
+ Adjust Ubuntu install info to also work on Pop!_OS
+ Make the commit id clear like Docker
+ Show error on copied file above context directory in build
+ Bump github.com/containers/image/v5 from 5.2.0 to 5.2.1
+ pull/from/commit/push: retry on most failures
+ Makefile: fix install.cni.sudo
+ Repair buildah so it can use containers.conf on the server side
+ Bump github.com/mattn/go-shellwords from 1.0.9 to 1.0.10
+ Bump github.com/fsouza/go-dockerclient from 1.6.0 to 1.6.1
+ Fixing formatting & build instructions
+ Add Code of Conduct
+ Bors: Fix no. req. github reviews
+ Cirrus+Bors: Simplify temp branch skipping
+ Bors-ng: Add documentation and status-icon
+ Bump github.com/onsi/ginkgo from 1.11.0 to 1.12.0
+ fix XDG_RUNTIME_DIR for authfile
+ Cirrus: Disable F29 testing
+ Cirrus: Add jq package
+ Cirrus: Fix lint + validation using wrong epoch
+ Stop using fedoraproject registry
+ Bors: Workaround ineffective required statuses
+ Bors: Enable app + Disable Travis
+ Cirrus: Add standardized log-collection
+ Cirrus: Improve automated lint + validation
+ Allow passing options to golangci-lint
+ Cirrus: Fixes from review feedback
+ Cirrus: Temporarily ignore VM testing failures
+ Cirrus: Migrate off papr + implement VM testing
+ Cirrus: Update packages + fixes for get_ci_vm.sh
+ Show validation command-line
+ Skip overlay test w/ vfs driver
+ use alpine, not centos, for various tests
+ Flake handling: cache and prefetch images
+ Bump to v1.15.0-dev
+
+## v1.14.0 (2020-02-05)
+ bump github.com/mtrmac/gpgme
+ Update containers/common to v0.1.4
+ manifest push: add --format option
+ Bump github.com/onsi/gomega from 1.8.1 to 1.9.0
+ vendor github.com/containers/image/v5@v5.2.0
+ info test: deal with random key order
+ Bump back to v1.14.0-dev
+
+## v1.13.2 (2020-01-29)
+ sign.bats: set GPG_TTY=/dev/null
+ Fix parse_unsupported.go
+ getDateAndDigestAndSize(): use manifest.Digest
+ Bump github.com/opencontainers/selinux from 1.3.0 to 1.3.1
+ Bump github.com/containers/common from 0.1.0 to 0.1.2
+ Touch up os/arch doc
+ chroot: handle slightly broken seccomp defaults
+ buildahimage: specify fuse-overlayfs mount options
+ Bump github.com/mattn/go-shellwords from 1.0.7 to 1.0.9
+ copy.bats: make sure we detect failures due to missing source
+ parse: don't complain about not being able to rename something to itself
+ Makefile: use a $(GO_TEST) macro, fix a typo
+ manifests: unit test fix
+ Fix build for 32bit platforms
+ Allow users to set OS and architecture on bud
+ Fix COPY in containerfile with envvar
+ Bump c/storage to v1.15.7
+ add --sign-by to bud/commit/push, --remove-signatures for pull/push
+ Remove cut/paste error in CHANGELOG.md
+ Update vendor of containers/common to v0.1.0
+ update install instructions for Debian, Raspbian and Ubuntu
+ Add support for containers.conf
+ Bump back to v1.14.0-dev
+
+## v1.13.1 (2020-01-14)
+ Bump github.com/containers/common from 0.0.5 to 0.0.7
+ Bump github.com/onsi/ginkgo from 1.10.3 to 1.11.0
+ Bump github.com/pkg/errors from 0.8.1 to 0.9.0
+ Bump github.com/onsi/gomega from 1.7.1 to 1.8.1
+ Add codespell support
+ copyFileWithTar: close source files at the right time
+ copy: don't digest files that we ignore
+ Check for .dockerignore specifically
+ Travis: rm go 1.12.x
+ Don't set up excludes if there is only one pattern to match
+ set HOME env to /root on chroot-isolation by default
+ docs: fix references to containers-*.5
+ update openshift/api
+ fix bug Add check .dockerignore COPY file
+ buildah bud --volume: run from tmpdir, not source dir
+ Fix imageNamePrefix to give consistent names in buildah-from
+ cpp: use -traditional and -undef flags
+ Fix image reference in tutorial 4
+ discard outputs coming from onbuild command on buildah-from --quiet
+ make --format columnizing consistent with buildah images
+ Bump to v1.14.0-dev
+
+## v1.13.0 (2019-12-27)
+ Bump to c/storage v1.15.5
+ Update container/storage to v1.15.4
+ Fix option handling for volumes in build
+ Rework overlay pkg for use with libpod
+ Fix buildahimage builds for buildah
+ Add support for FIPS-Mode backends
+ Set the TMPDIR for pulling/pushing image to $TMPDIR
+ WIP: safer test for pull --all-tags
+ BATS major cleanup: blobcache.bats: refactor
+ BATS major cleanup: part 4: manual stuff
+ BATS major cleanup, step 3: yet more run_buildah
+ BATS major cleanup, part 2: use more run_buildah
+ BATS major cleanup, part 1: log-level
+ Bump github.com/containers/image/v5 from 5.0.0 to 5.1.0
+ Bump github.com/containers/common from 0.0.3 to 0.0.5
+ Bump to v1.13.0-dev
+
+## v1.12.0 (2019-12-13)
+ Allow ADD to use http src
+ Bump to c/storage v.1.15.3
+ install.md: update golang dependency
+ imgtype: reset storage opts if driver overridden
+ Start using containers/common
+ overlay.bats typo: fuse-overlays should be fuse-overlayfs
+ chroot: Unmount with MNT_DETACH instead of UnmountMountpoints()
+ bind: don't complain about missing mountpoints
+ imgtype: check earlier for expected manifest type
+ Vendor containers/storage fix
+ Vendor containers/storage v1.15.1
+ Add history names support
+ PR takeover of #1966
+ Tests: Add inspect test check steps
+ Tests: Add container name and id check in containers test steps
+ Test: Get permission in add test
+ Tests: Add a test for tag by id
+ Tests: Add test cases for push test
+ Tests: Add image digest test
+ Tests: Add some buildah from tests
+ Tests: Add two commit test
+ Tests: Add buildah bud with --quiet test
+ Tests: Add two test for buildah add
+ Bump back to v1.12.0-dev
+
+## v1.11.6 (2019-12-03)
+ Handle missing equal sign in --from and --chown flags for COPY/ADD
+ bud COPY does not download URL
+ Bump github.com/onsi/gomega from 1.7.0 to 1.7.1
+ Fix .dockerignore exclude regression
+ Ran buildah through codespell
+ commit(docker): always set ContainerID and ContainerConfig
+ Touch up commit man page image parameter
+ Add builder identity annotations.
+ info: use util.Runtime()
+ Bump github.com/onsi/ginkgo from 1.10.2 to 1.10.3
+ Bump back to v1.12.0-dev
+
+## v1.11.5 (2019-11-11)
+ Enhance error on unsafe symbolic link targets
+ Add OCIRuntime to info
 + Check nonexistent authfile
+ Only output image id if running buildah bud --quiet
+ Fix --pull=true||false and add --pull-never to bud and from (retry)
+ cgroups v2: tweak or skip tests
+ Prepwork: new 'skip' helpers for tests
+ Handle configuration blobs for manifest lists
+ unmarshalConvertedConfig: avoid using the updated image's ref
+ Add completions for Manifest commands
+ Add disableFips option to secrets pkg
+ Update bud.bats test archive test
+ Add test for caching based on content digest
+ Builder.untarPath(): always evaluate b.ContentDigester.Hash()
+ Bump github.com/onsi/ginkgo from 1.10.1 to 1.10.2
+ Fix another broken test: copy-url-mtime
+ yet more fixes
+ Actual bug fix for 'add' test: fix the expected mode
+ BATS tests - lots of mostly minor cleanup
+ build: drop support for ostree
+ Add support for make vendor-in-container
+ imgtype: exit with error if storage fails
+ remove XDG_RUNTIME_DIR from default authfile path
+ fix troubleshooting redirect instructions
+ Bump back to v1.12.0-dev
+
+## v1.11.4 (2019-10-28)
+ buildah: add a "manifest" command
+ manifests: add the module
+ pkg/supplemented: add a package for grouping images together
+ pkg/manifests: add a manifest list build/manipulation API
+ Update for ErrUnauthorizedForCredentials API change in containers/image
+ Update for manifest-lists API changes in containers/image
+ version: also note the version of containers/image
+ Move to containers/image v5.0.0
+ Enable --device directory as src device
+ Fix git build with branch specified
+ Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1
+ Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0
+ Add clarification to the Tutorial for new users
+ Silence "using cache" to ensure -q is fully quiet
+ Add OWNERS File to Buildah
+ Bump github.com/containers/storage from 1.13.4 to 1.13.5
+ Move runtime flag to bud from common
+ Commit: check for storage.ErrImageUnknown using errors.Cause()
+ Fix crash when invalid COPY --from flag is specified.
+ Bump back to v1.12.0-dev
+
+## v1.11.3 (2019-10-04)
+ Update c/image to v4.0.1
+ Bump github.com/spf13/pflag from 1.0.3 to 1.0.5
+ Fix --build-args handling
+ Bump github.com/spf13/cobra from 0.0.3 to 0.0.5
+ Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2
+ Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1
+ Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4
+ Add support for retrieving context from stdin "-"
+ Ensure bud remote context cleans up on error
+ info: add cgroups2
+ Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1
+ Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6
+ Bump github.com/stretchr/testify from 1.3.0 to 1.4.0
+ Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0
+ Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3
+ Bump github.com/onsi/gomega from 1.5.0 to 1.7.0
+ update c/storage to v1.13.4
+ Print build 'STEP' line to stdout, not stderr
+ Fix travis-ci on forks
+ Vendor c/storage v1.13.3
+ Use Containerfile by default
+ Added tutorial on how to include Buildah as library
+ util/util: Fix "configuraitno" -> "configuration" log typo
+ Bump back to v1.12.0-dev
+
+## v1.11.2 (2019-09-13)
+ Add some cleanup code
+ Move devices code to unit specific directory.
+ Bump back to v1.12.0-dev
+
+## v1.11.1 (2019-09-11)
+ Add --devices flag to bud and from
 + Downgrade .papr to highest atomic version
+ Add support for /run/.containerenv
+ Truncate output of too long image names
+ Preserve file and directory mount permissions
+ Bump fedora version from 28 to 30
+ makeImageRef: ignore EmptyLayer if Squash is set
+ Set TMPDIR to /var/tmp by default
+ replace --debug=false with --log-level=error
+ Allow mounts.conf entries for equal source and destination paths
+ fix label and annotation for 1-line Dockerfiles
+ Enable interfacer linter and fix lints
+ install.md: mention goproxy
+ Makefile: use go proxy
+ Bump to v1.12.0-dev
+
+## v1.11.0 (2019-08-29)
+ tests/bud.bats: add --signature-policy to some tests
+ Vendor github.com/openshift/api
+ pull/commit/push: pay attention to $BUILD_REGISTRY_SOURCES
+ Add `--log-level` command line option and deprecate `--debug`
+ add support for cgroupsV2
+ Correctly detect ExitError values from Run()
+ Disable empty logrus timestamps to reduce logger noise
+ Remove outdated deps Makefile target
+ Remove gofmt.sh in favor of golangci-lint
+ Remove govet.sh in favor of golangci-lint
+ Allow to override build date with SOURCE_DATE_EPOCH
+ Update shebangs to take env into consideration
+ Fix directory pull image names
+ Add --digestfile and Re-add push statement as debug
+ README: mention that Podman uses Buildah's API
+ Use content digests in ADD/COPY history entries
+ add: add a DryRun flag to AddAndCopyOptions
+ Fix possible runtime panic on bud
+ Add security-related volume options to validator
+ use correct path for ginkgo
+ Add bud 'without arguments' integration tests
+ Update documentation about bud
+ add: handle hard links when copying with .dockerignore
+ add: teach copyFileWithTar() about symlinks and directories
+ Allow buildah bud to be called without arguments
+ imagebuilder: fix detection of referenced stage roots
+ Touch up go mod instructions in install
+ run_linux: fix mounting /sys in a userns
+ Vendor Storage v1.13.2
+ Cirrus: Update VM images
+ Fix handling of /dev/null masked devices
+ Update `bud`/`from` help to contain indicator for `--dns=none`
+ Bump back to v1.11.0-dev
+
+## v1.10.1 (2019-08-08)
+ Bump containers/image to v3.0.2 to fix keyring issue
+ Bug fix for volume minus syntax
+ Bump container/storage v1.13.1 and containers/image v3.0.1
+ bump github.com/containernetworking/cni to v0.7.1
+ Add overlayfs to fuse-overlayfs tip
+ Add automatic apparmor tag discovery
+ Fix bug whereby --get-login has no effect
+ Bump to v1.11.0-dev
+
+## v1.10.0 (2019-08-02)
+ vendor github.com/containers/image@v3.0.0
+ Remove GO111MODULE in favor of `-mod=vendor`
+ Vendor in containers/storage v1.12.16
+ Add '-' minus syntax for removal of config values
+ tests: enable overlay tests for rootless
+ rootless, overlay: use fuse-overlayfs
+ vendor github.com/containers/image@v2.0.1
+ Added '-' syntax to remove volume config option
+ delete `successfully pushed` message
+ Add golint linter and apply fixes
+ vendor github.com/containers/storage@v1.12.15
+ Change wait to sleep in buildahimage readme
+ Handle ReadOnly images when deleting images
+ Add support for listing read/only images
+
+## v1.9.2 (2019-07-19)
+ from/import: record the base image's digest, if it has one
+ Fix CNI version retrieval to not require network connection
+ Add misspell linter and apply fixes
+ Add goimports linter and apply fixes
+ Add stylecheck linter and apply fixes
+ Add unconvert linter and apply fixes
+ image: make sure we don't try to use zstd compression
+ run.bats: skip the "z" flag when testing --mount
+ Update to runc v1.0.0-rc8
+ Update to match updated runtime-tools API
+ bump github.com/opencontainers/runtime-tools to v0.9.0
+ Build e2e tests using the proper build tags
+ Add unparam linter and apply fixes
+ Run: correct a typo in the --cap-add help text
+ unshare: add a --mount flag
+ fix push check image name is not empty
+ Bump to v1.9.2-dev
+
+## v1.9.1 (2019-07-12)
+ add: fix slow copy with no excludes
+ Add errcheck linter and fix missing error check
+ Improve tests/tools/Makefile parallelism and abstraction
+ Fix response body not closed resource leak
+ Switch to golangci-lint
+ Add gomod instructions and mailing list links
+ On Masked path, check if /dev/null already mounted before mounting
+ Update to containers/storage v1.12.13
+ Refactor code in package imagebuildah
+ Add rootless podman with NFS issue in documentation
+ Add --mount for buildah run
+ import method ValidateVolumeOpts from libpod
+ Fix typo
+ Makefile: set GO111MODULE=off
+ rootless: add the built-in slirp DNS server
+ Update docker/libnetwork to get rid of outdated sctp package
+ Update buildah-login.md
+ migrate to go modules
+ install.md: mention go modules
+ tests/tools: go module for test binaries
+ fix --volume splits comma delimited option
+ Add bud test for RUN with a priv'd command
+ vendor logrus v1.4.2
+ pkg/cli: panic when flags can't be hidden
+ pkg/unshare: check all errors
+ pull: check error during report write
+ run_linux.go: ignore unchecked errors
+ conformance test: catch copy error
+ chroot/run_test.go: export funcs to actually be executed
+ tests/imgtype: ignore error when shutting down the store
+ testreport: check json error
+ bind/util.go: remove unused func
+ rm chroot/util.go
+ imagebuildah: remove unused `dedupeStringSlice`
+ StageExecutor: EnsureContainerPath: catch error from SecureJoin()
+ imagebuildah/build.go: return <expr> instead of branching
+ rmi: avoid redundant branching
+ conformance tests: nilness: allocate map
+ imagebuildah/build.go: avoid redundant `filepath.Join()`
+ imagebuildah/build.go: avoid redundant `os.Stat()`
+ imagebuildah: omit comparison to bool
+ fix "ineffectual assignment" lint errors
+ docker: ignore "repeats json tag" lint error
+ pkg/unshare: use `...` instead of iterating a slice
+ conformance: bud test: use raw strings for regexes
+ conformance suite: remove unused func/var
+ buildah test suite: remove unused vars/funcs
+ testreport: fix golangci-lint errors
+ util: remove redundant `return` statement
+ chroot: only log clean-up errors
+ images_test: ignore golangci-lint error
+ blobcache: log error when draining the pipe
+ imagebuildah: check errors in deferred calls
+ chroot: fix error handling in deferred funcs
+ cmd: check all errors
+ chroot/run_test.go: check errors
+ chroot/run.go: check errors in deferred calls
+ imagebuildah.Executor: remove unused onbuild field
+ docker/types.go: remove unused struct fields
+ util: use strings.ContainsRune instead of index check
+ Cirrus: Initial implementation
+ Bump to v1.9.1-dev
+
+## v1.9.0 (2019-06-15)
+ buildah-run: fix-out-of-range panic (2)
+ Bump back to v1.9.0-dev
+
+## v1.8.4 (2019-06-13)
+ Update containers/image to v2.0.0
+ run: fix hang with run and --isolation=chroot
+ run: fix hang when using run
+ chroot: drop unused function call
 + remove --> before imageID on build
+ Always close stdin pipe
+ Write deny to setgroups when doing single user mapping
+ Avoid including linux/memfd.h
+ Add a test for the symlink pointing to a directory
+ Add missing continue
+ Fix the handling of symlinks to absolute paths
+ Only set default network sysctls if not rootless
+ Support --dns=none like podman
+ fix bug --cpu-shares parsing typo
+ Fix validate complaint
+ Update vendor on containers/storage to v1.12.10
+ Create directory paths for COPY thereby ensuring correct perms
+ imagebuildah: use a stable sort for comparing build args
+ imagebuildah: tighten up cache checking
 + bud.bats: add a test verifying the order of --build-args
+ add -t to podman run
+ imagebuildah: simplify screening by top layers
+ imagebuildah: handle ID mappings for COPY --from
+ imagebuildah: apply additionalTags ourselves
+ bud.bats: test additional tags with cached images
+ bud.bats: add a test for WORKDIR and COPY with absolute destinations
+ Cleanup Overlay Mounts content
+
+## v1.8.3 (2019-06-04)
+ Add support for file secret mounts
+ Add ability to skip secrets in mounts file
+ allow 32bit builds
+ fix tutorial instructions
+ imagebuilder: pass the right contextDir to Add()
+ add: use fileutils.PatternMatcher for .dockerignore
+ bud.bats: add another .dockerignore test
+ unshare: fallback to single usermapping
+ addHelperSymlink: clear the destination on os.IsExist errors
+ bud.bats: test replacing symbolic links
+ imagebuildah: fix handling of destinations that end with '/'
+ bud.bats: test COPY with a final "/" in the destination
+ linux: add check for sysctl before using it
+ unshare: set _CONTAINERS_ROOTLESS_GID
 + Rework buildahimages
+ build context: support https git repos
+ Add a test for ENV special chars behaviour
+ Check in new Dockerfiles
+ Apply custom SHELL during build time
+ config: expand variables only at the command line
+ SetEnv: we only need to expand v once
+ Add default /root if empty on chroot iso
+ Add support for Overlay volumes into the container.
+ Export buildah validate volume functions so it can share code with libpod
+ Bump baseline test to F30
+ Fix rootless handling of /dev/shm size
+ Avoid fmt.Printf() in the library
+ imagebuildah: tighten cache checking back up
+ Handle WORKDIR with dangling target
+ Default Authfile to proper path
+ Make buildah run --isolation follow BUILDAH_ISOLATION environment
+ Vendor in latest containers/storage and containers/image
+ getParent/getChildren: handle layerless images
+ imagebuildah: recognize cache images for layerless images
+ bud.bats: test scratch images with --layers caching
+ Get CHANGELOG.md updates
+ Add some symlinks to test our .dockerignore logic
+ imagebuildah: addHelper: handle symbolic links
+ commit/push: use an everything-allowed policy
+ Correct manpage formatting in files section
+ Remove must be root statement from buildah doc
+ Change image names to stable, testing and upstream
+ Bump back to v1.9.0-dev
+
+## v1.8.2 (2019-05-02)
+ Vendor Storage 1.12.6
+ Create scratch file in TESTDIR
+ Test bud-copy-dot with --layers picks up changed file
+ Bump back to 1.9.0-dev
+
+## v1.8.1 (2019-05-01)
+ Don't create directory on container
+ Replace kubernetes/pause in tests with k8s.gcr.io/pause
+ imagebuildah: don't remove intermediate images if we need them
+ Rework buildahimagegit to buildahimageupstream
+ Fix Transient Mounts
+ Handle WORKDIRs that are symlinks
+ allow podman to build a client for windows
+ Touch up 1.9-dev to 1.9.0-dev
+ Bump to 1.9-dev
+
+## v1.8.0 (2019-04-26)
+ Resolve symlink when checking container path
+ commit: commit on every instruction, but not always with layers
+ CommitOptions: drop the unused OnBuild field
+ makeImageRef: pass in the whole CommitOptions structure
+ cmd: API cleanup: stores before images
+ run: check if SELinux is enabled
+ Fix buildahimages Dockerfiles to include support for additionalimages mounted from host.
+ Detect changes in rootdir
+ Fix typo in buildah-pull(1)
+ Vendor in latest containers/storage
+ Keep track of any build-args used during buildah bud --layers
+ commit: always set a parent ID
+ imagebuildah: rework unused-argument detection
+ fix bug dest path when COPY .dockerignore
+ Move Host IDMAppings code from util to unshare
+ Add BUILDAH_ISOLATION rootless back
+ Travis CI: fail fast, upon error in any step
+ imagebuildah: only commit images for intermediate stages if we have to
+ Use errors.Cause() when checking for IsNotExist errors
+ auto pass http_proxy to container
+ Bump back to 1.8-dev
+
+## v1.7.3 (2019-04-16)
+ imagebuildah: don't leak image structs
+ Add Dockerfiles for buildahimages
+ Bump to Replace golang 1.10 with 1.12
+ add --dns* flags to buildah bud
+ Add hack/build_speed.sh test speeds on building container images
+ Create buildahimage Dockerfile for Quay
+ rename 'is' to 'expect_output'
+ squash.bats: test squashing in multi-layered builds
+ bud.bats: test COPY --from in a Dockerfile while using the cache
+ commit: make target image names optional
+ Fix bud-args to allow comma separation
+ oops, missed some tests in commit.bats
+ new helper: expect_line_count
+ New tests for #1467 (string slices in cmdline opts)
+ Workarounds for dealing with travis; review feedback
+ BATS tests - extensive but minor cleanup
+ imagebuildah: defer pulling images for COPY --from
+ imagebuildah: centralize COMMIT and image ID output
+ Travis: do not use traviswait
+ imagebuildah: only initialize imagebuilder configuration once per stage
+ Make cleaner error on Dockerfile build errors
+ unshare: move to pkg/
+ unshare: move some code from cmd/buildah/unshare
+ Fix handling of Slices versus Arrays
+ imagebuildah: reorganize stage and per-stage logic
+ imagebuildah: add empty layers for instructions
+ Add missing step in installing into Ubuntu
+ fix bug in .dockerignore support
+ imagebuildah: deduplicate prepended "FROM" instructions
+ Touch up intro
+ commit: set created-by to the shell if it isn't set
+ commit: check that we always set a "created-by"
+ docs/buildah.md: add "containers-" prefixes under "SEE ALSO"
+ Bump back to 1.8-dev
+
+## v1.7.2 (2019-03-28)
+ mount: do not create automatically a namespace
+ buildah: correctly create the userns if euid!=0
+ imagebuildah.Build: consolidate cleanup logic
+ CommitOptions: drop the redundant Store field
+ Move pkg/chrootuser from libpod to buildah.
+ imagebuildah: record image IDs and references more often
+ vendor imagebuilder v1.1.0
+ imagebuildah: fix requiresStart/noRunsRemaining confusion
+ imagebuildah: check for unused args across stages
+ bump github.com/containernetworking/cni to v0.7.0-rc2
+ imagebuildah: use "useCache" instead of "noCache"
+ imagebuildah.resolveNameToImageRef(): take name as a parameter
 + Export fields of the DockerIgnore struct
+ imagebuildah: drop the duplicate containerIDs list
+ rootless: by default use the host network namespace
+ imagebuildah: split Executor and per-stage execution
+ imagebuildah: move some fields around
+ golint: make golint happy
+ docs: 01-intro.md: add missing . in Dockerfile examples
+ fix bug using .dockerignore
+ Do not create empty mounts.conf file
+ images: suppress a spurious blank line with no images
+ from: distinguish between ADD and COPY
+ fix bug to not separate each --label value with comma
+ buildah-bud.md: correct a typo, note a default
+ Remove mistaken code that got merged in other PR
+ add sample registries.conf to docs
+ escape shell variables in README example
+ slirp4netns: set mtu to 65520
+ images: imageReposToMap() already adds <none>:<none>
+ imagebuildah.ReposToMap: move to cmd
+ Build: resolve copyFrom references earlier
+ Allow rootless users to use the cache directory in homedir
+ bud.bats: use the per-test temp directory
+ bud.bats: log output before counting length
+ Simplify checks for leftover args
+ Print commitID with --layers
+ fix bug images use the template to print results
+ rootless: honor --net host
 + onsi/gomega add missing files
+ vendor latest openshift/imagebuilder
+ Remove noop from squash help
+ Prepend a comment to files setup in container
+ imagebuildah resolveSymlink: fix handling of relative links
+ Errors should be printed to stderr
+ Add recommends for slirp4netns and fuse-overlay
+ Update pull and pull-always flags
+ Hide from users command options that we don't want them to use.
+ Update secrets fipsmode patch to work on rootless containers
+ fix unshare option handling and documentation
+ Vendor in latest containers/storage
+ Hard-code docker.Transport use in pull --all-tags
+ Use a types.ImageReference instead of (transport, name) strings in pullImage etc.
+ Move the computation of srcRef before first pullAndFindImage
+ Don't throw away user-specified tag for pull --all-tags
+ CHANGES BEHAVIOR: Remove the string format input to localImageNameForReference
+ Don't try to parse imageName as transport:image in pullImage
+ Use reference.WithTag instead of manual string manipulation in Pull
+ Don't pass image = transport:repo:tag, transport=transport to pullImage
+ Fix confusing variable naming in Pull
+ Don't try to parse image name as a transport:image
+ Fix error reporting when parsing trans+image
+ Remove 'transport == ""' handling from the pull path
+ Clean up "pulls" of local image IDs / ID prefixes
+ Simplify ExpandNames
+ Document the semantics of transport+name returned by ResolveName
 + Update gitvalidation epoch
+ Bump back to 1.8-dev
+
+## v1.7.1 (2019-02-26)
+ vendor containers/image v1.5
+ Move secrets code from libpod into buildah
+ Update CHANGELOG.md with the past changes
+ README.md: fix typo
+ Fix a few issues found by tests/validate/gometalinter.sh
+ Neutralize buildah/unshare on non-Linux platforms
+ Explicitly specify a directory to find(1)
+ README.md: rephrase Buildah description
+ Stop printing default twice in cli --help
+ install.md: add section about vendoring
+ Bump to 1.8-dev
+
+## v1.7 (2019-02-21)
+ vendor containers/image v1.4
+ Make "images --all" faster
+ Remove a misleading comment
+ Remove quiet option from pull options
+ Make sure buildah pull --all-tags only works with docker transport
+ Support oci layout format
+ Fix pulling of images within buildah
+ Fix tls-verify polarity
+ Travis: execute make vendor and hack/tree_status.sh
+ vendor.conf: remove unused dependencies
+ add missing vendor/github.com/containers/libpod/vendor.conf
+ vendor.conf: remove github.com/inconshreveable/mousetrap
+ make vendor: always fetch the latest vndr
+ add hack/tree_status.sh script
+ Bump c/Storage to 1.10
+ Add --all-tags test to pull
+ mount: make error clearer
+ Remove global flags from cli help
+ Set --disable-compression to true as documented
+ Help document using buildah mount in rootless mode
+ healthcheck start-period: update documentation
+ Vendor in latest c/storage and c/image
+ dumpbolt: handle nested buckets
+ Fix buildah commit compress by default
+ Test on xenial, not trusty
+ unshare: reexec using a memfd copy instead of the binary
+ Add --target to bud command
+ Fix example for setting multiple environment variables
+ main: fix rootless mode
+ buildah: force umask 022
+ pull.bats: specify registry config when using registries
+ pull.bats: use the temporary directory, not /tmp
+ unshare: do not set rootless mode if euid=0
+ Touch up cli help examples and a few nits
+ Add an undocumented dumpbolt command
+ Move tar commands into containers/storage
+ Fix bud issue with 2 line Dockerfile
+ Add package install descriptions
+ Note configuration file requirements
+ Replace urfave/cli with cobra
+ cleanup vendor.conf
+ Vendor in latest containers/storage
+ Add Quiet to PullOptions and PushOptions
+ cmd/commit: add flag omit-timestamp to allow for deterministic builds
+ Add options for empty-layer history entries
+ Make CLI help descriptions and usage a bit more consistent
+ vndr opencontainers/selinux
+ Bump baseline test Fedora to 29
+ Bump to v1.7-dev-1
+ Bump to v1.6-1
+ Add support for ADD --chown
+ imagebuildah: make EnsureContainerPath() check/create the right one
+ Bump 1.7-dev
+ Fix contrib/rpm/bulidah.spec changelog date
+
+## v1.6-1 (2019-01-18)
+ Add support for ADD --chown
+ imagebuildah: make EnsureContainerPath() check/create the right one
+ Fix contrib/rpm/bulidah.spec changelog date
+ Vendor in latest containers/storage
+ Revendor everything
+ Revendor in latest code by release
+ unshare: do not set USER=root
+ run: ignore EIO when flushing at the end, avoid double log
+ build-using-dockerfile,commit: disable compression by default
+ Update some comments
+ Make rootless work under no_pivot_root
+ Add CreatedAtRaw date field for use with Format
+ Properly format images JSON output
+ pull: add all-tags option
+ Fix support for multiple Short options
+ pkg/blobcache: add synchronization
+ Skip empty files in file check of conformance test
+ Use NoPivot also for RUN, not only for run
+ Remove no longer used isReferenceInsecure / isRegistryInsecure
+ Do not set OCIInsecureSkipTLSVerify based on registries.conf
+ Remove duplicate entries from images JSON output
+ vendor parallel-copy from containers/image
+ blobcache.bats: adjust explicit push tests
+ Handle one line Dockerfile with layers
+ We should only warn if user actually requests Hostname be set in image
+ Fix compiler Warning about comparing different size types
+ imagebuildah: don't walk if rootdir and path are equal
+ Add aliases for buildah containers, so buildah list, ls and ps work
+ vendor: use faster version instead compress/gzip
+ vendor: update libpod
+ Properly handle Hostname inside of RUN command
+ docs: mention how to mount in rootless mode
+ tests: use fully qualified name for centos image
+ travis.yml: use the fully qualified name for alpine
+ mount: allow mount only when using vfs
+ Add some tests for buildah pull
+ Touch up images -q processing
+ Refactor: Use library shared idtools.ParseIDMap() instead of bundling it
+ bump GITVALIDATE_EPOCH
+ cli.BudFlags: add `--platform` nop
+ Makefile: allow packagers to more easily add tags
+ Makefile: soften the requirement on git
+ tests: add containers json test
+ Inline blobCache.putBlob into blobCacheDestination.PutBlob
+ Move saveStream and putBlob near blobCacheDestination.PutBlob
+ Remove BlobCache.PutBlob
+ Update for API changes
+ Vendor c/image after merging c/image#536
+ Handle 'COPY --from' in Dockerfile
+ Vendor in latest content from github.com/containers/storage
+ Clarify docker.io default in push with docker-daemon
+ Test blob caching
+ Wire in a hidden --blob-cache option
+ Use a blob cache when we're asked to use one
+ Add --disable-compression to 'build-using-dockerfile'
+ Add a blob cache implementation
+ vendor: update containers/storage
+ Update for sysregistriesv2 API changes
+ Update containers/image to 63a1cbdc5e6537056695cf0d627c0a33b334df53
+ clean up makefile variables
+ Fix file permission
+ Complete the instructions for the command
+ Show warning when a build arg not used
+ Assume user 0 group 0, if /etc/passwd file in container.
+ Add buildah info command
+ Enable -q when --filter is used for images command
+ Add v1.5 Release Announcement
+ Fix dangling filter for images command
+ Fix completions to print Names as well as IDs
+ tests: Fix file permissions
+ Bump 1.6-dev
+
+## v1.5-1 (2018-11-21)
+ Bump min go to 1.10 in install.md
+ vendor: update ostree-go
+ Update docker build command line in conformance test
+ Print command in SystemExec as debug information
+ Add some skip word for inspect check in conformance test
+ Update regex for multi stage base test
+ Sort CLI flags
+ vendor: update containers/storage
+ Add note to install about non-root on RHEL/CentOS
 + Update imagebuild dependency to support heading ARGs in Dockerfile
+ rootless: do not specify --rootless to the OCI runtime
+ Export resolvesymlink function
+ Exclude --force-rm from common bud cli flags
+ run: bind mount /etc/hosts and /etc/resolv.conf if not in a volume
+ rootless: use slirp4netns to setup the network namespace
+ Instructions for completing the pull command
+ Fix travis to not run environment variable patch
+ rootless: only discard network configuration names
+ run: only set up /etc/hosts or /etc/resolv.conf with network
+ common: getFormat: match entire string not only the prefix
+ vendor: update libpod
+ Change validation EPOCH
+ Fixing broken link for container-registries.conf
+ Restore rootless isolation test for from volume ro test
+ ostree: fix tag for build constraint
+ Handle directories better in bud -f
+ vndr in latest containers/storage
+ Fix unshare gofmt issue
+ runSetupBuiltinVolumes(): break up volume setup
+ common: support a per-user registries conf file
+ unshare: do not override the configuration
+ common: honor the rootless configuration file
+ unshare: create a new mount namespace
+ unshare: support libpod rootless pkg
+ Use libpod GetDefaultStorage to report proper storage config
+ Allow container storage to manage the SELinux labels
+ Resolve image names with default transport in from command
+ run: When the value of isolation is set, use the set value instead of the default value.
+ Vendor in latest containers/storage and opencontainers/selinux
+ Remove no longer valid todo
+ Check for empty buildTime in version
+ Change gofmt so it runs on all but 1.10
+ Run gofmt only on Go 1.11
+ Walk symlinks when checking cached images for copied/added files
+ ReserveSELinuxLabels(): handle wrapped errors from OpenBuilder
+ Set WorkingDir to empty, not / for conformance
 + Update calls in e2e to address 1101
+ imagebuilder.BuildDockerfiles: return the image ID
+ Update for changes in the containers/image API
+ bump(github.com/containers/image)
+ Allow setting --no-pivot default with an env var
+ Add man page and bash completion, for --no-pivot
+ Add the --no-pivot flag to the run command
+ Improve reporting about individual pull failures
+ Move the "short name but no search registries" error handling to resolveImage
+ Return a "search registries were needed but empty" indication in util.ResolveName
+ Simplify handling of the "tried to pull an image but found nothing" case in newBuilder
+ Don't even invoke the pull loop if options.FromImage == ""
+ Eliminate the long-running ref and img variables in resolveImage
+ In resolveImage, return immediately on success
+ Fix From As in Dockerfile
+ Vendor latest containers/image
+ Vendor in latest libpod
+ Sort CLI flags of buildah bud
+ Change from testing with golang 1.9 to 1.11.
+ unshare: detect when unprivileged userns are disabled
+ Optimize redundant code
+ fix missing format param
+ chroot: fix the args check
+ imagebuildah: make ResolveSymLink public
+ Update copy chown test
+ buildah: use the same logic for XDG_RUNTIME_DIR as podman
+ V1.4 Release Announcement
+ Podman --privileged selinux is broken
+ papr: mount source at gopath
+ parse: Modify the return value
+ parse: modify the verification of the isolation value
+ Make sure we log or return every error
+ pullImage(): when completing an image name, try docker://
+ Fix up Tutorial 3 to account for format
+ Vendor in latest containers/storage and containers/image
+ docs/tutorials/01-intro.md: enhanced installation instructions
+ Enforce "blocked" for registries for the "docker" transport
+ Correctly set DockerInsecureSkipTLSVerify when pulling images
+ chroot: set up seccomp and capabilities after supplemental groups
+ chroot: fix capabilities list setup and application
+ .papr.yml: log the podman version
+ namespaces.bats: fix handling of uidmap/gidmap options in pairs
+ chroot: only create user namespaces when we know we need them
+ Check /proc/sys/user/max_user_namespaces on unshare(NEWUSERNS)
+ bash/buildah: add isolation option to the from command
+
+## v1.4 (2018-10-02)
+ from: fix isolation option
+ Touchup pull manpage
 + Export buildah ReserveSELinuxLabels so podman can use it
+ Add buildah.io to README.md and doc fixes
+ Update rmi man for prune changes
+ Ignore file not found removal error in bud
+ bump(github.com/containers/{storage,image})
+ NewImageSource(): only create one Diff() at a time
+ Copy ExposedPorts from base image into the config
+ tests: run conformance test suite in Travis
+ Change rmi --prune to not accept an imageID
+ Clear intermediate container IDs after each stage
+ Request podman version for build issues
+ unshare: keep the additional groups of the user
+ Builtin volumes should be owned by the UID/GID of the container
+ Get rid of dangling whitespace in markdown files
+ Move buildah from projecatatomic/buildah to containers/buildah
+ nitpick: parse.validateFlags loop in bud cli
+ bash: Completion options
+ Add signature policy to push tests
+ vendor in latest containers/image
+ Fix grammar in Container Tools Guide
+ Don't build btrfs if it is not installed
+ new: Return image-pulling errors from resolveImage
+ pull: Return image-pulling errors from pullImage
+ Add more volume mount tests
+ chroot: create missing parent directories for volume mounts
+ Push: Allow an empty destination
+ Add Podman relationship to readme, create container tools guide
+ Fix arg usage in buildah-tag
+ Add flags/arguments order verification to other commands
+ Handle ErrDuplicateName errors from store.CreateContainer()
+ Evaluate symbolic links on Add/Copy Commands
+ Vendor in latest containers/image and containers/storage
+ Retain bounding set when running containers as non root
+ run container-diff tests in Travis
+ buildah-images.md: Fix option contents
+ push: show image digest after push succeed
+ Vendor in latest containers/storage,image,libpod and runc
+ Change references to cri-o to point at new repository
+ Exclude --layers from the common bud cli flags
+ demos: Increase the executable permissions
+ run: clear default seccomp filter if not enabled
+ Bump maximum cyclomatic complexity to 45
+ stdin: on HUP, read everything
+ nitpick: use tabs in tests/helpers.bash
+ Add flags/arguments order verification to one arg commands
+ nitpick: decrease cognitive complexity in buildah-bud
+ rename: Avoid renaming the same name as other containers
+ chroot isolation: chroot() before setting up seccomp
+ Small nitpick at the "if" condition in tag.go
+ cmd/images: Modify json option
+ cmd/images: Disallow the input of image when using the -a option
+ Fix examples to include context directory
+ Update containers/image to fix commit layer issue
+ cmd/containers: End loop early when using the json option
+ Make buildah-from error message clear when flags are after arg
+ Touch up README.md for conformance tests
+ Update container/storage for lock fix
+ cmd/rm: restore the correct containerID display
+ Remove debug lines
+ Remove docker build image after each test
+ Add README for conformance test
+ Update the MakeOptions to accept all command options for buildah
+ Update regex to fit the docker output in test "run with JSON"
+ cmd/buildah: Remove redundant variable declarations
+ Warn about using Commands in Dockerfile that are not supported by OCI.
+ Add buildah bud conformance test
+ Fix rename to also change container name in builder
+ Makefile: use $(GO) env-var everywhere
+ Cleanup code to more closely match Docker Build images
+ Document BUILDAH_* environment variables in buildah bud --help output
+ Return error immediately if error occurs in Prepare step
+ Fix --layers ADD from url issue
+ Add "Sign your PRs" TOC item to contributing.md.
+ Display the correct ID after deleting image
+ rmi: Modify the handling of errors
+ Let util.ResolveName() return parsing errors
+ Explain Open Container Initiative (OCI) acronym, add link
+ Update vendor for urfave/cli back to master
+ Handle COPY --chown in Dockerfile
+ Switch to Recommends container-selinux
+ Update vendor for containernetworking, imagebuildah and podman
+ Document STORAGE_DRIVER and STORAGE_OPTS environment variable
+ Change references to projectatomic/libpod to containers/libpod
+ Add container PATH retrieval example
+ Expand variables names for --env
+ imagebuildah: provide a way to provide stdin for RUN
+ Remove an unused srcRef.NewImageSource in pullImage
+ chroot: correct a comment
+ chroot: bind mount an empty directory for masking
+ Don't bother with --no-pivot for rootless isolation
+ CentOS need EPEL repo
+ Export a Pull() function
+ Remove stream options, since docker build does not have it
+ release v1.3: mention openSUSE
+ Add Release Announcements directory
+ Bump to v1.4-dev
+
+## 1.3 (2018-08-04)
+ Revert pull error handling from 881
+ bud should not search context directory for Dockerfile
+ Set BUILDAH_ISOLATION=rootless when running unprivileged
+ .papr.sh: Also test with BUILDAH_ISOLATION=rootless
+ Skip certain tests when we're using "rootless" isolation
+ .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot
+ Add and implement IsolationOCIRootless
+ Add a value for IsolationOCIRootless
+ Fix rmi to remove intermediate images associated with an image
+ Return policy error on pull
+ Update containers/image to 216acb1bcd2c1abef736ee322e17147ee2b7d76c
+ Switch to github.com/containers/image/pkg/sysregistriesv2
+ unshare: make adjusting the OOM score optional
+ Add flags validation
+ chroot: handle raising process limits
+ chroot: make the resource limits name map module-global
+ Remove rpm.bats, we need to run this manually
+ Set the default ulimits to match Docker
+ buildah: no args is out of bounds
+ unshare: error message missed the pid
+ preprocess ".in" suffixed Dockerfiles
+ Fix the the in buildah-config man page
+ Only test rpmbuild on latest fedora
+ Add support for multiple Short options
+ Update to latest urfave/cli
+ Add additional SELinux tests
+ Vendor in latest github.com/containers/{image;storage}
+ Stop testing with golang 1.8
+ Fix volume cache issue with buildah bud --layers
+ Create buildah pull command
+ Increase the deadline for gometalinter during 'make validate'
+ .papr.sh: Also test with BUILDAH_ISOLATION=chroot
+ .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot
+ Add a Dockerfile
+ Set BUILDAH_ISOLATION=chroot when running unprivileged
+ Add and implement IsolationChroot
+ Update github.com/opencontainers/runc
+ maybeReexecUsingUserNamespace: add a default for root
+ Allow ping command without NET_RAW Capabilities
+ rmi.storageImageID: fix Wrapf format warning
+ Allow Dockerfile content to come from stdin
+ Vendor latest container/storage to fix overlay mountopt
+ userns: assign additional IDs sequentially
+ Remove default dev/pts
+ Add OnBuild test to baseline test
+ tests/run.bats(volumes): use :z when SELinux is enabled
+ Avoid a stall in runCollectOutput()
+ Use manifest from container/image
+ Vendor in latest containers/image and containers/storage
+ add rename command
+ Completion command
+ Update CHANGELOG.md
+ Update vendor for runc to fix 32 bit builds
+ bash completion: remove shebang
+ Update vendor for runc to fix 32 bit builds
+
+## 1.2 (2018-07-14)
+ Vendor in latest containers/image
+ build-using-dockerfile: let -t include transports again
+ Block use of /proc/acpi and /proc/keys from inside containers
+ Fix handling of --registries-conf
+ Fix becoming a maintainer link
+ add optional CI test for darwin
+ Don't pass a nil error to errors.Wrapf()
+ image filter test: use kubernetes/pause as a "since"
+ Add --cidfile option to from
+ vendor: update containers/storage
+ Contributors need to find the CONTRIBUTING.md file easier
+ Add a --loglevel option to build-with-dockerfile
+ Create Development plan
+ cmd: Code improvement
+ allow buildah cross compile for a darwin target
+ Add unused function param lint check
+ docs: Follow man-pages(7) suggestions for SYNOPSIS
+ Start using github.com/seccomp/containers-golang
+ umount: add all option to umount all mounted containers
+ runConfigureNetwork(): remove an unused parameter
+ Update github.com/opencontainers/selinux
+ Fix buildah bud --layers
+ Force ownership of /etc/hosts and /etc/resolv.conf to 0:0
+ main: if unprivileged, reexec in a user namespace
+ Vendor in latest imagebuilder
+ Reduce the complexity of the buildah.Run function
+ mount: output it before replacing lastError
+ Vendor in latest selinux-go code
+ Implement basic recognition of the "--isolation" option
+ Run(): try to resolve non-absolute paths using $PATH
+ Run(): don't include any default environment variables
+ build without seccomp
+ vendor in latest runtime-tools
+ bind/mount_unsupported.go: remove import errors
+ Update github.com/opencontainers/runc
+ Add Capabilities lists to BuilderInfo
+ Tweaks for commit tests
+ commit: recognize committing to second storage locations
+ Fix ARGS parsing for run commands
+ Add info on registries.conf to from manpage
+ Switch from using docker to podman for testing in .papr
+ buildah: set the HTTP User-Agent
+ ONBUILD tutorial
+ Add information about the configuration files to the install docs
+ Makefile: add uninstall
+ Add tilde info for push to troubleshooting
+ mount: support multiple inputs
+ Use the right formatting when adding entries to /etc/hosts
+ Vendor in latest go-selinux bindings
+ Allow --userns-uid-map/--userns-gid-map to be global options
+ bind: factor out UnmountMountpoints
+ Run(): simplify runCopyStdio()
+ Run(): handle POLLNVAL results
+ Run(): tweak terminal mode handling
+ Run(): rename 'copyStdio' to 'copyPipes'
+ Run(): don't set a Pdeathsig for the runtime
+ Run(): add options for adding and removing capabilities
+ Run(): don't use a callback when a slice will do
+ setupSeccomp(): refactor
+ Change RunOptions.Stdin/Stdout/Stderr to just be Reader/Writers
+ Escape use of '_' in .md docs
+ Break out getProcIDMappings()
+ Break out SetupIntermediateMountNamespace()
+ Add Multi From Demo
+ Use the c/image conversion code instead of converting configs manually
+ Don't throw away the manifest MIME type and guess again
+ Consolidate loading manifest and config in initConfig
+ Pass a types.Image to Builder.initConfig
+ Require an image ID in importBuilderDataFromImage
+ Use c/image/manifest.GuessMIMEType instead of a custom heuristic
+ Do not ignore any parsing errors in initConfig
+ Explicitly handle "from scratch" images in Builder.initConfig
+ Fix parsing of OCI images
+ Simplify dead but dangerous-looking error handling
+ Don't ignore v2s1 history if docker_version is not set
+ Add --rm and --force-rm to buildah bud
+ Add --all,-a flag to buildah images
+ Separate stdio buffering from writing
+ Remove tty check from images --format
+ Add environment variable BUILDAH_RUNTIME
+ Add --layers and --no-cache to buildah bud
+ Touch up images man
+ version.md: fix DESCRIPTION
+ tests: add containers test
+ tests: add images test
+ images: fix usage
+ fix make clean error
+ Change 'registries' to 'container registries' in man
+ add commit test
+ Add(): learn to record hashes of what we add
+ Minor update to buildah config documentation for entrypoint
+ Bump to v1.2-dev
+ Add registries.conf link to a few man pages
+
+## 1.1 (2018-06-08)
+ Drop capabilities if running container processes as non root
+ Print Warning message if cmd will not be used based on entrypoint
+ Update 01-intro.md
+ Shouldn't add insecure registries to list of search registries
+ Report errors on bad transports specification when pushing images
+ Move parsing code out of common for namespaces and into pkg/parse.go
+ Add disable-content-trust noop flag to bud
+ Change freenode chan to buildah
+ runCopyStdio(): don't close stdin unless we saw POLLHUP
+ Add registry errors for pull
+ runCollectOutput(): just read until the pipes are closed on us
+ Run(): provide redirection for stdio
+ rmi, rm: add test
+ add mount test
+ Add parameter judgment for commands that do not require parameters
+ Add context dir to bud command in baseline test
+ run.bats: check that we can run with symlinks in the bundle path
+ Give better messages to users when image can not be found
+ use absolute path for bundlePath
+ Add environment variable to buildah --format
+ rm: add validation to args and all option
+ Accept json array input for config entrypoint
+ Run(): process RunOptions.Mounts, and its flags
+ Run(): only collect error output from stdio pipes if we created some
+ Add OnBuild support for Dockerfiles
+ Quick fix on demo readme
+ run: fix validate flags
+ buildah bud should require a context directory or URL
+ Touchup tutorial for run changes
+ Validate common bud and from flags
+ images: Error if the specified imagename does not exist
+ inspect: Increase err judgments to avoid panic
+ add test to inspect
+ buildah bud picks up ENV from base image
+ Extend the amount of time travis_wait should wait
+ Add a make target for Installing CNI plugins
+ Add tests for namespace control flags
+ copy.bats: check ownerships in the container
+ Fix SELinux test errors when SELinux is enabled
+ Add example CNI configurations
+ Run: set supplemental group IDs
+ Run: use a temporary mount namespace
+ Use CNI to configure container networks
+ add/secrets/commit: Use mappings when setting permissions on added content
+ Add CLI options for specifying namespace and cgroup setup
+ Always set mappings when using user namespaces
+ Run(): break out creation of stdio pipe descriptors
+ Read UID/GID mapping information from containers and images
+ Additional bud CI tests
+ Run integration tests under travis_wait in Travis
+ build-using-dockerfile: add --annotation
+ Implement --squash for build-using-dockerfile and commit
+ Vendor in latest container/storage for devicemapper support
+ add test to inspect
+ Vendor github.com/onsi/ginkgo and github.com/onsi/gomega
+ Test with Go 1.10, too
+ Add console syntax highlighting to troubleshooting page
+ bud.bats: print "$output" before checking its contents
+ Manage "Run" containers more closely
+ Break Builder.Run()'s "run runc" bits out
+ util.ResolveName(): handle completion for tagged/digested image names
+ Handle /etc/hosts and /etc/resolv.conf properly in container
+ Documentation fixes
+ Make it easier to parse our temporary directory as an image name
+ Makefile: list new pkg/ subdirectories as dependencies for buildah
+ containerImageSource: return more-correct errors
+ API cleanup: PullPolicy and TerminalPolicy should be types
+ Make "run --terminal" and "run -t" aliases for "run --tty"
+ Vendor github.com/containernetworking/cni v0.6.0
+ Update github.com/containers/storage
+ Update github.com/containers/libpod
+ Add support for buildah bud --label
+ buildah push/from can push and pull images with no reference
+ Vendor in latest containers/image
+ Update gometalinter to fix install.tools error
+ Update troubleshooting with new run workaround
+ Added a bud demo and tidied up
+ Attempt to download file from url, if fails assume Dockerfile
+ Add buildah bud CI tests for ENV variables
+ Re-enable rpm .spec version check and new commit test
+ Update buildah scratch demo to support el7
+ Added Docker compatibility demo
+ Update to F28 and new run format in baseline test
+ Touchup man page short options across man pages
+ Added demo dir and a demo; changed distro release
+ builder-inspect: fix format option
+ Add cpu-shares short flag (-c) and cpu-shares CI tests
+ Minor fixes to formatting in rpm spec changelog
+ Fix rpm .spec changelog formatting
+ CI tests and minor fix for cache related noop flags
+ buildah-from: add effective value to mount propagation
+
+## 1.0 (2018-05-06)
+ Declare Buildah 1.0
+ Add cache-from and no-cache noops, and fix doco
+ Update option and documentation for --force-rm
+ Adding noop for --force-rm to match --rm
+ Add buildah bud ENTRYPOINT,CMD,RUN tests
+ Adding buildah bud RUN test scenarios
+ Extend tests for empty buildah run command
+ Fix formatting error in run.go
+ Update buildah run to make command required
+ Expanding buildah run cmd/entrypoint tests
+ Update test cases for buildah run behaviour
+ Remove buildah run cmd and entrypoint execution
+ Add Files section with registries.conf to pertinent man pages
+ tests/config: perfect test
+ tests/from: add name test
+ Do not print directly to stdout in Commit()
+ Touch up auth test commands
+ Force "localhost" as a default registry
+ Drop util.GetLocalTime()
+ Vendor in latest containers/image
+ Validate host and container paths passed to --volume
+ test/from: add add-host test
+ Add --compress, --rm, --squash flags as a noop for bud
+ Add FIPS mode secret to buildah run and bud
+ Add config --comment/--domainname/--history-comment/--hostname
+ 'buildah config': stop replacing Created-By whenever it's not specified
+ Modify man pages so they compile correctly in mandb
+ Add description on how to do --isolation to buildah-bud man page
+ Add support for --iidfile to bud and commit
+ Refactor buildah bud for vendoring
+ Fail if date or git not installed
+ Revert update of entrypoint behaviour to match docker
+ Vendor in latest imagebuilder code to fix multiple stage builds
+ Add /bin/sh -c to entrypoint in config
+ image_test: Improve the test
+ Fix README example of buildah config
+ buildah-image: add validation to 'format'
+ Simple changes to allow buildah to pass make validate
+ Clarify the use of buildah config options
+ containers_test: Perfect testing
+ buildah images and podman images are listing different sizes
+ buildah-containers: add tests and example to the man page
+ buildah-containers: add validation to 'format'
+ Clarify the use of buildah config options
+ Minor fix for lighttpd example in README
+ Add tls-verification to troubleshooting
+ Modify buildah rmi to account for changes in containers/storage
+ Vendor in latest containers/image and containers/storage
+ addcopy: add src validation
+ Remove tarball as an option from buildah push --help
+ Fix secrets patch
+ Update entrypoint behaviour to match docker
+ Display imageId after commit
+ config: add support for StopSignal
+ Fix docker login issue in travis.yml
+ Allow referencing stages as index and names
+ Add multi-stage builds tests
+ Add multi-stage builds support
+ Add accessor functions for comment and stop signal
+ Vendor in latest imagebuilder, to get mixed case AS support
+ Allow umount to have multi-containers
+ Update buildah push doc
+ buildah bud walks symlinks
+ Imagename is required for commit atm, update manpage
+
+## 0.16.0 (2018-04-08)
+ Bump to v0.16.0
+ Remove requires for ostree-lib in rpm spec file
+ Add support for shell
+ buildah.spec should require ostree-libs
+ Vendor in latest containers/image
+ bash: prefer options
+ Change image time to locale, add troubleshooting.md, add logo to other mds
+ buildah-run.md: fix error SYNOPSIS
+ docs: fix error example
+ Allow --cmd parameter to have commands as values
+ Touchup README to re-enable logo
+ Clean up README.md
+ Make default-mounts-file a hidden option
+ Document the mounts.conf file
+ Fix man pages to format correctly
+ Add various transport support to buildah from
+ Add unit tests to run.go
+ If the user overrides the storage driver, the options should be dropped
+ Show Config/Manifest as JSON string in inspect when format is not set
+ Switch which for that in README.md
+ Remove COPR
+ Fix wrong order of parameters
+ Vendor in latest containers/image
+ Remove shallowCopy(), which shouldn't be saving us time any more
+ shallowCopy: avoid a second read of the container's layer
+
+## 0.5 - 2017-11-07
+ Add secrets patch to buildah
+ Add proper SELinux labeling to buildah run
+ Add tls-verify to bud command
+ Make filtering by date use the image's date
+ images: don't list unnamed images twice
+ Fix timeout issue
+ Add further tty verbiage to buildah run
+ Make inspect try an image on failure if type not specified
+ Add support for `buildah run --hostname`
+ Tons of bug fixes and code cleanup
+
+## 0.4 - 2017-09-22
+### Added
+ Update buildah spec file to match new version
+ Bump to version 0.4
+ Add default transport to push if not provided
+ Add authentication to commit and push
+ Remove --transport flag
+ Run: don't complain about missing volume locations
+ Add credentials to buildah from
+ Remove export command
+ Bump containers/storage and containers/image
+
+## 0.3 - 2017-07-20
+## 0.2 - 2017-07-18
+### Added
+ Vendor in latest containers/image and containers/storage
+ Update image-spec and runtime-spec to v1.0.0
+ Add support for -- ending options parsing to buildah run
+ Add/Copy need to support glob syntax
+ Add flag to remove containers on commit
+ Add buildah export support
+ update 'buildah images' and 'buildah rmi' commands
+ buildah containers/image: Add JSON output option
+ Add 'buildah version' command
+ Handle "run" without an explicit command correctly
+ Ensure volume points get created, and with perms
+ Add a -a/--all option to "buildah containers"
+
+## 0.1 - 2017-06-14
+### Added
+ Vendor in latest container/storage container/image
+ Add a "push" command
+ Add an option to specify a Create date for images
+ Allow building a source image from another image
+ Improve buildah commit performance
+ Add a --volume flag to "buildah run"
+ Fix inspect/tag-by-truncated-image-ID
+ Include image-spec and runtime-spec versions
+ buildah mount command should list mounts when no arguments are given.
+ Make the output image format selectable
+ commit images in multiple formats
+ Also import configurations from V2S1 images
+ Add a "tag" command
+ Add an "inspect" command
+ Update reference comments for docker types origins
+ Improve configuration preservation in imagebuildah
+ Report pull/commit progress by default
+ Contribute buildah.spec
+ Remove --mount from buildah-from
+ Add a build-using-dockerfile command (alias: bud)
+ Create manpages for the buildah project
+ Add installation for buildah and bash completions
+ Rename "list"/"delete" to "containers"/"rm"
+ Switch `buildah list quiet` option to only list container id's
+ buildah delete should be able to delete multiple containers
+ Correctly set tags on the names of pulled images
+ Don't mix "config" in with "run" and "commit"
+ Add a "list" command, for listing active builders
+ Add "add" and "copy" commands
+ Add a "run" command, using runc
+ Massive refactoring
+ Make a note to distinguish compression of layers
+
+## 0.0 - 2017-01-26
+### Added
+ Initial version, needs work
diff --git a/CODE-OF-CONDUCT.md b/CODE-OF-CONDUCT.md
new file mode 100644
index 0000000..1b9b2de
--- /dev/null
+++ b/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The Buildah Project Community Code of Conduct
+
+The Buildah Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..1104d33
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,177 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Contributing to Buildah
+
+We'd love to have you join the community! Below summarizes the processes
+that we follow.
+
+## Topics
+
+* [Reporting Issues](#reporting-issues)
+* [Working On Issues](#working-on-issues)
+* [Submitting Pull Requests](#submitting-pull-requests)
+* [Sign your PRs](#sign-your-prs)
+* [Merge bot interaction](#merge-bot-interaction)
+* [Communications](#communications)
+* [Becoming a Maintainer](#becoming-a-maintainer)
+
+## Reporting Issues
+
+Before reporting an issue, check our backlog of
+[open issues](https://github.com/containers/buildah/issues)
+to see if someone else has already reported it. If so, feel free to add
+your scenario, or additional information, to the discussion. Or simply
+"subscribe" to it to be notified when it is updated.
+
+If you find a new issue with the project we'd love to hear about it! The most
+important aspect of a bug report is that it includes enough information for
+us to reproduce it. So, please include as much detail as possible and try
+to remove the extra stuff that doesn't really relate to the issue itself.
+The easier it is for us to reproduce it, the faster it'll be fixed!
+
+Please don't include any private/sensitive information in your issue!
+
+## Working On Issues
+
+Once you have decided to contribute to Buildah by working on an issue, check our
+backlog of [open issues](https://github.com/containers/buildah/issues) looking
+for any that do not have an "In Progress" label attached to it. Often issues
+will be assigned to someone, to be worked on at a later time. If you have the
+time to work on the issue now, add yourself as an assignee, and set the
+"In Progress" label if you’re a member of the “Containers” GitHub organization.
+If you can not set the label, just add a quick comment in the issue asking that
+the “In Progress” label be set and a member will do so for you.
+
+## Submitting Pull Requests
+
+No Pull Request (PR) is too small! Typos, additional comments in the code,
+new testcases, bug fixes, new features, more documentation, ... it's all
+welcome!
+
+While bug fixes can first be identified via an "issue", that is not required.
+It's ok to just open up a PR with the fix, but make sure you include the same
+information you would have included in an issue - like how to reproduce it.
+
+PRs for new features should include some background on what use cases the
+new code is trying to address. When possible and when it makes sense, try to break up
+larger PRs into smaller ones - it's easier to review smaller
+code changes. But only if those smaller ones make sense as stand-alone PRs.
+
+Regardless of the type of PR, all PRs should include:
+* well documented code changes
+* additional testcases. Ideally, they should fail w/o your code change applied
+* documentation changes
+
+Squash your commits into logical pieces of work that might want to be reviewed
+separate from the rest of the PRs. But, squashing down to just one commit is ok
+too since in the end the entire PR will be reviewed anyway. When in doubt,
+squash.
+
+PRs that fix issues should include a reference like `Closes #XXXX` in the
+commit message so that github will automatically close the referenced issue
+when the PR is merged.
+
+<!--
+All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
+-->
+
+### Sign your PRs
+
+The sign-off is a line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+## Merge bot interaction
+
+Maintainers should never merge anything directly into upstream
+branches. Instead, interact with the [openshift-ci-robot](https://github.com/openshift-ci-robot/)
+through PR comments as summarized [here](https://prow.ci.openshift.org/command-help?repo=containers%2Fbuildah).
+This ensures all upstream
+branches contain commits in a predictable order, and that every commit
+has passed automated testing at some point in the past. A
+[Maintainer portal](https://prow.ci.openshift.org/pr?query=is%3Apr%20state%3Aopen%20repo%3Acontainers%2Fbuildah)
+is available, showing all PRs awaiting review and approval.
+
+## Communications
+
+For general questions or discussions, please use the
+IRC channel `#podman` on `irc.libera.chat`. If you are unfamiliar with IRC you can start a web client at https://web.libera.chat/#podman.
+
+Alternatively, [\[matrix\]](https://matrix.org) can be used to access the same channel via federation at https://matrix.to/#/#podman:chat.fedoraproject.org.
+
+### For discussions around issues/bugs and features:
+
+#### GitHub
+You can also use GitHub
+[issues](https://github.com/containers/buildah/issues)
+and
+[PRs](https://github.com/containers/buildah/pulls)
+tracking system.
+
+#### Buildah Mailing List
+
+
+You can join the Buildah mailing list by sending an email to `buildah-join@lists.buildah.io` with the word `subscribe` in the subject. You can also go to this [page](https://lists.podman.io/admin/lists/buildah.lists.buildah.io/), then scroll down to the bottom of the page and enter your email and optionally name, then click on the "Subscribe" button.
+
+## Becoming a Maintainer
+
+To become a maintainer you must first be nominated by an existing maintainer.
+If a majority (>50%) of maintainers agree then the proposal is adopted and
+you will be added to the list.
+
+Removing a maintainer requires at least 75% of the remaining maintainers
+approval, or if the person requests to be removed then it is automatic.
+Normally, a maintainer will only be removed if they are considered to be
+inactive for a long period of time or are viewed as disruptive to the community.
+
+The current list of maintainers can be found in the
+[MAINTAINERS](MAINTAINERS) file.
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/MAINTAINERS b/MAINTAINERS
new file mode 100644
index 0000000..5725ca9
--- /dev/null
+++ b/MAINTAINERS
@@ -0,0 +1,4 @@
+Dan Walsh <dwalsh@redhat.com> (@rhatdan)
+Nalin Dahyabhai <nalin@redhat.com> (@nalind)
+Tom Sweeney <tsweeney@redhat.com> (@tomsweeneyredhat)
+Urvashi Mohnani <umohnani@redhat.com> (@umohnani8)
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..85b43c7
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,217 @@
+export GOPROXY=https://proxy.golang.org
+
+APPARMORTAG := $(shell hack/apparmor_tag.sh)
+STORAGETAGS := exclude_graphdriver_devicemapper $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh)
+SECURITYTAGS ?= seccomp $(APPARMORTAG)
+TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh)
+BUILDTAGS += $(TAGS)
+PREFIX := /usr/local
+BINDIR := $(PREFIX)/bin
+BASHINSTALLDIR = $(PREFIX)/share/bash-completion/completions
+BUILDFLAGS := -tags "$(BUILDTAGS)"
+BUILDAH := buildah
+
+GO := go
+GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi)
+GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi)
+# test for go module support
+ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
+export GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor
+export GO_TEST=GO111MODULE=on $(GO) test -mod=vendor
+else
+export GO_BUILD=$(GO) build
+export GO_TEST=$(GO) test
+endif
+RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
+
+COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
+GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
+SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
+STATIC_STORAGETAGS = "containers_image_openpgp $(STORAGETAGS)"
+
+# we get GNU make 3.x in MacOS build envs, which wants # to be escaped in
+# strings, while the 4.x we have on Linux doesn't. this is the documented
+# workaround
+COMMENT := \#
+CNI_COMMIT := $(shell sed -n 's;^$(COMMENT) github.com/containernetworking/cni \([^ \n]*\).*$$;\1;p' vendor/modules.txt)
+RUNC_COMMIT := $(shell sed -n 's;^$(COMMENT) github.com/opencontainers/runc \([^ \n]*\).*$$;\1;p' vendor/modules.txt)
+LIBSECCOMP_COMMIT := release-2.3
+
+EXTRA_LDFLAGS ?=
+BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
+SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
+
+LINTFLAGS ?=
+
+ifeq ($(BUILDDEBUG), 1)
+ override GOGCFLAGS += -N -l
+endif
+
+# make all BUILDDEBUG=1
+# Note: Uses the -N -l go compiler options to disable compiler optimizations
+# and inlining. Using these build options allows you to subsequently
+# use source debugging tools like delve.
+all: bin/buildah bin/imgtype bin/copy bin/tutorial docs
+
+# Update nix/nixpkgs.json its latest stable commit
+.PHONY: nixpkgs
+nixpkgs:
+ @nix run \
+ -f channel:nixos-20.09 nix-prefetch-git \
+ -c nix-prefetch-git \
+ --no-deepClone \
+ https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json
+
+# Build statically linked binary
+.PHONY: static
+static:
+ @nix build -f nix/
+ mkdir -p ./bin
+ cp -rfp ./result/bin/* ./bin/
+
+bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint.gz
+ $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
+
+ifneq ($(shell as --version | grep x86_64),)
+internal/mkcw/embed/entrypoint: internal/mkcw/embed/entrypoint.s
+ $(AS) -o $(patsubst %.s,%.o,$^) $^
+ $(LD) -o $@ $(patsubst %.s,%.o,$^)
+ strip $@
+else
+.PHONY: internal/mkcw/embed/entrypoint
+endif
+
+internal/mkcw/embed/entrypoint.gz: internal/mkcw/embed/entrypoint
+ $(RM) $@
+ gzip -k $^
+
+.PHONY: buildah
+buildah: bin/buildah
+
+ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list)))
+LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS))
+DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS))
+WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS)))
+FREEBSD_CROSS_TARGETS := $(filter bin/buildah.freebsd.%,$(ALL_CROSS_TARGETS))
+.PHONY: cross
+cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) $(FREEBSD_CROSS_TARGETS)
+
+bin/buildah.%:
+ mkdir -p ./bin
+ GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah
+
+bin/imgtype: $(SOURCES) tests/imgtype/imgtype.go
+ $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/imgtype/imgtype.go
+
+bin/copy: $(SOURCES) tests/copy/copy.go
+ $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/copy/copy.go
+
+bin/tutorial: $(SOURCES) tests/tutorial/tutorial.go
+ $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/tutorial/tutorial.go
+
+.PHONY: clean
+clean:
+ $(RM) -r bin tests/testreport/testreport
+ $(MAKE) -C docs clean
+
+.PHONY: docs
+docs: install.tools ## build the docs on the host
+ $(MAKE) -C docs
+
+# For vendoring to work right, the checkout directory must be such that our top
+# level is at $GOPATH/src/github.com/containers/buildah.
+.PHONY: gopath
+gopath:
+ test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd)
+
+codespell:
+ codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L passt,bu,uint,iff,od,erro -w
+
+.PHONY: validate
+validate: install.tools
+ ./tests/validate/whitespace.sh
+ ./hack/xref-helpmsgs-manpages
+ ./tests/validate/pr-should-include-tests
+
+.PHONY: install.tools
+install.tools:
+ $(MAKE) -C tests/tools
+
+.PHONY: runc
+runc: gopath
+ rm -rf ../../opencontainers/runc
+ git clone https://github.com/opencontainers/runc ../../opencontainers/runc
+ cd ../../opencontainers/runc && git checkout $(RUNC_COMMIT) && $(GO) build -tags "$(STORAGETAGS) $(SECURITYTAGS)"
+ ln -sf ../../opencontainers/runc/runc
+
+.PHONY: install.libseccomp.sudo
+install.libseccomp.sudo: gopath
+ rm -rf ../../seccomp/libseccomp
+ git clone https://github.com/seccomp/libseccomp ../../seccomp/libseccomp
+ cd ../../seccomp/libseccomp && git checkout $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && sudo make install
+
+.PHONY: install.cni.sudo
+install.cni.sudo: gopath
+ rm -rf ../../containernetworking/plugins
+ git clone https://github.com/containernetworking/plugins ../../containernetworking/plugins
+ cd ../../containernetworking/plugins && ./build_linux.sh && sudo install -D -v -m755 -t /opt/cni/bin/ bin/*
+
+.PHONY: install
+install:
+ install -d -m 755 $(DESTDIR)/$(BINDIR)
+ install -m 755 bin/buildah $(DESTDIR)/$(BINDIR)/buildah
+ $(MAKE) -C docs install
+
+.PHONY: uninstall
+uninstall:
+	rm -f $(DESTDIR)/$(BINDIR)/buildah
+	rm -f $(DESTDIR)/$(PREFIX)/share/man/man1/buildah*.1
+	rm -f $(DESTDIR)/$(BASHINSTALLDIR)/buildah
+
+.PHONY: install.completions
+install.completions:
+ install -m 755 -d $(DESTDIR)/$(BASHINSTALLDIR)
+ install -m 644 contrib/completions/bash/buildah $(DESTDIR)/$(BASHINSTALLDIR)/buildah
+
+.PHONY: install.runc
+install.runc:
+ install -m 755 ../../opencontainers/runc/runc $(DESTDIR)/$(BINDIR)/
+
+.PHONY: test-conformance
+test-conformance:
+ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -timeout 60m ./tests/conformance
+
+.PHONY: test-integration
+test-integration: install.tools
+ ./tests/tools/build/ginkgo $(BUILDFLAGS) -v tests/e2e/.
+ cd tests; ./test_runner.sh
+
+tests/testreport/testreport: tests/testreport/testreport.go
+ $(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go
+
+.PHONY: test-unit
+test-unit: tests/testreport/testreport
+ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd | grep -v chroot | grep -v copier) -timeout 45m
+ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" $(RACEFLAGS) ./chroot ./copier -timeout 60m
+ tmp=$(shell mktemp -d) ; \
+ mkdir -p $$tmp/root $$tmp/runroot; \
+ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf
+
+vendor-in-container:
+ podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.20 make vendor
+
+.PHONY: vendor
+vendor:
+ GO111MODULE=on $(GO) mod tidy
+ GO111MODULE=on $(GO) mod vendor
+ GO111MODULE=on $(GO) mod verify
+
+.PHONY: lint
+lint: install.tools
+ ./tests/tools/build/golangci-lint run $(LINTFLAGS)
+
+# CAUTION: This is not a replacement for RPMs provided by your distro.
+# Only intended to build and test the latest unreleased changes.
+.PHONY: rpm
+rpm:
+ rpkg local
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..2eb9ea8
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,28 @@
+approvers:
+ - TomSweeneyRedHat
+ - ashley-cui
+ - cevich
+ - flouthoc
+ - giuseppe
+ - lsm5
+ - nalind
+ - rhatdan
+ - umohnani8
+ - vrothberg
+reviewers:
+ - QiWang19
+ - TomSweeneyRedHat
+ - ashley-cui
+ - baude
+ - cevich
+ - edsantiago
+ - giuseppe
+ - haircommander
+ - jwhonce
+ - lsm5
+ - mheon
+ - mrunalp
+ - nalind
+ - rhatdan
+ - umohnani8
+ - vrothberg
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..ded4870
--- /dev/null
+++ b/README.md
@@ -0,0 +1,132 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah)
+
+
+The Buildah package provides a command line tool that can be used to
+* create a working container, either from scratch or using an image as a starting point
+* create an image, either from a working container or via the instructions in a Dockerfile
+* build images in either the OCI image format or the traditional upstream docker image format
+* mount a working container's root filesystem for manipulation
+* unmount a working container's root filesystem
+* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
+* delete a working container or an image
+* rename a local container
+
+## Buildah Information for Developers
+
+For blogs, release announcements and more, please check out the [buildah.io](https://buildah.io) website!
+
+**[Buildah Demos](demos)**
+
+**[Changelog](CHANGELOG.md)**
+
+**[Contributing](CONTRIBUTING.md)**
+
+**[Development Plan](developmentplan.md)**
+
+**[Installation notes](install.md)**
+
+**[Troubleshooting Guide](troubleshooting.md)**
+
+**[Tutorials](docs/tutorials)**
+
+## Buildah and Podman relationship
+
+Buildah and Podman are two complementary open-source projects that are
+available on most Linux platforms and both projects reside at
+[GitHub.com](https://github.com) with Buildah
+[here](https://github.com/containers/buildah) and Podman
+[here](https://github.com/containers/podman). Both, Buildah and Podman are
+command line tools that work on Open Container Initiative (OCI) images and
+containers. The two projects differentiate in their specialization.
+
+Buildah specializes in building OCI images. Buildah's commands replicate all
+of the commands that are found in a Dockerfile. This allows building images
+with and without Dockerfiles while not requiring any root privileges.
+Buildah’s ultimate goal is to provide a lower-level coreutils interface to
+build images. The flexibility of building images without Dockerfiles allows
+for the integration of other scripting languages into the build process.
+Buildah follows a simple fork-exec model and does not run as a daemon
+but it is based on a comprehensive API in golang, which can be vendored
+into other tools.
+
+Podman specializes in all of the commands and functions that help you to maintain and modify
+OCI images, such as pulling and tagging. It also allows you to create, run, and maintain those containers
+created from those images. For building container images via Dockerfiles, Podman uses Buildah's
+golang API and can be installed independently from Buildah.
+
+A major difference between Podman and Buildah is their concept of a container. Podman
+allows users to create "traditional containers" where the intent of these containers is
+to be long lived. While Buildah containers are really just created to allow content
+to be added back to the container image. An easy way to think of it is the
+`buildah run` command emulates the RUN command in a Dockerfile while the `podman run`
+command emulates the `docker run` command in functionality. Because of this and their underlying
+storage differences, you cannot see Podman containers from within Buildah or vice versa.
+
+In short, Buildah is an efficient way to create OCI images while Podman allows
+you to manage and maintain those images and containers in a production environment using
+familiar container cli commands. For more details, see the
+[Container Tools Guide](https://github.com/containers/buildah/tree/main/docs/containertools).
+
+## Example
+
+From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
+
+```bash
+$ cat > lighttpd.sh <<"EOF"
+#!/usr/bin/env bash
+
+set -x
+
+ctr1=$(buildah from "${1:-fedora}")
+
+## Get all updates and install our minimal httpd server
+buildah run "$ctr1" -- dnf update -y
+buildah run "$ctr1" -- dnf install -y lighttpd
+
+## Include some buildtime annotations
+buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"
+
+## Run our server and expose the port
+buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
+buildah config --port 80 "$ctr1"
+
+## Commit this container to an image name
+buildah commit "$ctr1" "${2:-$USER/lighttpd}"
+EOF
+
+$ chmod +x lighttpd.sh
+$ ./lighttpd.sh
+```
+
+## Commands
+| Command | Description |
+| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
+| [buildah-add(1)](/docs/buildah-add.1.md) | Add the contents of a file, URL, or a directory to the container. |
+| [buildah-build(1)](/docs/buildah-build.1.md) | Build an image using instructions from Containerfiles or Dockerfiles. |
+| [buildah-commit(1)](/docs/buildah-commit.1.md) | Create an image from a working container. |
+| [buildah-config(1)](/docs/buildah-config.1.md) | Update image configuration settings. |
+| [buildah-containers(1)](/docs/buildah-containers.1.md) | List the working containers and their base images. |
+| [buildah-copy(1)](/docs/buildah-copy.1.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
+| [buildah-from(1)](/docs/buildah-from.1.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
+| [buildah-images(1)](/docs/buildah-images.1.md) | List images in local storage. |
+| [buildah-info(1)](/docs/buildah-info.1.md) | Display Buildah system information. |
+| [buildah-inspect(1)](/docs/buildah-inspect.1.md) | Inspects the configuration of a container or image. |
+| [buildah-mount(1)](/docs/buildah-mount.1.md) | Mount the working container's root filesystem. |
+| [buildah-pull(1)](/docs/buildah-pull.1.md) | Pull an image from the specified location. |
+| [buildah-push(1)](/docs/buildah-push.1.md) | Push an image from local storage to elsewhere. |
+| [buildah-rename(1)](/docs/buildah-rename.1.md) | Rename a local container. |
+| [buildah-rm(1)](/docs/buildah-rm.1.md) | Removes one or more working containers. |
+| [buildah-rmi(1)](/docs/buildah-rmi.1.md) | Removes one or more images. |
+| [buildah-run(1)](/docs/buildah-run.1.md) | Run a command inside of the container. |
+| [buildah-tag(1)](/docs/buildah-tag.1.md) | Add an additional name to a local image. |
+| [buildah-umount(1)](/docs/buildah-umount.1.md) | Unmount a working container's root file system. |
+| [buildah-unshare(1)](/docs/buildah-unshare.1.md) | Launch a command in a user namespace with modified ID mappings. |
+| [buildah-version(1)](/docs/buildah-version.1.md) | Display the Buildah version information. |
+
+**Future goals include:**
+* more CI tests
+* additional CLI commands (?)
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..dfc531a
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the Buildah Project
+
+The Buildah Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
diff --git a/add.go b/add.go
new file mode 100644
index 0000000..c61de5a
--- /dev/null
+++ b/add.go
@@ -0,0 +1,727 @@
+package buildah
+
+import (
+ "archive/tar"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/chrootuser"
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/hashicorp/go-multierror"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/runc/libcontainer/userns"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+)
+
// AddAndCopyOptions holds options for add and copy commands.
type AddAndCopyOptions struct {
	// Chmod sets the access permissions of the destination content.
	Chmod string
	// Chown is a spec for the user who should be given ownership over the
	// newly-added content, potentially overriding permissions which would
	// otherwise be set to 0:0.
	Chown string
	// Checksum is a standard container digest string (e.g. <algorithm>:<digest>)
	// and is the expected hash of the content being copied.
	// It is only honored for remote (http/https) sources; Add() returns an
	// error when it is set for local sources.
	Checksum string
	// PreserveOwnership, if Chown is not set, tells us to avoid setting
	// ownership of copied items to 0:0, instead using whatever ownership
	// information is already set. Not meaningful for remote sources or
	// local archives that we extract.
	PreserveOwnership bool
	// All of the data being copied will pass through Hasher, if set.
	// If the sources are URLs or files, their contents will be passed to
	// Hasher.
	// If the sources include directory trees, Hasher will be passed
	// tar-format archives of the directory trees.
	Hasher io.Writer
	// Excludes is the contents of the .containerignore file.
	Excludes []string
	// IgnoreFile is the path to the .containerignore file.
	IgnoreFile string
	// ContextDir is the base directory for content being copied and
	// Excludes patterns.
	ContextDir string
	// ID mapping options to use when contents to be copied are part of
	// another container, and need ownerships to be mapped from the host to
	// that container's values before copying them into the container.
	IDMappingOptions *define.IDMappingOptions
	// DryRun indicates that the content should be digested, but not actually
	// copied into the container.
	DryRun bool
	// Clear the setuid bit on items being copied. Has no effect on
	// archives being extracted, where the bit is always preserved.
	StripSetuidBit bool
	// Clear the setgid bit on items being copied. Has no effect on
	// archives being extracted, where the bit is always preserved.
	StripSetgidBit bool
	// Clear the sticky bit on items being copied. Has no effect on
	// archives being extracted, where the bit is always preserved.
	StripStickyBit bool
}
+
// sourceIsRemote returns true if "source" is a remote location, i.e. an
// http:// or https:// URL.  The comparison is case-sensitive.
func sourceIsRemote(source string) bool {
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(source, scheme) {
			return true
		}
	}
	return false
}
+
// getURL fetches src, which must be an http:// or https:// URL, and writes
// its content to "writer" as a tar archive containing a single regular file.
// The entry is named renameTarget if that's non-empty, otherwise the base
// name of the URL's path.  Ownership comes from chown (default 0:0) and
// permissions from chmod (default 0600).  When the server doesn't report a
// Content-Length, the body is spooled to a temporary file under mountpoint
// first, because the tar header needs the size up front.  If srcDigest is
// non-empty, the downloaded bytes are digested and a mismatch is reported
// as an error (note: only after the content has already been streamed into
// the archive).
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest) error {
	url, err := url.Parse(src)
	if err != nil {
		return err
	}
	response, err := http.Get(src)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// Accept any 2xx/3xx status; 4xx and up (and anything below 200) is an error.
	if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusBadRequest {
		return fmt.Errorf("invalid response status %d", response.StatusCode)
	}

	// Figure out what to name the new content.
	name := renameTarget
	if name == "" {
		name = path.Base(url.Path)
	}
	// If there's a date on the content, use it. If not, use the Unix epoch
	// for compatibility.
	date := time.Unix(0, 0).UTC()
	lastModified := response.Header.Get("Last-Modified")
	if lastModified != "" {
		d, err := time.Parse(time.RFC1123, lastModified)
		if err != nil {
			return fmt.Errorf("parsing last-modified time: %w", err)
		}
		date = d
	}
	// Figure out the size of the content.
	size := response.ContentLength
	var responseBody io.Reader = response.Body
	if size < 0 {
		// Create a temporary file and copy the content to it, so that
		// we can figure out how much content there is.
		f, err := os.CreateTemp(mountpoint, "download")
		if err != nil {
			return fmt.Errorf("creating temporary file to hold %q: %w", src, err)
		}
		defer os.Remove(f.Name())
		defer f.Close()
		size, err = io.Copy(f, response.Body)
		if err != nil {
			return fmt.Errorf("writing %q to temporary file %q: %w", src, f.Name(), err)
		}
		_, err = f.Seek(0, io.SeekStart)
		if err != nil {
			return fmt.Errorf("setting up to read %q from temporary file %q: %w", src, f.Name(), err)
		}
		responseBody = f
	}
	// If a digest is expected, tee everything we read through a matching digester.
	var digester digest.Digester
	if srcDigest != "" {
		digester = srcDigest.Algorithm().Digester()
		responseBody = io.TeeReader(responseBody, digester.Hash())
	}
	// Write the output archive. Set permissions for compatibility.
	tw := tar.NewWriter(writer)
	defer tw.Close()
	uid := 0
	gid := 0
	if chown != nil {
		uid = chown.UID
		gid = chown.GID
	}
	var mode int64 = 0600
	if chmod != nil {
		mode = int64(*chmod)
	}
	hdr := tar.Header{
		Typeflag: tar.TypeReg,
		Name:     name,
		Size:     size,
		Uid:      uid,
		Gid:      gid,
		Mode:     mode,
		ModTime:  date,
	}
	err = tw.WriteHeader(&hdr)
	if err != nil {
		return fmt.Errorf("writing header: %w", err)
	}

	if _, err := io.Copy(tw, responseBody); err != nil {
		return fmt.Errorf("writing content from %q to tar stream: %w", src, err)
	}

	// Verify the payload against the expected digest, now that all of it
	// has passed through the digester.
	if digester != nil {
		if responseDigest := digester.Digest(); responseDigest != srcDigest {
			return fmt.Errorf("unexpected response digest for %q: %s, want %s", src, responseDigest, srcDigest)
		}
	}

	return nil
}
+
+// includeDirectoryAnyway returns true if "path" is a prefix for an exception
+// known to "pm". If "path" is a directory that "pm" claims matches its list
+// of patterns, but "pm"'s list of exclusions contains a pattern for which
+// "path" is a prefix, then IncludeDirectoryAnyway() will return true.
+// This is not always correct, because it relies on the directory part of any
+// exception paths to be specified without wildcards.
+func includeDirectoryAnyway(path string, pm *fileutils.PatternMatcher) bool {
+ if !pm.Exclusions() {
+ return false
+ }
+ prefix := strings.TrimPrefix(path, string(os.PathSeparator)) + string(os.PathSeparator)
+ for _, pattern := range pm.Patterns() {
+ if !pattern.Exclusion() {
+ continue
+ }
+ spec := strings.TrimPrefix(pattern.String(), string(os.PathSeparator))
+ if strings.HasPrefix(spec, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.  Remote (http/https) sources are downloaded and added
// as single files; local sources are resolved relative to
// options.ContextDir (or the current working directory when no context
// directory is set) and filtered through the options.Excludes patterns.
// The container is mounted for the duration of the call and unmounted
// again before returning.
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, sources ...string) error {
	mountPoint, err := b.Mount(b.MountLabel)
	if err != nil {
		return err
	}
	defer func() {
		if err2 := b.Unmount(); err2 != nil {
			logrus.Errorf("error unmounting container: %v", err2)
		}
	}()

	// With no explicit context directory, the context is the filesystem
	// root and relative sources are resolved against the current working
	// directory; otherwise the context directory is made absolute.
	contextDir := options.ContextDir
	currentDir := options.ContextDir
	if options.ContextDir == "" {
		contextDir = string(os.PathSeparator)
		currentDir, err = os.Getwd()
		if err != nil {
			return fmt.Errorf("determining current working directory: %w", err)
		}
	} else {
		if !filepath.IsAbs(options.ContextDir) {
			contextDir, err = filepath.Abs(options.ContextDir)
			if err != nil {
				return fmt.Errorf("converting context directory path %q to an absolute path: %w", options.ContextDir, err)
			}
		}
	}

	// Figure out what sorts of sources we have.
	var localSources, remoteSources []string
	for i, src := range sources {
		if sourceIsRemote(src) {
			remoteSources = append(remoteSources, src)
			continue
		}
		if !filepath.IsAbs(src) && options.ContextDir == "" {
			sources[i] = filepath.Join(currentDir, src)
		}
		localSources = append(localSources, sources[i])
	}

	// Check how many items our local source specs matched. Each spec
	// should have matched at least one item, otherwise we consider it an
	// error.
	var localSourceStats []*copier.StatsForGlob
	if len(localSources) > 0 {
		statOptions := copier.StatOptions{
			CheckForArchives: extract,
		}
		localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
		if err != nil {
			return fmt.Errorf("checking on sources under %q: %w", contextDir, err)
		}
	}
	numLocalSourceItems := 0
	for _, localSourceStat := range localSourceStats {
		if localSourceStat.Error != "" {
			errorText := localSourceStat.Error
			rel, err := filepath.Rel(contextDir, localSourceStat.Glob)
			if err != nil {
				errorText = fmt.Sprintf("%v; %s", err, errorText)
			}
			if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
				errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
			}
			return fmt.Errorf("checking on sources under %q: %v", contextDir, errorText)
		}
		if len(localSourceStat.Globbed) == 0 {
			return fmt.Errorf("checking source under %q: no glob matches: %w", contextDir, syscall.ENOENT)
		}
		numLocalSourceItems += len(localSourceStat.Globbed)
	}
	if numLocalSourceItems+len(remoteSources) == 0 {
		return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT)
	}

	// Find out which user (and group) the destination should belong to.
	var chownDirs, chownFiles *idtools.IDPair
	var userUID, userGID uint32
	if options.Chown != "" {
		userUID, userGID, err = b.userForCopy(mountPoint, options.Chown)
		if err != nil {
			return fmt.Errorf("looking up UID/GID for %q: %w", options.Chown, err)
		}
	}
	var chmodDirsFiles *os.FileMode
	if options.Chmod != "" {
		// Chmod is an octal string, e.g. "0755".
		p, err := strconv.ParseUint(options.Chmod, 8, 32)
		if err != nil {
			return fmt.Errorf("parsing chmod %q: %w", options.Chmod, err)
		}
		perm := os.FileMode(p)
		chmodDirsFiles = &perm
	}

	chownDirs = &idtools.IDPair{UID: int(userUID), GID: int(userGID)}
	chownFiles = &idtools.IDPair{UID: int(userUID), GID: int(userGID)}
	if options.Chown == "" && options.PreserveOwnership {
		chownDirs = nil
		chownFiles = nil
	}

	// If we have a single source archive to extract, or more than one
	// source item, or the destination has a path separator at the end of
	// it, and it's not a remote URL, the destination needs to be a
	// directory.
	if destination == "" || !filepath.IsAbs(destination) {
		// Relative destinations are resolved against the working directory.
		tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination)
		if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) {
			destination = tmpDestination + string(os.PathSeparator)
		} else {
			destination = tmpDestination
		}
	}
	destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator)) || destination == b.WorkDir()
	destCanBeFile := false
	if len(sources) == 1 {
		if len(remoteSources) == 1 {
			destCanBeFile = sourceIsRemote(sources[0])
		}
		if len(localSources) == 1 {
			item := localSourceStats[0].Results[localSourceStats[0].Globbed[0]]
			if item.IsDir || (item.IsArchive && extract) {
				destMustBeDirectory = true
			}
			if item.IsRegular {
				destCanBeFile = true
			}
		}
	}

	// We care if the destination either doesn't exist, or exists and is a
	// file. If the source can be a single file, for those cases we treat
	// the destination as a file rather than as a directory tree.
	renameTarget := ""
	extractDirectory := filepath.Join(mountPoint, destination)
	statOptions := copier.StatOptions{
		CheckForArchives: extract,
	}
	destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
	if err != nil {
		return fmt.Errorf("checking on destination %v: %w", extractDirectory, err)
	}
	if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
		// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
		renameTarget = filepath.Base(extractDirectory)
		extractDirectory = filepath.Dir(extractDirectory)
	}

	// if the destination is a directory that doesn't yet exist, let's copy it.
	newDestDirFound := false
	if (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile {
		newDestDirFound = true
	}

	if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
		if destMustBeDirectory {
			return fmt.Errorf("destination %v already exists but is not a directory", destination)
		}
		// destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
		renameTarget = filepath.Base(extractDirectory)
		extractDirectory = filepath.Dir(extractDirectory)
	}

	pm, err := fileutils.NewPatternMatcher(options.Excludes)
	if err != nil {
		return fmt.Errorf("processing excludes list %v: %w", options.Excludes, err)
	}

	// Make sure that, if it's a symlink, we'll chroot to the target of the link;
	// knowing that target requires that we resolve it within the chroot.
	evalOptions := copier.EvalOptions{}
	evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions)
	if err != nil {
		return fmt.Errorf("checking on destination %v: %w", extractDirectory, err)
	}
	extractDirectory = evaluated

	// Set up ID maps.
	var srcUIDMap, srcGIDMap []idtools.IDMap
	if options.IDMappingOptions != nil {
		srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap)
	}
	destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)

	// Create the target directory if it doesn't exist yet.
	mkdirOptions := copier.MkdirOptions{
		UIDMap:   destUIDMap,
		GIDMap:   destGIDMap,
		ChownNew: chownDirs,
	}
	if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
		return fmt.Errorf("ensuring target directory exists: %w", err)
	}

	// Copy each source in turn.
	for _, src := range sources {
		var multiErr *multierror.Error
		var getErr, closeErr, renameErr, putErr error
		var wg sync.WaitGroup
		if sourceIsRemote(src) {
			// Stream the download through a pipe: one goroutine fetches src
			// and writes it as a tar archive, the other digests the stream
			// and, unless this is a dry run, extracts it into the container.
			// wg.Wait() below orders the later reads of getErr/putErr after
			// the goroutines' writes.
			pipeReader, pipeWriter := io.Pipe()
			var srcDigest digest.Digest
			if options.Checksum != "" {
				srcDigest, err = digest.Parse(options.Checksum)
				if err != nil {
					return fmt.Errorf("invalid checksum flag: %w", err)
				}
			}
			wg.Add(1)
			go func() {
				getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest)
				pipeWriter.Close()
				wg.Done()
			}()
			wg.Add(1)
			go func() {
				b.ContentDigester.Start("")
				hashCloser := b.ContentDigester.Hash()
				hasher := io.Writer(hashCloser)
				if options.Hasher != nil {
					hasher = io.MultiWriter(hasher, options.Hasher)
				}
				if options.DryRun {
					_, putErr = io.Copy(hasher, pipeReader)
				} else {
					putOptions := copier.PutOptions{
						UIDMap:        destUIDMap,
						GIDMap:        destGIDMap,
						ChownDirs:     nil,
						ChmodDirs:     nil,
						ChownFiles:    nil,
						ChmodFiles:    nil,
						IgnoreDevices: userns.RunningInUserNS(),
					}
					putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
				}
				hashCloser.Close()
				pipeReader.Close()
				wg.Done()
			}()
			wg.Wait()
			if getErr != nil {
				getErr = fmt.Errorf("reading %q: %w", src, getErr)
			}
			if putErr != nil {
				putErr = fmt.Errorf("storing %q: %w", src, putErr)
			}
			multiErr = multierror.Append(getErr, putErr)
			if multiErr != nil && multiErr.ErrorOrNil() != nil {
				if len(multiErr.Errors) > 1 {
					return multiErr.ErrorOrNil()
				}
				return multiErr.Errors[0]
			}
			continue
		}

		if options.Checksum != "" {
			return fmt.Errorf("checksum flag is not supported for local sources")
		}

		// Dig out the result of running glob+stat on this source spec.
		var localSourceStat *copier.StatsForGlob
		for _, st := range localSourceStats {
			if st.Glob == src {
				localSourceStat = st
				break
			}
		}
		if localSourceStat == nil {
			continue
		}

		// Iterate through every item that matched the glob.
		itemsCopied := 0
		for _, glob := range localSourceStat.Globbed {
			rel := glob
			if filepath.IsAbs(glob) {
				if rel, err = filepath.Rel(contextDir, glob); err != nil {
					return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
				}
			}
			if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
				return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
			}
			// Check for dockerignore-style exclusion of this item.
			if rel != "." {
				excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
				if err != nil {
					return fmt.Errorf("checking if %q(%q) is excluded: %w", glob, rel, err)
				}
				if excluded {
					// non-directories that are excluded are excluded, no question, but
					// directories can only be skipped if we don't have to allow for the
					// possibility of finding things to include under them
					globInfo := localSourceStat.Results[glob]
					if !globInfo.IsDir || !includeDirectoryAnyway(rel, pm) {
						continue
					}
				} else {
					// if the destination is a directory that doesn't yet exist, and is not excluded, let's copy it.
					if newDestDirFound {
						itemsCopied++
					}
				}
			} else {
				// Make sure we don't trigger a "copied nothing" error for an empty context
				// directory if we were told to copy the context directory itself. We won't
				// actually copy it, but we need to make sure that we don't produce an error
				// due to potentially not having anything in the tarstream that we passed.
				itemsCopied++
			}
			st := localSourceStat.Results[glob]
			// Stream this item through a pipe: the writer goroutine tars it
			// up (applying any rename and counting entries), and the reader
			// goroutine digests and extracts it.
			pipeReader, pipeWriter := io.Pipe()
			wg.Add(1)
			go func() {
				renamedItems := 0
				writer := io.WriteCloser(pipeWriter)
				if renameTarget != "" {
					writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
						hdr.Name = renameTarget
						renamedItems++
						return false, false, nil
					})
				}
				// Count every entry that passes through; the read of
				// itemsCopied below happens after wg.Wait(), which orders
				// it after these increments.
				writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
					itemsCopied++
					return false, false, nil
				})
				getOptions := copier.GetOptions{
					UIDMap:         srcUIDMap,
					GIDMap:         srcGIDMap,
					Excludes:       options.Excludes,
					ExpandArchives: extract,
					ChownDirs:      chownDirs,
					ChmodDirs:      chmodDirsFiles,
					ChownFiles:     chownFiles,
					ChmodFiles:     chmodDirsFiles,
					StripSetuidBit: options.StripSetuidBit,
					StripSetgidBit: options.StripSetgidBit,
					StripStickyBit: options.StripStickyBit,
				}
				getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
				closeErr = writer.Close()
				if renameTarget != "" && renamedItems > 1 {
					renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
				}
				wg.Done()
			}()
			wg.Add(1)
			go func() {
				if st.IsDir {
					b.ContentDigester.Start("dir")
				} else {
					b.ContentDigester.Start("file")
				}
				hashCloser := b.ContentDigester.Hash()
				hasher := io.Writer(hashCloser)
				if options.Hasher != nil {
					hasher = io.MultiWriter(hasher, options.Hasher)
				}
				if options.DryRun {
					_, putErr = io.Copy(hasher, pipeReader)
				} else {
					putOptions := copier.PutOptions{
						UIDMap:          destUIDMap,
						GIDMap:          destGIDMap,
						DefaultDirOwner: chownDirs,
						DefaultDirMode:  nil,
						ChownDirs:       nil,
						ChmodDirs:       nil,
						ChownFiles:      nil,
						ChmodFiles:      nil,
						IgnoreDevices:   userns.RunningInUserNS(),
					}
					putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
				}
				hashCloser.Close()
				pipeReader.Close()
				wg.Done()
			}()
			wg.Wait()
			if getErr != nil {
				getErr = fmt.Errorf("reading %q: %w", src, getErr)
			}
			if closeErr != nil {
				closeErr = fmt.Errorf("closing %q: %w", src, closeErr)
			}
			if renameErr != nil {
				renameErr = fmt.Errorf("renaming %q: %w", src, renameErr)
			}
			if putErr != nil {
				putErr = fmt.Errorf("storing %q: %w", src, putErr)
			}
			multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
			if multiErr != nil && multiErr.ErrorOrNil() != nil {
				if len(multiErr.Errors) > 1 {
					return multiErr.ErrorOrNil()
				}
				return multiErr.Errors[0]
			}
		}
		if itemsCopied == 0 {
			excludesFile := ""
			if options.IgnoreFile != "" {
				excludesFile = " using " + options.IgnoreFile
			}
			return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT)
		}
	}
	return nil
}
+
// userForRun returns the user (and group) information which we should use for
// running commands.  If userspec is empty, the builder's configured user is
// used instead.  The returned string is the user's home directory.  Note
// that a lookup error from GetUser is deliberately not returned right away:
// the supplementary-group lookup below still runs, and the original error
// (if any) is returned at the end.
func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, string, error) {
	if userspec == "" {
		userspec = b.User()
	}

	uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec)
	u := specs.User{
		UID:      uid,
		GID:      gid,
		Username: userspec,
	}
	if !strings.Contains(userspec, ":") {
		// No explicit group in the spec: pick up the user's
		// supplementary groups from the container's filesystem.
		groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
		if err2 != nil {
			// A missing user is tolerated here; any other lookup
			// error is only surfaced if GetUser itself succeeded.
			if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil {
				err = err2
			}
		} else {
			u.AdditionalGids = groups
		}

	}
	return u, homeDir, err
}
+
// userForCopy returns the user (and group) information which we should use for
// setting ownership of contents being copied. It's just like what
// userForRun() does, except for the case where we're passed a single numeric
// value, where we need to use that value for both the UID and the GID.
func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint32, error) {
	var (
		user, group string
		uid, gid    uint64
		err         error
	)

	// Split "user[:group]" into its parts.
	split := strings.SplitN(userspec, ":", 2)
	user = split[0]
	if len(split) > 1 {
		group = split[1]
	}

	// If userspec did not specify any values for user or group, then fail
	if user == "" && group == "" {
		return 0, 0, fmt.Errorf("can't find uid for user %s", userspec)
	}

	// If userspec specifies values for user or group, check for numeric values
	// and return early. If not, then translate username/groupname.
	// Note that err is threaded through the next few steps: any parse
	// failure falls through to the name-based lookup below.
	if user != "" {
		uid, err = strconv.ParseUint(user, 10, 32)
	}
	if err == nil {
		// default gid to uid
		gid = uid
		if group != "" {
			gid, err = strconv.ParseUint(group, 10, 32)
		}
	}
	// If err != nil, then user or group not numeric, check filesystem
	if err == nil {
		return uint32(uid), uint32(gid), nil
	}

	// Resolve the names against the container's own passwd/group data.
	owner, _, err := b.userForRun(mountPoint, userspec)
	if err != nil {
		// 0xffffffff sentinels make accidental use of the result obvious.
		return 0xffffffff, 0xffffffff, err
	}
	return owner.UID, owner.GID, nil
}
+
// EnsureContainerPathAs creates the specified directory owned by USER
// with the file mode set to MODE, mounting the container's root filesystem
// for the duration of the call.  If looking up USER fails, ownership
// silently falls back to 0:0.
func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
	mountPoint, err := b.Mount(b.MountLabel)
	if err != nil {
		return err
	}
	defer func() {
		if err2 := b.Unmount(); err2 != nil {
			logrus.Errorf("error unmounting container: %v", err2)
		}
	}()

	// Default to root ownership; a failed user lookup is deliberately
	// ignored rather than treated as an error.
	uid, gid := uint32(0), uint32(0)
	if user != "" {
		if uidForCopy, gidForCopy, err := b.userForCopy(mountPoint, user); err == nil {
			uid = uidForCopy
			gid = gidForCopy
		}
	}

	destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)

	idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
	opts := copier.MkdirOptions{
		ChmodNew: mode,
		ChownNew: idPair,
		UIDMap:   destUIDMap,
		GIDMap:   destGIDMap,
	}
	return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts)

}
diff --git a/bind/mount.go b/bind/mount.go
new file mode 100644
index 0000000..213b1f6
--- /dev/null
+++ b/bind/mount.go
@@ -0,0 +1,304 @@
+//go:build linux
+// +build linux
+
+package bind
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/containers/buildah/util"
+ cutil "github.com/containers/common/pkg/util"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
// SetupIntermediateMountNamespace creates a new mount namespace and bind
// mounts all bind-mount sources into a subdirectory of bundlePath that can
// only be reached by the root user of the container's user namespace, except
// for Mounts which include the NoBindOption option in their options list. The
// NoBindOption will then merely be removed.  The returned unmountAll
// callback tears down everything that was mounted here and removes the
// staging mountpoints; it is returned even on error so the caller can clean
// up whatever was set up before the failure.
func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmountAll func() error, err error) {
	// Whatever else happens, the NoBindOption marker never reaches the runtime.
	defer stripNoBindOption(spec)

	// We expect a root directory to be defined.
	if spec.Root == nil {
		return nil, errors.New("configuration has no root filesystem?")
	}
	rootPath := spec.Root.Path

	// Create a new mount namespace in which to do the things we're doing.
	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
		return nil, fmt.Errorf("creating new mount namespace for %v: %w", spec.Process.Args, err)
	}

	// Make all of our mounts private to our namespace.
	if err := mount.MakeRPrivate("/"); err != nil {
		return nil, fmt.Errorf("making mounts private to mount namespace for %v: %w", spec.Process.Args, err)
	}

	// Make sure the bundle directory is searchable. We created it with
	// TempDir(), so it should have started with permissions set to 0700.
	info, err := os.Stat(bundlePath)
	if err != nil {
		return nil, fmt.Errorf("checking permissions on %q: %w", bundlePath, err)
	}
	if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil {
		return nil, fmt.Errorf("loosening permissions on %q: %w", bundlePath, err)
	}

	// Figure out who needs to be able to reach these bind mounts in order
	// for the container to be started.
	rootUID, rootGID, err := util.GetHostRootIDs(spec)
	if err != nil {
		return nil, err
	}

	// Hand back a callback that the caller can use to clean up everything
	// we're doing here.  Mountpoints are prepended to "unmount" as they
	// are created, so the callback processes them in reverse order of
	// creation.
	unmount := []string{}
	unmountAll = func() (err error) {
		for _, mountpoint := range unmount {
			// Unmount it and anything under it.
			if err2 := UnmountMountpoints(mountpoint, nil); err2 != nil {
				logrus.Warnf("pkg/bind: error unmounting %q: %v", mountpoint, err2)
				if err == nil {
					err = err2
				}
			}
			if err2 := unix.Unmount(mountpoint, unix.MNT_DETACH); err2 != nil {
				// EINVAL just means it wasn't mounted; anything else is worth a warning.
				if errno, ok := err2.(syscall.Errno); !ok || errno != syscall.EINVAL {
					logrus.Warnf("pkg/bind: error detaching %q: %v", mountpoint, err2)
					if err == nil {
						err = err2
					}
				}
			}
			// Remove just the mountpoint.  It may be a file or a
			// directory, and may briefly still be busy, so retry a few
			// times, switching between Unlink and Rmdir as indicated by
			// the errno.
			retry := 10
			remove := unix.Unlink
			err2 := remove(mountpoint)
			for err2 != nil && retry > 0 {
				if errno, ok := err2.(syscall.Errno); ok {
					switch errno {
					default:
						retry = 0
						continue
					case syscall.EISDIR:
						remove = unix.Rmdir
						err2 = remove(mountpoint)
					case syscall.EBUSY:
						if err3 := unix.Unmount(mountpoint, unix.MNT_DETACH); err3 == nil {
							err2 = remove(mountpoint)
						}
					}
					retry--
				}
			}
			if err2 != nil {
				logrus.Warnf("pkg/bind: error removing %q: %v", mountpoint, err2)
				if err == nil {
					err = err2
				}
			}
		}
		return err
	}

	// Create a top-level directory that the "root" user will be able to
	// access, that "root" from containers which use different mappings, or
	// other unprivileged users outside of containers, shouldn't be able to
	// access.  Mode 0100 grants search-only access to the owner.
	mnt := filepath.Join(bundlePath, "mnt")
	if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil {
		return unmountAll, fmt.Errorf("creating %q owned by the container's root user: %w", mnt, err)
	}

	// Make that directory private, and add it to the list of locations we
	// unmount at cleanup time.
	if err = mount.MakeRPrivate(mnt); err != nil {
		return unmountAll, fmt.Errorf("marking filesystem at %q as private: %w", mnt, err)
	}
	unmount = append([]string{mnt}, unmount...)

	// Create a bind mount for the root filesystem and add it to the list.
	rootfs := filepath.Join(mnt, "rootfs")
	if err = os.Mkdir(rootfs, 0000); err != nil {
		return unmountAll, fmt.Errorf("creating directory %q: %w", rootfs, err)
	}
	if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
		return unmountAll, fmt.Errorf("bind mounting root filesystem from %q to %q: %w", rootPath, rootfs, err)
	}
	logrus.Debugf("bind mounted %q to %q", rootPath, rootfs)
	unmount = append([]string{rootfs}, unmount...)
	spec.Root.Path = rootfs

	// Do the same for everything we're binding in.
	mounts := make([]specs.Mount, 0, len(spec.Mounts))
	for i := range spec.Mounts {
		// If we're not using an intermediate, leave it in the list.
		if leaveBindMountAlone(spec.Mounts[i]) {
			mounts = append(mounts, spec.Mounts[i])
			continue
		}
		// Check if the source is a directory or something else.
		info, err := os.Stat(spec.Mounts[i].Source)
		if err != nil {
			if errors.Is(err, os.ErrNotExist) {
				logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source)
				continue
			}
			return unmountAll, fmt.Errorf("checking if %q is a directory: %w", spec.Mounts[i].Source, err)
		}
		stage := filepath.Join(mnt, fmt.Sprintf("buildah-bind-target-%d", i))
		if info.IsDir() {
			// If the source is a directory, make one to use as the
			// mount target.
			if err = os.Mkdir(stage, 0000); err != nil {
				return unmountAll, fmt.Errorf("creating directory %q: %w", stage, err)
			}
		} else {
			// If the source is not a directory, create an empty
			// file to use as the mount target.
			file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000)
			if err != nil {
				return unmountAll, fmt.Errorf("creating file %q: %w", stage, err)
			}
			file.Close()
		}
		// Bind mount the source from wherever it is to a place where
		// we know the runtime helper will be able to get to it...
		if err = unix.Mount(spec.Mounts[i].Source, stage, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
			return unmountAll, fmt.Errorf("bind mounting bind object from %q to %q: %w", spec.Mounts[i].Source, stage, err)
		}
		logrus.Debugf("bind mounted %q to %q", spec.Mounts[i].Source, stage)
		spec.Mounts[i].Source = stage
		// ... and update the source location that we'll pass to the
		// runtime to our intermediate location.
		mounts = append(mounts, spec.Mounts[i])
		unmount = append([]string{stage}, unmount...)
	}
	spec.Mounts = mounts

	return unmountAll, nil
}
+
+// Decide if the mount should not be redirected to an intermediate location first.
+func leaveBindMountAlone(mount specs.Mount) bool {
+ // If we know we shouldn't do a redirection for this mount, skip it.
+ if cutil.StringInSlice(NoBindOption, mount.Options) {
+ return true
+ }
+ // If we're not bind mounting it in, we don't need to do anything for it.
+ if mount.Type != "bind" && !cutil.StringInSlice("bind", mount.Options) && !cutil.StringInSlice("rbind", mount.Options) {
+ return true
+ }
+ return false
+}
+
+// UnmountMountpoints unmounts the given mountpoints and anything that's hanging
+// off of them, rather aggressively. If a mountpoint also appears in the
+// mountpointsToRemove slice, the mountpoints are removed after they are
+// unmounted.
+func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
+ mounts, err := mount.GetMounts()
+ if err != nil {
+ return fmt.Errorf("retrieving list of mounts: %w", err)
+ }
+ // getChildren returns the list of mount IDs that hang off of the
+ // specified ID.
+ getChildren := func(id int) []int {
+ var list []int
+ for _, info := range mounts {
+ if info.Parent == id {
+ list = append(list, info.ID)
+ }
+ }
+ return list
+ }
+ // getTree returns the list of mount IDs that hang off of the specified
+ // ID, and off of those mount IDs, etc.
+ getTree := func(id int) []int {
+ mounts := []int{id}
+ i := 0
+ for i < len(mounts) {
+ children := getChildren(mounts[i])
+ mounts = append(mounts, children...)
+ i++
+ }
+ return mounts
+ }
+ // getMountByID looks up the mount info with the specified ID
+ getMountByID := func(id int) *mount.Info {
+ for i := range mounts {
+ if mounts[i].ID == id {
+ return mounts[i]
+ }
+ }
+ return nil
+ }
+ // getMountByPoint looks up the mount info with the specified mountpoint
+ getMountByPoint := func(mountpoint string) *mount.Info {
+ for i := range mounts {
+ if mounts[i].Mountpoint == mountpoint {
+ return mounts[i]
+ }
+ }
+ return nil
+ }
+ // find the top of the tree we're unmounting
+ top := getMountByPoint(mountpoint)
+ if top == nil {
+ if err != nil {
+ return fmt.Errorf("%q is not mounted: %w", mountpoint, err)
+ }
+ return nil
+ }
+ // add all of the mounts that are hanging off of it
+ tree := getTree(top.ID)
+ // unmount each mountpoint, working from the end of the list (leaf nodes) to the top
+ for i := range tree {
+ var st unix.Stat_t
+ id := tree[len(tree)-i-1]
+ mount := getMountByID(id)
+ // check if this mountpoint is mounted
+ if err := unix.Lstat(mount.Mountpoint, &st); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ logrus.Debugf("mountpoint %q is not present(?), skipping", mount.Mountpoint)
+ continue
+ }
+ return fmt.Errorf("checking if %q is mounted: %w", mount.Mountpoint, err)
+ }
+ if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations)
+ logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint)
+ continue
+ }
+ // do the unmount
+ if err := unix.Unmount(mount.Mountpoint, 0); err != nil {
+ // if it was busy, detach it
+ if errno, ok := err.(syscall.Errno); ok && errno == syscall.EBUSY {
+ err = unix.Unmount(mount.Mountpoint, unix.MNT_DETACH)
+ }
+ if err != nil {
+ // if it was invalid (not mounted), hide the error, else return it
+ if errno, ok := err.(syscall.Errno); !ok || errno != syscall.EINVAL {
+ logrus.Warnf("error unmounting %q: %v", mount.Mountpoint, err)
+ continue
+ }
+ }
+ }
+ // if we're also supposed to remove this thing, do that, too
+ if cutil.StringInSlice(mount.Mountpoint, mountpointsToRemove) {
+ if err := os.Remove(mount.Mountpoint); err != nil {
+ return fmt.Errorf("removing %q: %w", mount.Mountpoint, err)
+ }
+ }
+ }
+ return nil
+}
diff --git a/bind/mount_unsupported.go b/bind/mount_unsupported.go
new file mode 100644
index 0000000..88ca2ca
--- /dev/null
+++ b/bind/mount_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package bind
+
+import (
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// SetupIntermediateMountNamespace returns a no-op unmountAll() and no error.
+func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmountAll func() error, err error) {
+ stripNoBindOption(spec)
+ return func() error { return nil }, nil
+}
diff --git a/bind/util.go b/bind/util.go
new file mode 100644
index 0000000..3f77f3e
--- /dev/null
+++ b/bind/util.go
@@ -0,0 +1,27 @@
+package bind
+
+import (
+ "github.com/containers/common/pkg/util"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+ // NoBindOption is an option which, if present in a Mount structure's
+ // options list, will cause SetupIntermediateMountNamespace to not
+ // redirect it through a bind mount.
+ NoBindOption = "nobuildahbind"
+)
+
+func stripNoBindOption(spec *specs.Spec) {
+ for i := range spec.Mounts {
+ if util.StringInSlice(NoBindOption, spec.Mounts[i].Options) {
+ prunedOptions := make([]string, 0, len(spec.Mounts[i].Options))
+ for _, option := range spec.Mounts[i].Options {
+ if option != NoBindOption {
+ prunedOptions = append(prunedOptions, option)
+ }
+ }
+ spec.Mounts[i].Options = prunedOptions
+ }
+ }
+}
diff --git a/btrfs_installed_tag.sh b/btrfs_installed_tag.sh
new file mode 100755
index 0000000..f2f2b33
--- /dev/null
+++ b/btrfs_installed_tag.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
+#include <btrfs/ioctl.h>
+EOF
+if test $? -ne 0 ; then
+ echo exclude_graphdriver_btrfs
+fi
diff --git a/btrfs_tag.sh b/btrfs_tag.sh
new file mode 100755
index 0000000..ea753d4
--- /dev/null
+++ b/btrfs_tag.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
+#include <btrfs/version.h>
+EOF
+if test $? -ne 0 ; then
+ echo btrfs_noversion
+fi
diff --git a/buildah.go b/buildah.go
new file mode 100644
index 0000000..e4ed5dc
--- /dev/null
+++ b/buildah.go
@@ -0,0 +1,551 @@
+package buildah
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "time"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/docker"
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/ioutils"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // Package is the name of this package, used in help output and to
+ // identify working containers.
+ Package = define.Package
+ // Version for the Package.
+ Version = define.Version
+ // The value we use to identify what type of information, currently a
+ // serialized Builder structure, we are using as per-container state.
+ // This should only be changed when we make incompatible changes to
+ // that data structure, as it's used to distinguish containers which
+ // are "ours" from ones that aren't.
+ containerType = Package + " 0.0.1"
+ // The file in the per-container directory which we use to store our
+ // per-container state. If it isn't there, then the container isn't
+ // one of our build containers.
+ stateFile = Package + ".json"
+)
+
+// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+type PullPolicy = define.PullPolicy
+
+const (
+ // PullIfMissing is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should be pulled from a
+ // registry if a local copy of it is not already present.
+ PullIfMissing = define.PullIfMissing
+ // PullAlways is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that a fresh, possibly updated, copy of the image
+ // should be pulled from a registry before the build proceeds.
+ PullAlways = define.PullAlways
+ // PullIfNewer is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should only be pulled
+ // from a registry if a local copy is not already present or if a
+ // newer version the image is present on the repository.
+ PullIfNewer = define.PullIfNewer
+ // PullNever is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that the source image should not be pulled from a
+ // registry if a local copy of it is not already present.
+ PullNever = define.PullNever
+)
+
+// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled,
+// or NetworkEnabled.
+type NetworkConfigurationPolicy = define.NetworkConfigurationPolicy
+
+const (
+ // NetworkDefault is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that the default behavior should be used.
+ NetworkDefault = define.NetworkDefault
+ // NetworkDisabled is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that network interfaces should NOT be configured for
+ // newly-created network namespaces.
+ NetworkDisabled = define.NetworkDisabled
+ // NetworkEnabled is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that network interfaces should be configured for
+ // newly-created network namespaces.
+ NetworkEnabled = define.NetworkEnabled
+)
+
+// Builder objects are used to represent containers which are being used to
+// build images. They also carry potential updates which will be applied to
+// the image's configuration when the container's contents are used to build an
+// image.
+type Builder struct {
+ store storage.Store
+
+ // Logger is the logrus logger to write log messages with
+ Logger *logrus.Logger `json:"-"`
+
+ // Args define variables that users can pass at build-time to the builder
+ Args map[string]string
+ // Type is used to help identify a build container's metadata. It
+ // should not be modified.
+ Type string `json:"type"`
+ // FromImage is the name of the source image which was used to create
+ // the container, if one was used. It should not be modified.
+ FromImage string `json:"image,omitempty"`
+ // FromImageID is the ID of the source image which was used to create
+ // the container, if one was used. It should not be modified.
+ FromImageID string `json:"image-id"`
+ // FromImageDigest is the digest of the source image which was used to
+ // create the container, if one was used. It should not be modified.
+ FromImageDigest string `json:"image-digest"`
+ // Config is the source image's configuration. It should not be
+ // modified.
+ Config []byte `json:"config,omitempty"`
+ // Manifest is the source image's manifest. It should not be modified.
+ Manifest []byte `json:"manifest,omitempty"`
+
+ // Container is the name of the build container. It should not be modified.
+ Container string `json:"container-name,omitempty"`
+ // ContainerID is the ID of the build container. It should not be modified.
+ ContainerID string `json:"container-id,omitempty"`
+ // MountPoint is the last location where the container's root
+ // filesystem was mounted. It should not be modified.
+ MountPoint string `json:"mountpoint,omitempty"`
+ // ProcessLabel is the SELinux process label associated with the container
+ ProcessLabel string `json:"process-label,omitempty"`
+ // MountLabel is the SELinux mount label associated with the container
+ MountLabel string `json:"mount-label,omitempty"`
+
+ // ImageAnnotations is a set of key-value pairs which is stored in the
+ // image's manifest.
+ ImageAnnotations map[string]string `json:"annotations,omitempty"`
+ // ImageCreatedBy is a description of how this container was built.
+ ImageCreatedBy string `json:"created-by,omitempty"`
+ // ImageHistoryComment is a description of how our added layers were built.
+ ImageHistoryComment string `json:"history-comment,omitempty"`
+
+ // Image metadata and runtime settings, in multiple formats.
+ OCIv1 v1.Image `json:"ociv1,omitempty"`
+ Docker docker.V2Image `json:"docker,omitempty"`
+ // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format.
+ DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
+
+ // Isolation controls how we handle "RUN" statements and the Run() method.
+ Isolation define.Isolation
+ // NamespaceOptions controls how we set up the namespaces for processes that we run in the container.
+ NamespaceOptions define.NamespaceOptions
+ // ConfigureNetwork controls whether or not network interfaces and
+ // routing are configured for a new network namespace (i.e., when not
+ // joining another's namespace and not just using the host's
+ // namespace), effectively deciding whether or not the process has a
+ // usable network.
+ ConfigureNetwork define.NetworkConfigurationPolicy
+ // CNIPluginPath is the location of CNI plugin helpers, if they should be
+ // run from a location other than the default location.
+ CNIPluginPath string
+ // CNIConfigDir is the location of CNI configuration files, if the files in
+ // the default configuration directory shouldn't be used.
+ CNIConfigDir string
+
+ // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
+ NetworkInterface nettypes.ContainerNetwork `json:"-"`
+
+ // GroupAdd is a list of groups to add to the primary process within
+ // the container. 'keep-groups' allows container processes to use
+ // supplementary groups.
+ GroupAdd []string
+ // ID mapping options to use when running processes in the container with non-host user namespaces.
+ IDMappingOptions define.IDMappingOptions
+ // Capabilities is a list of capabilities to use when running commands in the container.
+ Capabilities []string
+ // PrependedEmptyLayers are history entries that we'll add to a
+ // committed image, after any history items that we inherit from a base
+ // image, but before the history item for the layer that we're
+ // committing.
+ PrependedEmptyLayers []v1.History
+ // AppendedEmptyLayers are history entries that we'll add to a
+ // committed image after the history item for the layer that we're
+ // committing.
+ AppendedEmptyLayers []v1.History
+ CommonBuildOpts *define.CommonBuildOptions
+ // TopLayer is the top layer of the image
+ TopLayer string
+ // Format for the build Image
+ Format string
+ // TempVolumes are temporary mount points created during container runs
+ TempVolumes map[string]bool
+ // ContentDigester counts the digest of all Add()ed content
+ ContentDigester CompositeDigester
+ // Devices are the additional devices to add to the containers
+ Devices define.ContainerDevices
+}
+
+// BuilderInfo are used as objects to display container information
+type BuilderInfo struct {
+ Type string
+ FromImage string
+ FromImageID string
+ FromImageDigest string
+ GroupAdd []string
+ Config string
+ Manifest string
+ Container string
+ ContainerID string
+ MountPoint string
+ ProcessLabel string
+ MountLabel string
+ ImageAnnotations map[string]string
+ ImageCreatedBy string
+ OCIv1 v1.Image
+ Docker docker.V2Image
+ DefaultMountsFilePath string
+ Isolation string
+ NamespaceOptions define.NamespaceOptions
+ Capabilities []string
+ ConfigureNetwork string
+ CNIPluginPath string
+ CNIConfigDir string
+ IDMappingOptions define.IDMappingOptions
+ History []v1.History
+ Devices define.ContainerDevices
+}
+
+// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
+// This is used in the inspect command to display Manifest and Config as string and not []byte.
+func GetBuildInfo(b *Builder) BuilderInfo {
+ history := copyHistory(b.OCIv1.History)
+ history = append(history, copyHistory(b.PrependedEmptyLayers)...)
+ history = append(history, copyHistory(b.AppendedEmptyLayers)...)
+ sort.Strings(b.Capabilities)
+ return BuilderInfo{
+ Type: b.Type,
+ FromImage: b.FromImage,
+ FromImageID: b.FromImageID,
+ FromImageDigest: b.FromImageDigest,
+ Config: string(b.Config),
+ Manifest: string(b.Manifest),
+ Container: b.Container,
+ ContainerID: b.ContainerID,
+ GroupAdd: b.GroupAdd,
+ MountPoint: b.MountPoint,
+ ProcessLabel: b.ProcessLabel,
+ MountLabel: b.MountLabel,
+ ImageAnnotations: b.ImageAnnotations,
+ ImageCreatedBy: b.ImageCreatedBy,
+ OCIv1: b.OCIv1,
+ Docker: b.Docker,
+ DefaultMountsFilePath: b.DefaultMountsFilePath,
+ Isolation: b.Isolation.String(),
+ NamespaceOptions: b.NamespaceOptions,
+ ConfigureNetwork: fmt.Sprintf("%v", b.ConfigureNetwork),
+ CNIPluginPath: b.CNIPluginPath,
+ CNIConfigDir: b.CNIConfigDir,
+ IDMappingOptions: b.IDMappingOptions,
+ Capabilities: b.Capabilities,
+ History: history,
+ Devices: b.Devices,
+ }
+}
+
+// CommonBuildOptions are resources that can be defined by flags for both buildah from and build
+type CommonBuildOptions = define.CommonBuildOptions
+
+// BuilderOptions are used to initialize a new Builder.
+type BuilderOptions struct {
+ // Args define variables that users can pass at build-time to the builder
+ Args map[string]string
+ // FromImage is the name of the image which should be used as the
+ // starting point for the container. It can be set to an empty value
+ // or "scratch" to indicate that the container should not be based on
+ // an image.
+ FromImage string
+ // ContainerSuffix is the suffix to add for generated container names
+ ContainerSuffix string
+ // Container is a desired name for the build container.
+ Container string
+ // PullPolicy decides whether or not we should pull the image that
+ // we're using as a base image. It should be PullIfMissing,
+ // PullAlways, or PullNever.
+ PullPolicy define.PullPolicy
+ // Registry is a value which is prepended to the image's name, if it
+ // needs to be pulled and the image name alone can not be resolved to a
+ // reference to a source image. No separator is implicitly added.
+ Registry string
+ // BlobDirectory is the name of a directory in which we'll attempt
+ // to store copies of layer blobs that we pull down, if any. It should
+ // already exist.
+ BlobDirectory string
+ GroupAdd []string
+ // Logger is the logrus logger to write log messages with
+ Logger *logrus.Logger `json:"-"`
+ // Mount signals to NewBuilder() that the container should be mounted
+ // immediately.
+ Mount bool
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // ReportWriter is an io.Writer which will be used to log the reading
+ // of the source image from a registry, if we end up pulling the image.
+ ReportWriter io.Writer
+ // github.com/containers/image/types SystemContext to hold credentials
+ // and other authentication/authorization information.
+ SystemContext *types.SystemContext
+ // DefaultMountsFilePath is the file path holding the mounts to be
+ // mounted in "host-path:container-path" format
+ DefaultMountsFilePath string
+ // Isolation controls how we handle "RUN" statements and the Run()
+ // method.
+ Isolation define.Isolation
+ // NamespaceOptions controls how we set up namespaces for processes that
+ // we might need to run using the container's root filesystem.
+ NamespaceOptions define.NamespaceOptions
+ // ConfigureNetwork controls whether or not network interfaces and
+ // routing are configured for a new network namespace (i.e., when not
+ // joining another's namespace and not just using the host's
+ // namespace), effectively deciding whether or not the process has a
+ // usable network.
+ ConfigureNetwork define.NetworkConfigurationPolicy
+ // CNIPluginPath is the location of CNI plugin helpers, if they should be
+ // run from a location other than the default location.
+ CNIPluginPath string
+ // CNIConfigDir is the location of CNI configuration files, if the files in
+ // the default configuration directory shouldn't be used.
+ CNIConfigDir string
+
+ // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
+ NetworkInterface nettypes.ContainerNetwork `json:"-"`
+
+ // ID mapping options to use if we're setting up our own user namespace.
+ IDMappingOptions *define.IDMappingOptions
+ // Capabilities is a list of capabilities to use when
+ // running commands in the container.
+ Capabilities []string
+ CommonBuildOpts *define.CommonBuildOptions
+ // Format for the container image
+ Format string
+ // Devices are the additional devices to add to the containers
+ Devices define.ContainerDevices
+ // DefaultEnv is deprecated and ignored.
+ DefaultEnv []string
+ // MaxPullRetries is the maximum number of attempts we'll make to pull
+ // any one image from the external registry if the first attempt fails.
+ MaxPullRetries int
+ // PullRetryDelay is how long to wait before retrying a pull attempt.
+ PullRetryDelay time.Duration
+ // OciDecryptConfig contains the config that can be used to decrypt an image if it is
+ // encrypted if non-nil. If nil, it does not attempt to decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
+ // ProcessLabel is the SELinux process label associated with the container
+ ProcessLabel string
+ // MountLabel is the SELinux mount label associated with the container
+ MountLabel string
+ // PreserveBaseImageAnns indicates that we should preserve base
+ // image information (Annotations) that are present in our base image,
+ // rather than overwriting them with information about the base image
+ // itself. Useful as an internal implementation detail of multistage
+ // builds, and does not need to be set by most callers.
+ PreserveBaseImageAnns bool
+}
+
+// ImportOptions are used to initialize a Builder from an existing container
+// which was created elsewhere.
+type ImportOptions struct {
+ // Container is the name of the build container.
+ Container string
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+}
+
+// ImportFromImageOptions are used to initialize a Builder from an image.
+type ImportFromImageOptions struct {
+ // Image is the name or ID of the image we'd like to examine.
+ Image string
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // github.com/containers/image/types SystemContext to hold information
+ // about which registries we should check for completing image names
+ // that don't include a domain portion.
+ SystemContext *types.SystemContext
+}
+
+// ConfidentialWorkloadOptions encapsulates options which control whether or not
+// we output an image whose rootfs contains a LUKS-compatibly-encrypted disk image
+// instead of the usual rootfs contents.
+type ConfidentialWorkloadOptions = define.ConfidentialWorkloadOptions
+
+// NewBuilder creates a new build container.
+func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
+ if options.CommonBuildOpts == nil {
+ options.CommonBuildOpts = &CommonBuildOptions{}
+ }
+ return newBuilder(ctx, store, options)
+}
+
+// ImportBuilder creates a new build configuration using an already-present
+// container.
+func ImportBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
+ return importBuilder(ctx, store, options)
+}
+
+// ImportBuilderFromImage creates a new builder configuration using an image.
+// The returned object can be modified and examined, but it can not be saved
+// or committed because it is not associated with a working container.
+func ImportBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
+ return importBuilderFromImage(ctx, store, options)
+}
+
+// OpenBuilder loads information about a build container given its name or ID.
+func OpenBuilder(store storage.Store, container string) (*Builder, error) {
+ cdir, err := store.ContainerDirectory(container)
+ if err != nil {
+ return nil, err
+ }
+ buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
+ if err != nil {
+ return nil, err
+ }
+ b := &Builder{}
+ if err = json.Unmarshal(buildstate, &b); err != nil {
+ return nil, fmt.Errorf("parsing %q, read from %q: %w", string(buildstate), filepath.Join(cdir, stateFile), err)
+ }
+ if b.Type != containerType {
+ return nil, fmt.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type)
+ }
+
+ netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath)
+ if err != nil {
+ return nil, err
+ }
+ b.NetworkInterface = netInt
+ b.store = store
+ b.fixupConfig(nil)
+ b.setupLogger()
+ if b.CommonBuildOpts == nil {
+ b.CommonBuildOpts = &CommonBuildOptions{}
+ }
+ return b, nil
+}
+
+// OpenBuilderByPath loads information about a build container given a
+// path to the container's root filesystem
+func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) {
+ containers, err := store.Containers()
+ if err != nil {
+ return nil, err
+ }
+ abs, err := filepath.Abs(path)
+ if err != nil {
+ return nil, err
+ }
+ builderMatchesPath := func(b *Builder, path string) bool {
+ return (b.MountPoint == path)
+ }
+ for _, container := range containers {
+ cdir, err := store.ContainerDirectory(container.ID)
+ if err != nil {
+ return nil, err
+ }
+ buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID)
+ continue
+ }
+ return nil, err
+ }
+ b := &Builder{}
+ err = json.Unmarshal(buildstate, &b)
+ if err == nil && b.Type == containerType && builderMatchesPath(b, abs) {
+ b.store = store
+ b.fixupConfig(nil)
+ b.setupLogger()
+ if b.CommonBuildOpts == nil {
+ b.CommonBuildOpts = &CommonBuildOptions{}
+ }
+ return b, nil
+ }
+ if err != nil {
+ logrus.Debugf("error parsing %q, read from %q: %v", string(buildstate), filepath.Join(cdir, stateFile), err)
+ } else if b.Type != containerType {
+ logrus.Debugf("container %q is not a %s container (is a %q container)", container.ID, define.Package, b.Type)
+ }
+ }
+ return nil, storage.ErrContainerUnknown
+}
+
+// OpenAllBuilders loads all containers which have a state file that we use in
+// their data directory, typically so that they can be listed.
+func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) {
+ containers, err := store.Containers()
+ if err != nil {
+ return nil, err
+ }
+ for _, container := range containers {
+ cdir, err := store.ContainerDirectory(container.ID)
+ if err != nil {
+ return nil, err
+ }
+ buildstate, err := os.ReadFile(filepath.Join(cdir, stateFile))
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ logrus.Debugf("%v, ignoring container %q", err, container.ID)
+ continue
+ }
+ return nil, err
+ }
+ b := &Builder{}
+ err = json.Unmarshal(buildstate, &b)
+ if err == nil && b.Type == containerType {
+ b.store = store
+ b.setupLogger()
+ b.fixupConfig(nil)
+ if b.CommonBuildOpts == nil {
+ b.CommonBuildOpts = &CommonBuildOptions{}
+ }
+ builders = append(builders, b)
+ continue
+ }
+ if err != nil {
+ logrus.Debugf("error parsing %q, read from %q: %v", string(buildstate), filepath.Join(cdir, stateFile), err)
+ } else if b.Type != containerType {
+ logrus.Debugf("container %q is not a %s container (is a %q container)", container.ID, define.Package, b.Type)
+ }
+ }
+ return builders, nil
+}
+
+// Save saves the builder's current state to the build container's metadata.
+// This should not need to be called directly, as other methods of the Builder
+// object take care of saving their state.
+func (b *Builder) Save() error {
+ buildstate, err := json.Marshal(b)
+ if err != nil {
+ return err
+ }
+ cdir, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return err
+ }
+ if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil {
+ return fmt.Errorf("saving builder state to %q: %w", filepath.Join(cdir, stateFile), err)
+ }
+ return nil
+}
diff --git a/buildah_test.go b/buildah_test.go
new file mode 100644
index 0000000..cb9284d
--- /dev/null
+++ b/buildah_test.go
@@ -0,0 +1,75 @@
+package buildah
+
+import (
+ "context"
+ "flag"
+ "os"
+ "testing"
+
+ "github.com/containers/storage"
+ "github.com/containers/storage/types"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMain(m *testing.M) {
+ var logLevel string
+ debug := false
+ if InitReexec() {
+ return
+ }
+ flag.BoolVar(&debug, "debug", false, "turn on debug logging")
+ flag.StringVar(&logLevel, "log-level", "error", "log level")
+ flag.Parse()
+ level, err := logrus.ParseLevel(logLevel)
+ if err != nil {
+ logrus.Fatalf("error parsing log level %q: %v", logLevel, err)
+ }
+ if debug && level < logrus.DebugLevel {
+ level = logrus.DebugLevel
+ }
+ logrus.SetLevel(level)
+ os.Exit(m.Run())
+}
+
+func TestOpenBuilderCommonBuildOpts(t *testing.T) {
+ ctx := context.TODO()
+ store, err := storage.GetStore(types.StoreOptions{
+ RunRoot: t.TempDir(),
+ GraphRoot: t.TempDir(),
+ GraphDriverName: "vfs",
+ })
+ require.NoError(t, err)
+ t.Cleanup(func() { _, err := store.Shutdown(true); assert.NoError(t, err) })
+ b, err := NewBuilder(ctx, store, BuilderOptions{})
+ require.NoError(t, err)
+ require.NotNil(t, b.CommonBuildOpts)
+ b.CommonBuildOpts = nil
+ builderContainerID := b.ContainerID
+ err = b.Save()
+ require.NoError(t, err)
+ b, err = OpenBuilder(store, builderContainerID)
+ require.NoError(t, err)
+ require.NotNil(t, b.CommonBuildOpts)
+ builders, err := OpenAllBuilders(store)
+ require.NoError(t, err)
+ for _, b := range builders {
+ require.NotNil(t, b.CommonBuildOpts)
+ }
+ imageID, _, _, err := b.Commit(ctx, nil, CommitOptions{})
+ require.NoError(t, err)
+ b, err = ImportBuilderFromImage(ctx, store, ImportFromImageOptions{
+ Image: imageID,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, b.CommonBuildOpts)
+ container, err := store.CreateContainer("", nil, imageID, "", "", &storage.ContainerOptions{})
+ require.NoError(t, err)
+ require.NotNil(t, container)
+ b, err = ImportBuilder(ctx, store, ImportOptions{
+ Container: container.ID,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, b.CommonBuildOpts)
+}
diff --git a/changelog.txt b/changelog.txt
new file mode 100644
index 0000000..96e4dbb
--- /dev/null
+++ b/changelog.txt
@@ -0,0 +1,2970 @@
+- Changelog for v1.33.5 (2024-02-01)
+
+ * Bump c/common to v0.57.4, moby/buildkit v0.5.12
+
+- Changelog for v1.33.4 (2024-01-30)
+
+ * Bump c/image to v5.29.2 and c/common to v0.57.3
+
+- Changelog for v1.33.3 (2024-01-18)
+
+ * Bump c/common to 0.57.2 and c/image to 5.29.1
+
+- Changelog for v1.33.2 (2023-11-22)
+ * Update minimum to golang 1.20
+ * fix(deps): update module github.com/fsouza/go-dockerclient to v1.10.0
+ * fix(deps): update module github.com/moby/buildkit to v0.12.3
+ * Bump to v1.33.2-dev
+
+- Changelog for v1.33.1 (2023-11-18)
+ * fix(deps): update module github.com/moby/buildkit to v0.11.4 [security]
+ * test,heredoc: use fedora instead of docker.io/library/python:latest
+ * Bump to v1.33.1-dev
+
+- Changelog for v1.33.0 (2023-11-17)
+ * Never omit layers for emptyLayer instructions when squashing/cwing
+ * Add OverrideChanges and OverrideConfig to CommitOptions
+ * buildah: add heredoc support for RUN, COPY and ADD
+ * vendor: bump imagebuilder to v1.2.6-0.20231110114814-35a50d57f722
+ * conformance tests: archive the context directory as 0:0 (#5171)
+ * blobcacheinfo,test: blobs must be reused when pushing across registry
+ * Bump c/storage v1.51.0, c/image v5.29.0, c/common v0.57.0
+ * pkg/util.MirrorToTempFileIfPathIsDescriptor(): don't leak an fd
+ * StageExecutor.Execute: force a commit for --unsetenv, too
+ * Increase a copier+chroot test timeout
+ * Add support for --compat-auth-file in login/logout
+ * Update existing tests for error message change
+ * Update c/image and c/common to latest
+ * fix(deps): update module github.com/containerd/containerd to v1.7.9
+ * build: downgrade to go 1.20
+ * Add godoc for pkg/parse.GetTempDir
+ * conformance tests: use go-dockerclient for BuildKit builds
+ * Make TEE types case-insensitive
+ * fix(deps): update module golang.org/x/crypto to v0.15.0
+ * Tweak some help descriptions
+ * Stop using DefaultNetworkSysctl and use containers.conf only
+ * Implement ADD checksum flag #5135
+ * vendor of openshift/imagebuilder #5135
+ * Pass secrets from the host down to internal podman containers
+ * Update cirrus and version of golang
+ * image: replace GetStoreImage with ResolveReference
+ * vendor: bump c/image to 373c52a9466f
+ * pkg/parse.Platform(): minor simplification
+ * createConfigsAndManifests: clear history before cw-specific logic
+ * Use a constant definition instead of "scratch"
+ * conformance: use require.NoErrorf() more
+ * fix(deps): update module golang.org/x/term to v0.14.0
+ * fix(deps): update module golang.org/x/sync to v0.5.0
+ * fix(deps): update module github.com/spf13/cobra to v1.8.0
+ * fix(deps): update module golang.org/x/sys to v0.14.0
+ * fix(deps): update github.com/containers/common digest to 8354404
+ * fix(deps): update module github.com/opencontainers/runc to v1.1.10
+ * fix(deps): update github.com/containers/luksy digest to b5a7f79
+ * Log the platform for build errors during multi-platform builds
+ * Use mask definitions from containers/common
+ * Vendor in latest containers/common
+ * fix(deps): update module github.com/containerd/containerd to v1.7.8
+ * fix(deps): update module go.etcd.io/bbolt to v1.3.8
+ * container.conf: support attributed string slices
+ * fix(deps): update module sigs.k8s.io/yaml to v1.4.0
+ * Use cutil.StringInSlice rather than contains
+ * Add --no-hostname option to buildah containers
+ * vendor c/common: appendable containers.conf strings, Part 1
+ * fix(deps): update module github.com/onsi/gomega to v1.28.1
+ * chroot.setupChrootBindMounts: pay more attention to flags
+ * chore(deps): update dependency containers/automation_images to v20231004
+ * Vendor containers/common
+ * chore(deps): update module golang.org/x/net to v0.17.0 [security]
+ * run: use internal.GetTempDir with os.MkdirTemp
+ * fix(deps): update module github.com/containerd/containerd to v1.7.7
+ * imagebuildah,multi-stage: do not remove base images
+ * gitignore: add mkcw binary
+ * mkcw: remove entrypoint binaries
+ * fix(deps): update module golang.org/x/crypto to v0.14.0
+ * fix(deps): update module golang.org/x/sys to v0.13.0
+ * fix(deps): update module golang.org/x/sync to v0.4.0
+ * Update some comments related to confidential workload
+ * Use the parent's image ID in the config that we pass to imagebuilder
+ * fix(deps): update github.com/containers/common digest to 8892536
+ * fix(deps): update github.com/containers/luksy digest to 6df88cb
+ * bug: Ensure the mount type is always BindMount by default
+ * Protocol can be specified with --port. Ex. --port 514/udp
+ * fix(deps): update module github.com/onsi/gomega to v1.28.0
+ * build,config: add support for --unsetlabel
+ * tests/bud: add tests
+ * [CI:BUILD] Packit: tag @containers/packit-build on copr build failures
+ * stage_executor: allow images without layers
+ * vendor of containers/common
+ * Removing selinux_tag.sh as no longer needed after 580356f [NO NEW TESTS NEEDED]
+ * add/copy: make sure we handle relative path names correctly
+ * fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc5
+ * Bump to v1.33.0-dev
+ * imagebuildah: consider ignorefile with --build-context
+
+- Changelog for v1.32.0 (2023-09-14)
+ * GetTmpDir is not using ImageCopyTmpdir correctly
+ * Run codespell on code
+ * Bump vendor containers/(common, storage, image)
+ * Cirrus: Remove multi-arch buildah image builds
+ * fix(deps): update module github.com/containerd/containerd to v1.7.6
+ * Split GetTempDir from internal/util
+ * Move most of internal/parse to internal/volumes
+ * copier: remove libimage dependency via util package
+ * Add some docs for `build --cw`, `commit --cw`, and `mkcw`
+ * Add `buildah mkcw`, add `--cw` to `buildah commit` and `buildah build`
+ * Make sure that pathnames picked up from the environment are absolute
+ * fix(deps): update module github.com/cyphar/filepath-securejoin to v0.2.4
+ * fix(deps): update module github.com/docker/docker to v24.0.6+incompatible
+ * Don't try to look up names when committing images
+ * fix(deps): update module golang.org/x/crypto to v0.13.0
+ * docs: use valid github repo
+ * fix(deps): update module golang.org/x/sys to v0.12.0
+ * vendor containers/common@12405381ff45
+ * push: --force-compression should be true with --compression-format
+ * Update module github.com/containerd/containerd to v1.7.5
+ * [skip-ci] Update tim-actions/commit-message-checker-with-regex action to v0.3.2
+ * docs: add reference to oci-hooks
+ * Support passing of ULimits as -1 to mean max
+ * GHA: Attempt to fix discussion_lock workflow
+ * Fixing the owner of the storage.conf.
+ * pkg/chrootuser: Ignore comments when parsing /etc/group on FreeBSD
+ * Use buildah repo rather than podman repo
+ * GHA: Closed issue/PR comment-lock test
+ * fix(deps): update module github.com/containers/storage to v1.49.0
+ * chore(deps): update dependency containers/automation_images to v20230816
+ * Replace troff code with markdown in buildah-{copy,add}.1.md
+ * [CI:BUILD] rpm: spdx compatible license field
+ * executor: build-arg warnings must honor global args
+ * fix(deps): update module github.com/containers/ocicrypt to v1.1.8
+ * chroot: `setSeccomp` add support for `ArchPARISC(64)` and `ArchRISCV64`
+ * make,cross: restore loong64
+ * Clear CommonBuildOpts when loading Builder status
+ * buildah/push/manifest-push: add support for --force-compression
+ * vendor: bump c/common to v0.55.1-0.20230811093040-524b4d5c12f9
+ * chore(deps): update dependency containers/automation_images to v20230809
+ * [CI:BUILD] RPM: fix buildtags
+ * fix(deps): update module github.com/opencontainers/runc to v1.1.9
+ * chore(deps): update dependency ubuntu to v22
+ * chore(deps): update dependency containers/automation_images to v20230807
+ * [CI:BUILD] Packit: add fedora-eln targets
+ * [CI:BUILD] RPM: build docs with vendored go-md2man
+ * packit: Build PRs into default packit COPRs
+ * Update install.md
+ * Update install.md changes current Debian stable version name
+ * fix(deps): update module golang.org/x/term to v0.11.0
+ * fix(deps): update module golang.org/x/crypto to v0.12.0
+ * tests: fix layer-label tests
+ * buildah: add --layer-label for setting labels on layers
+ * Cirrus: container/rootless env. var. passthrough
+ * Cirrus: Remove duplicate env. var. definitions
+ * fix(deps): update github.com/containers/storage digest to c3da76f
+ * Add a missing .Close() call on an ImageSource
+ * Create only a reference when that's all we need
+ * Add a missing .Close() call on an ImageDestination
+ * [CI:BUILD] RPM: define gobuild macro for rhel/centos stream
+ * manifest/push: add support for --add-compression
+ * manifest/inspect: add support for tls-verify and authfile
+ * vendor: bump c/common to v0.55.1-0.20230727095721-647ed1d4d79a
+ * vendor: bump c/image to v5.26.1-0.20230726142307-8c387a14f4ac
+ * fix(deps): update module github.com/containerd/containerd to v1.7.3
+ * fix(deps): update module github.com/onsi/gomega to v1.27.10
+ * fix(deps): update module github.com/docker/docker to v24.0.5+incompatible
+ * fix(deps): update module github.com/containers/image/v5 to v5.26.1
+ * fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0
+ * Update vendor of containers/(storage,image,common)
+ * fix(deps): update module github.com/opencontainers/runc to v1.1.8
+ * [CI:BUILD] Packit: remove pre-sync action
+ * fix(deps): update module github.com/containers/common to v0.55.2
+ * [CI:BUILD] Packit: downstream task script needs GOPATH
+ * Vendor in containers/(common, image, storage)
+ * fix(deps): update module golang.org/x/term to v0.10.0
+ * [CI:BUILD] Packit: fix pre-sync action for downstream tasks
+ * contrib/buildahimage: set config correctly for rootless build user
+ * fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc4
+ * Bump to v1.32.0-dev
+ * Update debian install instructions
+ * pkg/overlay: add limited support for FreeBSD
+
+- Changelog for v1.31.0 (2023-06-30)
+ * Bump c/common to 0.55.1 and c/image to 5.26.1
+ * Bump c/image to 5.26.0 and c/common to 0.54.0
+ * vendor: update c/{common,image,storage} to latest
+ * chore: pkg imported more than once
+ * buildah: add pasta(1) support
+ * use slirp4netns package from c/common
+ * update c/common to latest
+ * add hostname to /etc/hosts when running with host network
+ * vendor: update c/common to latest
+ * [CI:BUILD] Packit: add jobs for downstream Fedora package builds
+ * fix(deps): update module golang.org/x/sync to v0.3.0
+ * fix(deps): update module golang.org/x/crypto to v0.10.0
+ * Add smoke tests for encryption CLI helpers
+ * fix(deps): update module golang.org/x/term to v0.9.0
+ * fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0-rc.3
+ * Remove device mapper support
+ * Remove use of deprecated tar.TypeRegA
+ * Update tooling to support newer golangci-lint
+ * Make cli.EncryptConfig,DecryptConfig, GetFormat public
+ * Don't decrypt images by default
+ * fix(deps): update module github.com/onsi/gomega to v1.27.8
+ * fix(deps): update github.com/containers/storage digest to 3f3fb2f
+ * Renovate: Don't touch fragile test stuffs
+ * [CI:DOCS] Update comment to remove ambiguity
+ * fix(deps): update github.com/containers/image/v5 digest to abe5133
+ * fix(deps): update module github.com/sirupsen/logrus to v1.9.3
+ * fix(deps): update module github.com/containerd/containerd to v1.7.2
+ * Explicitly ref. quay images for CI
+ * At startup, log the effective capabilities for debugging
+ * parse: use GetTempDir from internal utils
+ * GetTmpDir: honor image_copy_tmp_dir from containers.conf
+ * docs/Makefile: don't show sed invocations
+ * CI: Support testing w/ podman-next COPR packages
+ * intermediate-images inherit-label test: make it debuggable
+ * fix(deps): update github.com/containers/common digest to 462ccdd
+ * Add a warning to `--secret` docs
+ * vendor: bump c/storage to v1.46.2-0.20230526114421-55ee2d19292f
+ * executor: apply label to only final stage
+ * remove registry.centos.org
+ * Go back to setting SysProcAttr.Pdeathsig for child processes
+ * Fix auth.json path (validated on Fedora 38) wq Signed-off-by: Andreas Mack <andreas.mack@gmail.com>
+ * fix(deps): update module github.com/stretchr/testify to v1.8.3
+ * CI: fix test broken by renovatebot
+ * chore(deps): update quay.io/libpod/testimage docker tag to v20221018
+ * fix(deps): update module github.com/onsi/gomega to v1.27.7
+ * test: use debian instead of docker.io/library/debian:testing-slim
+ * vendor: bump logrus to 1.9.2
+ * [skip-ci] Update tim-actions/get-pr-commits action to v1.3.0
+ * Revert "Proof of concept: nightly dependency treadmill"
+ * fix(deps): update module github.com/sirupsen/logrus to v1.9.1
+ * vendor in containers/(common,storage,image)
+ * fix(deps): update module github.com/docker/distribution to v2.8.2+incompatible
+ * run: drop Pdeathsig
+ * chroot: lock thread before setPdeathsig
+ * tests: add a case for required=false
+ * fix(deps): update module github.com/openshift/imagebuilder to v1.2.5
+ * build: validate volumes on backend
+ * secret: accept required flag w/o value
+ * fix(deps): update module github.com/containerd/containerd to v1.7.1
+ * fix(deps): update module golang.org/x/crypto to v0.9.0
+ * Update the demos README file to fix minor typos
+ * fix(deps): update module golang.org/x/sync to v0.2.0
+ * fix(deps): update module golang.org/x/term to v0.8.0
+ * manifest, push: use source as destination if not specified
+ * run,mount: remove path only if they didnt pre-exist
+ * Cirrus: Fix meta task failing to find commit
+ * parse: filter edge-case for podman-remote
+ * fix(deps): update module github.com/opencontainers/runc to v1.1.7
+ * fix(deps): update module github.com/docker/docker to v23.0.5+incompatible
+ * build: --platform must accept only arch
+ * fix(deps): update module github.com/containers/common to v0.53.0
+ * makefile: increase conformance timeout
+ * Cap suffixDigitsModulo to a 9-digits suffix.
+ * Rename conflict to suffixDigitsModulo
+ * fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0-rc.2
+ * fix(deps): update module github.com/opencontainers/runc to v1.1.6
+ * chore(deps): update centos docker tag to v8
+ * Clarify the need for qemu-user-static package
+ * chore(deps): update quay.io/centos/centos docker tag to v8
+ * Renovate: Ensure test/tools/go.mod is managed
+ * Revert "buildah image should not enable fuse-overlayfs for rootful mode"
+ * Bump to v1.31.0-dev
+ * parse: add support for relabel bind mount option
+
+- Changelog for v1.30.0 (2023-04-06)
+ * fix(deps): update module github.com/opencontainers/runc to v1.1.5
+ * fix(deps): update module github.com/fsouza/go-dockerclient to v1.9.7
+ * buildah image should not enable fuse-overlayfs for rootful mode
+ * stage_executor: inline network add default string
+ * fix(deps): update module github.com/containers/common to v0.51.2
+ * chore(deps): update dependency containers/automation_images to v20230330
+ * fix(deps): update module github.com/docker/docker to v23.0.2+incompatible
+ * chore(deps): update dependency containers/automation_images to v20230320
+ * fix(deps): update module github.com/onsi/gomega to v1.27.6
+ * fix(deps): update github.com/opencontainers/runtime-tools digest to e931285
+ * [skip-ci] Update actions/stale action to v8
+ * test: don't allow to override io.buildah.version
+ * executor: only apply label on the final stage
+ * Update docs/buildah-build.1.md
+ * update build instruction for Ubuntu
+ * code review
+ * build: accept arguments from file with --build-arg-file
+ * run_linux: Update heuristic for mounting /sys
+ * [CI:BUILD] Packit: Enable Copr builds on PR and commit to main
+ * fix(deps): update module github.com/fsouza/go-dockerclient to v1.9.6
+ * Update to Go 1.18
+ * Disable dependabot in favor of renovate
+ * chore(deps): update dependency containers/automation_images to v20230314
+ * Fix requiring tests on Makefile changes
+ * Vendor in latest containers/(storage, common, image)
+ * imagebuildah: set len(short_image_id) to 12
+ * Re-enable conformance tests
+ * Skip conformance test failures with Docker 23.0.1
+ * Cirrus: Replace Ubuntu -> Debian SID
+ * run: add support for inline --network in RUN stmt
+ * vendor: bump imagebuilder to a3c3f8358ca31b1e4daa6
+ * stage_executor: attempt to push cache only when cacheKey is valid
+ * Add "ifnewer" as option in help message for pull command
+ * build: document behaviour of buildah's distributed cache
+ * fix(deps): update module golang.org/x/term to v0.6.0
+ * Add default list of capabilities required to run buildah in a container
+ * executor,copy: honor default ARG value while eval stage
+ * sshagent: use ExtendedAgent instead of Agent
+ * tests/bud: remove unwated test
+ * executor: do not warn on builtin default args
+ * executor: don't warn about unused TARGETARCH,TARGETOS,TARGETPLATFORM
+ * Fix tutorial for rootless mode
+ * Vendor in latest containers/(common, storage, image)
+ * Ignore the base image's base image annotations
+ * fix(deps): update module github.com/fsouza/go-dockerclient to v1.9.5
+ * build(deps): bump github.com/containers/storage from 1.45.3 to 1.45.4
+ * Vendor in latest containers/common
+ * docs/tutorials/04: add defaults for Run()
+ * imagebuildah.StageExecutor: suppress bogus "Pushing cache []:..."
+ * executor: also add stage with no children to cleanupStages
+ * [CI:BUILD] copr: fix el8 builds
+ * Fix documentation on which Capabilities are allowed by default
+ * Skip subject-length validation for renovate PRs
+ * Temporarily hard-skip bud-multiple-platform-values test
+ * fix(deps): update github.com/openshift/imagebuilder digest to 86828bf
+ * build(deps): bump github.com/containerd/containerd from 1.6.16 to 1.6.17
+ * build(deps): bump tim-actions/get-pr-commits from 1.1.0 to 1.2.0
+ * build(deps): bump github.com/containers/image/v5 from 5.24.0 to 5.24.1
+ * [skip-ci] Update tim-actions/get-pr-commits digest to 55b867b
+ * build(deps): bump github.com/opencontainers/selinux
+ * build(deps): bump golang.org/x/crypto from 0.5.0 to 0.6.0
+ * Add renovate configuration
+ * Run codespell on codebase
+ * login: support interspersed args for password
+ * conformance: use scratch for minimal test
+ * pkg/parse: expose public CleanCacheMount API
+ * build(deps): bump go.etcd.io/bbolt from 1.3.6 to 1.3.7
+ * build(deps): bump github.com/containerd/containerd from 1.6.15 to 1.6.16
+ * docs: specify order preference for FROM
+ * Bump to v1.30.0-dev
+
+- Changelog for v1.29.0 (2023-01-25)
+ * tests: improve build-with-network-test
+ * Bump c/storagev1.45.3, c/imagev5.24.0, c/commonv0.51.0
+ * build(deps): bump github.com/onsi/gomega from 1.25.0 to 1.26.0
+ * Flake 3710 has been closed. Reenable the test.
+ * [CI:DOCS] Fix two diversity issues in a tutorial
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.9.2 to 1.9.3
+ * vendor in latests containers/(storage, common, image)
+ * fix bud-multiple-platform-with-base-as-default-arg flake
+ * stage_executor: while mounting stages use freshly built stage
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.9.0 to 1.9.2
+ * build(deps): bump github.com/onsi/gomega from 1.24.2 to 1.25.0
+ * vendor in latests containers/(storage, common, image, ocicyrpt)
+ * [Itests: change the runtime-flag test for crun
+ * [CI:DOCS] README: drop sudo
+ * Fix multi-arch manifest-list build timeouts
+ * Cirrus: Update VM Images
+ * bud: Consolidate multiple synthetic LABEL instructions
+ * build, secret: allow relative mountpoints wrt to work dir
+ * fixed squash documentation
+ * build(deps): bump github.com/containerd/containerd from 1.6.14 to 1.6.15
+ * Correct minor comment
+ * Vendor in latest containers/(common, image, storage)
+ * system tests: remove unhelpful assertions
+ * buildah: add prune command and expose CleanCacheMount API
+ * vendor: bump c/storage to a747b27
+ * Add support for --group-add to buildah from
+ * build(deps): bump actions/stale from 6 to 7
+ * Add documentation for buildah build --pull=missing
+ * build(deps): bump github.com/containerd/containerd from 1.6.12 to 1.6.14
+ * build(deps): bump github.com/docker/docker
+ * parse: default ignorefile must not point to symlink outside context
+ * buildah: wrap network setup errors
+ * build, mount: allow relative mountpoints wrt to work dir
+ * Update to F37 CI VM Images, re-enable prior-fedora
+ * Update vendor or containers/(image, storage, common)
+ * build(deps): bump golang.org/x/crypto from 0.3.0 to 0.4.0
+ * Update contact information
+ * build(deps): bump golang.org/x/term from 0.2.0 to 0.3.0
+ * Replace io/ioutil calls with os calls
+ * [skip-ci] GHA/Cirrus-cron: Fix execution order
+ * Vendor in containers/common
+ * build(deps): bump golang.org/x/sys from 0.2.0 to 0.3.0
+ * remote-cache: support multiple sources and destinations
+ * Update c/storage after https://github.com/containers/storage/pull/1436
+ * util.SortMounts(): make the returned order more stable
+ * version: Bump to 1.29.0-dev
+ * [CI:BUILD] Cirrus: Migrate OSX task to M1
+ * Update vendor of containers/(common, storage, image)
+ * mount=type=cache: separate cache parent on host for each user
+ * Fix installation instructions for Gentoo Linux
+ * build(deps): bump github.com/containerd/containerd from 1.6.9 to 1.6.10
+ * GHA: Reuse both cirrus rerun and check workflows
+ * Vendor in latest containers/(common,image,storage)
+ * build(deps): bump github.com/onsi/gomega from 1.24.0 to 1.24.1
+ * copier.Put(): clear up os/syscall mode bit confusion
+ * build(deps): bump golang.org/x/sys from 0.1.0 to 0.2.0
+ * Use TypeBind consistently to name bind/nullfs mounts
+ * Add no-new-privileges flag
+ * Update vendor of containers/(common, image, storage)
+ * imagebuildah:build with --all-platforms must honor args for base images
+ * codespell code
+ * Expand args and env when using --all-platforms
+ * build(deps): bump github.com/onsi/gomega from 1.23.0 to 1.24.0
+ * GHA: Simplify Cirrus-Cron check slightly
+ * Stop using ubi8
+ * remove unnecessary (hence misleading) rmi
+ * chroot: fix mounting of ro bind mounts
+ * executor: honor default ARG value while eval base name
+ * userns: add arbitrary steps/stage to --userns=auto test
+ * Don't set allow.mount in the vnet jail on Freebsd
+ * copier: Preserve file flags when copying archives on FreeBSD
+ * Remove quiet flag, so that it works in podman-remote
+ * test: fix preserve rootfs with --mount for podman-remote
+ * test: fix prune logic for cache-from after adding content summary
+ * vendor in latest containers/(storage, common, image)
+ * Fix RUN --mount=type=bind,from=<stage> not preserving rootfs of stage
+ * Define and use a safe, reliable test image
+ * Fix word missing in Container Tools Guide
+ * Makefile: Use $(MAKE) to start sub-makes in install.tools
+ * imagebuildah: pull cache from remote repo after adding content summary
+ * Makefile: Fix install on FreeBSD
+ * Ensure the cache volume locks are unlocked on all paths
+ * Vendor in latest containers/(common,storage)
+ * Simplify the interface of GetCacheMount and getCacheMount
+ * Fix cache locks with multiple mounts
+ * Remove calls to Lockfile.Locked()
+ * Maintain cache mount locks as lock objects instead of paths
+ * test: cleaning cache must not clean lockfiles
+ * run: honor lockfiles for multiple --mount instruction
+ * mount,cache: lockfiles must not be part of users cache content
+ * Update vendor containers/(common,image,storage)
+ * [CI:BUILD] copr: buildah rpm should depend on containers-common-extra
+ * pr-should-include-tests: allow specfile, golangci
+ * build(deps): bump dawidd6/action-send-mail from 3.7.0 to 3.7.1
+ * build(deps): bump github.com/docker/docker
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.8.3 to 1.9.0
+ * Update vendor containers/(common,image,storage)
+ * build(deps): bump actions/upload-artifact from 2 to 3
+ * build(deps): bump actions/checkout from 2 to 3
+ * build(deps): bump actions/stale from 1 to 6
+ * build(deps): bump dawidd6/action-send-mail from 2.2.2 to 3.7.0
+ * build(deps): bump tim-actions/get-pr-commits from 1.1.0 to 1.2.0
+ * sshagent: LockOSThread before setting SocketLabel
+ * Update tests for error message changes
+ * Update c/image after https://github.com/containers/image/pull/1299
+ * Fix ident for dependabot gha block
+ * build(deps): bump github.com/containers/ocicrypt from 1.1.5 to 1.1.6
+ * Fix man pages to match latest cobra settings
+ * build(deps): bump github.com/spf13/cobra from 1.5.0 to 1.6.0
+ * build(deps): bump github.com/onsi/gomega from 1.20.2 to 1.22.1
+ * test: retrofit 'bud with undefined build arg directory'
+ * imagebuildah: warnOnUnsetBuildArgs while processing stages from executor
+ * Update contrib/buildahimage/Containerfile
+ * Cirrus CI add flavor parameter
+ * Correction - `FLAVOR` not `FLAVOUR`
+ * Changed build argument from `RELEASE` to `FLAVOUR`
+ * Combine buildahimage Containerfiles
+ * bud.bats refactoring: $TEST_SCRATCH_DIR, part 2 of 2
+ * bud.bats refactoring: $TEST_SCRATCH_DIR, part 1 of 2
+ * System test cleanup: document, clarify, fix
+ * test: removing unneeded/expensive COPY
+ * test: warning behaviour for unset/set TARGETOS,TARGETARCH,TARGETPLATFORM
+ * Bump to v1.28.1-dev
+
+- Changelog for v1.28.0 (2022-09-30)
+ * Update vendor containers/(common,image)
+ * [CI:DOCS] Add quay-description update reminder
+ * vendor: bump c/common to v0.49.2-0.20220929111928-2d1b45ae2423
+ * build(deps): bump github.com/opencontainers/selinux
+ * Vendor in latest containers/storage
+ * Changing shell list operators from `;` to `&&`
+ * Fix buildahimage container.conf permissions regression
+ * Set sysctls from containers.conf
+ * refactor: stop using Normalize directly from containerd package
+ * config,builder: process variant while populating image spec
+ * Proof of concept: nightly dependency treadmill
+ * Run codespell on code
+ * Check for unset build args after TARGET args
+ * pkg/cli: improve completion test
+ * vendor in latest containers/(common,storage,image)
+ * copier: work around freebsd bug for "mkdir /"
+ * vendor: update c/image
+ * test: run in the host cgroup namespace
+ * vendor: update c/storage
+ * vendor: update c/common
+ * cmd: check for user UID instead of privileges
+ * run,build: conflict --isolation=chroot and --network
+ * Fix broken dns test (from merge collision)
+ * Fix stutters
+ * Fix broken command completion
+ * buildah bud --network=none should have no network
+ * build: support --skip-unused-stages for multi-stage builds
+ * Prevent use of --dns* options with --net=none
+ * buildah: make --cache-ttl=0s equivalent to --no-cache
+ * parse: make processing flags in --mount order agnostic
+ * Minor test fix for podman-remote
+ * build: honor <Containerfile>.containerignore as ignore file
+ * Update install.md: Debian 11 (Bullseye) is stable
+ * build(deps): bump github.com/docker/docker
+ * Use constants from containers/common for finding seccomp.json
+ * Don't call os.Exit(1) from manifest exist
+ * manifest: add support for buildah manifest exists
+ * Buildah should ignore /etc/crio/seccomp.json
+ * chroot: Fix cross build break
+ * chroot: Move isDevNull to run_common.go
+ * chroot: Fix setRlimit build on FreeBSD
+ * chroot: Move parseRLimits and setRlimits to run_common.go
+ * chroot: Fix runUsingChrootExecMain on FreeBSD
+ * chroot: Move runUsingChrootExecMain to run_common.go
+ * chroot: Factor out Linux-specific unshare options from runUsingChroot
+ * chroot: Move runUsingChroot to run_common.go
+ * chroot: Move RunUsingChroot and runUsingChrootMain to run_common.go
+ * chroot: Factor out /dev/ptmx pty implementation
+ * chroot: Add FreeBSD support for run with chroot isolation
+ * build(deps): bump github.com/docker/go-units from 0.4.0 to 0.5.0
+ * Replace k8s.gcr.io/pause in tests with registry.k8s.io/pause
+ * build(deps): bump github.com/onsi/gomega from 1.20.0 to 1.20.1
+ * Cirrus: use image with fewer downloaded dependencies
+ * build(deps): bump github.com/opencontainers/runc from 1.1.3 to 1.1.4
+ * run: add container gid to additional groups
+ * buildah: support for --retry and --retry-delay for push/pull failures
+ * Makefile: always call $(GO) instead of `go`
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.8.2 to 1.8.3
+ * test: use `T.TempDir` to create temporary test directory
+ * mount,cache: enable SElinux shared content label option by default
+ * commit: use race-free RemoveNames instead of SetNames
+ * Drop util/util.Cause()
+ * cmd/buildah: add "manifest create --amend"
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.8.1 to 1.8.2
+ * docs: specify git protocol is not supported for github hosted repo
+ * Scrub user and group names from layer diffs
+ * build(deps): bump github.com/containerd/containerd from 1.6.6 to 1.6.8
+ * version: bump to 1.28.0-dev
+
+- Changelog for v1.27.0 (2022-08-01)
+ * build: support filtering cache by duration using `--cache-ttl`.
+ * build: support building from commit when using git repo as build context.
+ * build: clean up git repos correctly when using subdirs.
+ * build: add support for distributing cache to remote sources using `--cache-to` and `--cache-from`.
+ * imagebuildah: optimize cache hits for `COPY` and `ADD` instructions.
+ * build: support OCI hooks for ephemeral build containers.
+ * build: add support for `--userns=auto`.
+ * copier: add NoOverwriteNonDirDir option .
+ * add initial support for building images using Buildah on FreeBSD.
+ * multistage: this now skips the computing of unwanted stages to improve performance.
+ * multiarch: support splitting build logs for `--platform` using `--logsplit`.
+ * build: add support for building images where the base image has no history.
+ * commit: allow disabling image history with `--omit-history`.
+ * build: add support for renaming a device in rootless setups.
+ * build: now supports additionalBuildContext in builds via the `--build-context` option.
+ * build: `--output` produces artifacts even if the build container is not committed.
+ * build: now accepts `-cpp-flag`, allowing users to pass in CPP flags when processing a Containerfile with C Preprocessor-like syntax.
+ * build: now accepts a branch and a subdirectory when the build context is a git repository.
+ * build: output now shows a progress bar while pushing and pulling images
+ * build: now errors out if the path to Containerfile is a directory.
+ * build: support building container images on environments that are rootless and without any valid login sessions.
+ * fix: `--output` now generates artifacts even if the entire build is cached.
+ * fix: `--output` generates artifacts only for the target stage in multi-stage builds.
+ * fix,add: now fails on a bad HTTP response instead of writing to container
+ * fix,squash: never use build cache when computing the last step of the last stage
+ * fix,build,run: allow reusing secret more than once in different RUN steps
+ * fix: compatibility with Docker build by making its --label and --annotate options set empty labels and annotations when given a name but no `=` or label value.
+
+- Changelog for v1.26.0 (2022-05-04)
+ * imagebuildah,build: move deepcopy of args before we spawn goroutine
+ * Vendor in containers/storage v1.40.2
+ * buildah.BuilderOptions.DefaultEnv is ignored, so mark it as deprecated
+ * help output: get more consistent about option usage text
+ * Handle OS version and features flags
+ * buildah build: --annotation and --label should remove values
+ * buildah build: add a --env
+ * buildah: deep copy options.Args before performing concurrent build/stage
+ * test: inline platform and builtinargs behaviour
+ * vendor: bump imagebuilder to master/009dbc6
+ * build: automatically set correct TARGETPLATFORM where expected
+ * build(deps): bump github.com/fsouza/go-dockerclient
+ * Vendor in containers/(common, storage, image)
+ * imagebuildah, executor: process arg variables while populating baseMap
+ * buildkit: add support for custom build output with --output
+ * Cirrus: Update CI VMs to F36
+ * fix staticcheck linter warning for deprecated function
+ * Fix docs build on FreeBSD
+ * build(deps): bump github.com/containernetworking/cni from 1.0.1 to 1.1.0
+ * copier.unwrapError(): update for Go 1.16
+ * copier.PutOptions: add StripSetuidBit/StripSetgidBit/StripStickyBit
+ * copier.Put(): write to read-only directories
+ * build(deps): bump github.com/cpuguy83/go-md2man/v2 in /tests/tools
+ * Rename $TESTSDIR (the plural one), step 4 of 3
+ * Rename $TESTSDIR (the plural one), step 3 of 3
+ * Rename $TESTSDIR (the plural one), step 2 of 3
+ * Rename $TESTSDIR (the plural one), step 1 of 3
+ * build(deps): bump github.com/containerd/containerd from 1.6.2 to 1.6.3
+ * Ed's periodic test cleanup
+ * using consistent lowercase 'invalid' word in returned err msg
+ * Update vendor of containers/(common,storage,image)
+ * use etchosts package from c/common
+ * run: set actual hostname in /etc/hostname to match docker parity
+ * update c/common to latest main
+ * Update vendor of containers/(common,storage,image)
+ * Stop littering
+ * manifest-create: allow creating manifest list from local image
+ * Update vendor of storage,common,image
+ * Bump golang.org/x/crypto to 7b82a4e
+ * Initialize network backend before first pull
+ * oci spec: change special mount points for namespaces
+ * tests/helpers.bash: assert handle corner cases correctly
+ * buildah: actually use containers.conf settings
+ * integration tests: learn to start a dummy registry
+ * Fix error check to work on Podman
+ * buildah build should accept at most one arg
+ * tests: reduce concurrency for flaky bud-multiple-platform-no-run
+ * vendor in latest containers/common,image,storage
+ * manifest-add: allow override arch,variant while adding image
+ * Remove a stray `\` from .containerenv
+ * Vendor in latest opencontainers/selinux v1.10.1
+ * build, commit: allow removing default identity labels
+ * Create shorter names for containers based on image IDs
+ * test: skip rootless on cgroupv2 in root env
+ * fix hang when oci runtime fails
+ * Set permissions for GitHub actions
+ * copier test: use correct UID/GID in test archives
+ * run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM
+ * Bump back to v1.26.0-dev
+ * build(deps): bump github.com/opencontainers/runc from 1.1.0 to 1.1.1
+ * Included the URL to check the SHA
+
+- Changelog for v1.25.1 (2022-03-30)
+ * buildah: create WORKDIR with USER permissions
+ * vendor: update github.com/openshift/imagebuilder
+ * copier: attempt to open the dir before adding it
+ * Updated dependabot to get updates for GitHub actions.
+ * Switch most calls to filepath.Walk to filepath.WalkDir
+  * build: allow --no-cache and --layers so build cache can be overridden
+ * build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0
+ * Bump to v1.26.0-dev
+ * build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+
+- Changelog for v1.25.0 (2022-03-25)
+ * install: drop RHEL/CentOS 7 doc
+ * build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5
+ * Bump c/storage to v1.39.0 in main
+ * Add a test for CVE-2022-27651
+ * build(deps): bump github.com/docker/docker
+ * Bump github.com/prometheus/client_golang to v1.11.1
+ * [CI:DOCS] man pages: sort flags, and keep them that way
+ * build(deps): bump github.com/containerd/containerd from 1.6.1 to 1.6.2
+ * Don't pollute
+ * network setup: increase timeout to 4 minutes
+ * do not set the inheritable capabilities
+ * build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+ * build(deps): bump github.com/containers/ocicrypt from 1.1.2 to 1.1.3
+ * parse: convert exposed GetVolumes to internal only
+ * buildkit: mount=type=cache support locking external cache store
+ * .in support: improve error message when cpp is not installed
+ * buildah image: install cpp
+ * build(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1
+ * build(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0
+ * build(deps): bump github.com/docker/docker
+ * Add --no-hosts flag to eliminate use of /etc/hosts within containers
+ * test: remove skips for rootless users
+ * test: unshare mount/umount if test is_rootless
+ * tests/copy: read correct containers.conf
+ * build(deps): bump github.com/docker/distribution
+  * cirrus: add separate task and matrix for rootless
+ * tests: skip tests for rootless which need unshare
+ * buildah: test rootless integration
+ * vendor: bump c/storage to main/93ce26691863
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.9 to 1.7.10
+ * tests/copy: initialize the network, too
+ * [CI:DOCS] remove references to Kubic for CentOS and Ubuntu
+ * build(deps): bump github.com/containerd/containerd from 1.6.0 to 1.6.1
+ * use c/image/pkg/blobcache
+ * vendor c/image/v5@v5.20.0
+ * add: ensure the context directory is an absolute path
+ * executor: docker builds must inherit healthconfig from base if any
+  * docs: Remove Containerfile and containerignore
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.8 to 1.7.9
+ * helpers.bash: Use correct syntax
+ * speed up combination-namespaces test
+ * build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+ * Bump back to 1.25.0-dev
+ * build(deps): bump github.com/containerd/containerd from 1.5.9 to 1.6.0
+
+- Changelog for v1.24.2 (2022-02-16)
+ * Increase subuid/subgid to 65535
+ * history: only add proxy vars to history if specified
+ * run_linux: use --systemd-cgroup
+ * buildah: new global option --cgroup-manager
+ * Makefile: build with systemd when available
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8
+ * Bump c/common to v0.47.4
+ * Cirrus: Use updated VM images
+ * conformance: add a few "replace-directory-with-symlink" tests
+ * Bump back to v1.25.0-dev
+
+- Changelog for v1.24.1 (2022-02-03)
+ * executor: Add support for inline --platform within Dockerfile
+ * caps: fix buildah run --cap-add=all
+ * Update vendor of openshift/imagebuilder
+ * Bump version of containers/image and containers/common
+ * Update vendor of containers/common
+ * System tests: fix accidental vandalism of source dir
+ * build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2
+ * imagebuildah.BuildDockerfiles(): create the jobs semaphore
+ * build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1
+ * overlay: always honor mountProgram
+ * overlay: move mount program invocation to separate function
+ * overlay: move mount program lookup to separate function
+ * Bump to v1.25.0-dev [NO TESTS NEEDED]
+
+- Changelog for v1.24.0 (2022-01-26)
+ * Update vendor of containers/common
+ * build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+ * Github-workflow: Report both failures and errors.
+ * build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0
+ * Update docs/buildah-build.1.md
+ * [CI:DOCS] Fix typos and improve language
+ * buildah bud --network add support for custom networks
+ * Make pull commands be consistent
+ * docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing
+ * build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0
+ * Vendor in latest containers/image
+ * Run codespell on code
+ * .github/dependabot.yml: add tests/tools go.mod
+ * CI: rm git-validation, add GHA job to validate PRs
+ * tests/tools: bump go-md2man to v2.0.1
+ * tests/tools/Makefile: simplify
+ * tests/tools: bump onsi/ginkgo to v1.16.5
+ * vendor: bump c/common and others
+ * mount: add support for custom upper and workdir with overlay mounts
+ * linux: fix lookup for runtime
+ * overlay: add MountWithOptions to API which extends support for advanced overlay
+ * Allow processing of SystemContext from FlagSet
+ * .golangci.yml: enable unparam linter
+ * util/resolveName: rm bool return
+ * tests/tools: bump golangci-lint
+ * .gitignore: fixups
+ * all: fix capabilities.NewPid deprecation warnings
+ * bind/mount.go: fix linter comment
+ * all: fix gosimple warning S1039
+ * tests/e2e/buildah_suite_test.go: fix gosimple warnings
+ * imagebuildah/executor.go: fix gosimple warning
+ * util.go: fix gosimple warning
+ * build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0
+ * Enable git-daemon tests
+ * Allow processing of id options from FlagSet
+ * Cirrus: Re-order tasks for more parallelism
+ * Cirrus: Freshen VM images
+ * Fix platform handling for empty os/arch values
+ * Allow processing of network options from FlagSet
+ * Fix permissions on secrets directory
+ * Update containers/image and containers/common
+ * bud.bats: use a local git daemon for the git protocol test
+ * Allow processing of common options from FlagSet
+ * Cirrus: Run int. tests in parallel with unit
+ * vendor c/common
+ * Fix default CNI paths
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7
+ * multi-stage: enable mounting stages across each other with selinux enabled
+ * executor: Share selinux label of first stage with other stages in a build
+ * buildkit: add from field to bind and cache mounts so images can be used as source
+ * Use config.ProxyEnv from containers/common
+ * use libnetwork from c/common for networking
+ * setup the netns in the buildah parent process
+ * build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6
+ * build: fix libsubid test
+ * Allow callers to replace the ContainerSuffix
+ * parse: allow parsing anomaly non-human value for memory control group
+ * .cirrus: remove static_build from ci
+ * stage_executor: re-use all possible layers from cache for squashed builds
+ * build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0
+ * Allow rootless buildah to set resource limits on cgroup V2
+ * build(deps): bump github.com/docker/docker
+ * tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification
+ * build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3
+ * Wire logger through to config
+ * copier.Put: check for is-not-a-directory using lstat, not stat
+ * Turn on rootless cgroupv2 tests
+ * Grab all of the containers.conf settings for namespaces.
+ * image: set MediaType in OCI manifests
+ * copier: RemoveAll possibly-directories
+ * Simple README fix
+ * images: accept multiple filter with logical AND
+ * build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1
+  * Update vendor of container/storage
+ * build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0
+ * build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0
+ * Make LocalIP public function so Podman can use it
+ * Fix UnsetEnv for buildah bud
+ * Tests should rely only on static/unchanging images
+ * run: ensure that stdio pipes are labeled correctly
+ * build(deps): bump github.com/docker/docker
+ * Cirrus: Bump up to Fedora 35 & Ubuntu 21.10
+ * chroot: don't use the generate default seccomp filter for unit tests
+ * build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8
+ * ssh-agent: Increase timeout before we explicitly close connection
+ * docs/tutorials: update
+ * Clarify that manifest defaults to localhost as the registry name
+ * "config": remove a stray bit of debug output
+ * "commit": fix a flag typo
+ * Fix an error message: unlocking vs locking
+ * Expand the godoc for CommonBuildOptions.Secrets
+ * chroot: accept an "rw" option
+ * Add --unsetenv option to buildah commit and build
+ * define.TempDirForURL(): show CombinedOutput when a command fails
+ * config: support the variant field
+ * rootless: do not bind mount /sys if not needed
+ * Fix tutorial to specify command on buildah run line
+ * build: history should not contain ARG values
+ * docs: Use guaranteed path for go-md2man
+ * run: honor --network=none from builder if nothing specified
+  * networkpolicy: Should be enabled instead of default when explicitly set
+ * Add support for env var secret sources
+ * build(deps): bump github.com/docker/docker
+ * fix: another non-portable shebang
+ * Rootless containers users should use additional groups
+ * Support overlayfs path contains colon
+ * Report ignorefile location when no content added
+ * Add support for host.containers.internal in the /etc/hosts
+ * build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5
+ * imagebuildah: fix nil deref
+ * buildkit: add support for mount=type=cache
+ * Default secret mode to 400
+ * [CI:DOCS] Include manifest example usage
+ * docs: update buildah-from, buildah-pull 'platform' option compatibility notes
+ * docs: update buildah-build 'platform' option compatibility notes
+ * De-dockerize the man page as much as possible
+ * [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st
+ * docs: Fix and Update Containerfile man page with supported mount types
+ * mount: add tmpcopyup to tmpfs mount option
+ * buildkit: Add support for --mount=type=tmpfs
+ * build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1
+ * Fix command doc links in README.md
+ * build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1
+ * build: Add support for buildkit like --mount=type=bind
+ * Bump containerd to v1.5.7
+ * build(deps): bump github.com/docker/docker
+ * tests: stop pulling php, composer
+ * Fix .containerignore link file
+ * Cirrus: Fix defunct package metadata breaking cache
+ * build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0
+ * buildah build: add --all-platforms
+ * Add man page for Containerfile and .containerignore
+  * Plumb the remote logger throughout Buildah
+ * Replace fmt.Sprintf("%d", x) with strconv.Itoa(x)
+ * Run: Cleanup run directory after every RUN step
+ * build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0
+ * Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation
+ * Makefile: check for `-race` using `-mod=vendor`
+ * imagebuildah: fix an attempt to write to a nil map
+ * push: support to specify the compression format
+ * conformance: allow test cases to specify dockerUseBuildKit
+ * build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0
+ * build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1
+ * unmarshalConvertedConfig(): handle zstd compression
+ * tests/copy/copy: wire up compression options
+ * Update to github.com/vbauerster/mpb v7.1.5
+ * Add flouthoc to OWNERS
+ * build: Add additional step nodes when labels are modified
+ * Makefile: turn on race detection whenever it's available
+ * conformance: add more tests for exclusion short-circuiting
+ * Update VM Images + Drop prior-ubuntu testing
+ * Bump to v1.24.0-dev
+
+- Changelog for v1.23.0 (2021-09-13)
+ * Vendor in containers/common v0.44.0
+ * build(deps): bump github.com/containers/storage from 1.35.0 to 1.36.0
+ * Update 05-openshift-rootless-build.md
+ * build(deps): bump github.com/opencontainers/selinux from 1.8.4 to 1.8.5
+ * .cirrus.yml: run cross_build_task on Big Sur
+ * Makefile: update cross targets
+ * Add support for rootless overlay mounts
+ * Cirrus: Increase unit-test timeout
+ * Docs: Clarify rmi w/ manifest/index use
+ * build: mirror --authfile to filesystem if pointing to FD instead of file
+ * Fix build with .git url with branch
+ * manifest: rm should remove only manifests not referenced images.
+ * vendor: bump c/common to v0.43.3-0.20210902095222-a7acc160fb25
+ * Avoid rehashing and noop compression writer
+ * corrected man page section; .conf file to mention its man page
+ * copy: add --max-parallel-downloads to tune that copy option
+ * copier.Get(): try to avoid descending into directories
+ * tag: Support tagging manifest list instead of resolving to images
+ * Install new manpages to correct sections
+ * conformance: tighten up exception specifications
+ * Add support for libsubid
+ * Add epoch time field to buildah images
+ * Fix ownership of /home/build/.local/share/containers
+ * build(deps): bump github.com/containers/image/v5 from 5.15.2 to 5.16.0
+  * Rename bud to build, while keeping an alias for bud.
+ * Replace golang.org/x/crypto/ssh/terminal with golang.org/x/term
+ * build(deps): bump github.com/opencontainers/runc from 1.0.1 to 1.0.2
+ * build(deps): bump github.com/onsi/gomega from 1.15.0 to 1.16.0
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.3 to 1.7.4
+ * build(deps): bump github.com/containers/common from 0.43.1 to 0.43.2
+ * Move DiscoverContainerfile to pkg/util directory
+ * build(deps): bump github.com/containers/image/v5 from 5.15.1 to 5.15.2
+ * Remove some references to Docker
+ * build(deps): bump github.com/containers/image/v5 from 5.15.0 to 5.15.1
+ * imagebuildah: handle --manifest directly
+ * build(deps): bump github.com/containers/common from 0.42.1 to 0.43.1
+ * build(deps): bump github.com/opencontainers/selinux from 1.8.3 to 1.8.4
+ * executor: make sure imageMap is updated with terminatedStage
+ * tests/serve/serve.go: use a kernel-assigned port
+ * Bump go for vendor-in-container from 1.13 to 1.16
+ * imagebuildah: move multiple-platform building internal
+ * Adds GenerateStructure helper function to support rootfs-overlay.
+ * Run codespell to fix spelling
+ * Implement SSH RUN mount
+ * build(deps): bump github.com/onsi/gomega from 1.14.0 to 1.15.0
+ * Fix resolv.conf content with run --net=private
+ * run: fix nil deref using the option's logger
+ * build(deps): bump github.com/containerd/containerd from 1.5.1 to 1.5.5
+ * make vendor-in-container
+ * bud: teach --platform to take a list
+ * set base-image annotations
+ * build(deps): bump github.com/opencontainers/selinux from 1.8.2 to 1.8.3
+ * [CI:DOCS] Fix CHANGELOG.md
+ * Bump to v1.23.0-dev [NO TESTS NEEDED]
+ * Accept repositories on login/logout
+
+- Changelog for v1.22.0 (2021-08-02)
+ * c/image, c/storage, c/common vendor before Podman 3.3 release
+ * WIP: tests: new assert()
+ * Proposed patch for 3399 (shadowutils)
+ * Fix handling of --restore shadow-utils
+ * build(deps): bump github.com/containers/image/v5 from 5.13.2 to 5.14.0
+ * runtime-flag (debug) test: handle old & new runc
+ * build(deps): bump github.com/containers/storage from 1.32.6 to 1.33.0
+ * Allow dst and destination for target in secret mounts
+ * Multi-arch: Always push updated version-tagged img
+ * Add a few tests on cgroups V2
+ * imagebuildah.stageExecutor.prepare(): remove pseudonym check
+ * refine dangling filter
+ * Chown with environment variables not set should fail
+ * Just restore protections of shadow-utils
+ * build(deps): bump github.com/opencontainers/runc from 1.0.0 to 1.0.1
+ * Remove specific kernel version number requirement from install.md
+ * Multi-arch image workflow: Make steps generic
+ * chroot: fix environment value leakage to intermediate processes
+ * Update nix pin with `make nixpkgs`
+ * buildah source - create and manage source images
+ * Update cirrus-cron notification GH workflow
+ * Reuse code from containers/common/pkg/parse
+ * Cirrus: Freshen VM images
+ * build(deps): bump github.com/containers/storage from 1.32.5 to 1.32.6
+  * Fix excludes exception beginning with / or ./
+ * Fix syntax for --manifest example
+ * build(deps): bump github.com/onsi/gomega from 1.13.0 to 1.14.0
+ * vendor containers/common@main
+ * Cirrus: Drop dependence on fedora-minimal
+ * Adjust conformance-test error-message regex
+ * Workaround appearance of differing debug messages
+ * Cirrus: Install docker from package cache
+ * build(deps): bump github.com/containers/ocicrypt from 1.1.1 to 1.1.2
+ * Switch rusagelogfile to use options.Out
+ * build(deps): bump github.com/containers/storage from 1.32.4 to 1.32.5
+ * Turn stdio back to blocking when command finishes
+ * Add support for default network creation
+ * Cirrus: Updates for master->main rename
+ * Change references from master to main
+ * Add `--env` and `--workingdir` flags to run command
+ * build(deps): bump github.com/opencontainers/runc
+ * [CI:DOCS] buildah bud: spelling --ignore-file requires parameter
+ * [CI:DOCS] push/pull: clarify supported transports
+ * Remove unused function arguments
+ * Create mountOptions for mount command flags
+ * Extract version command implementation to function
+ * Add --json flags to `mount` and `version` commands
+ * build(deps): bump github.com/containers/storage from 1.32.2 to 1.32.3
+ * build(deps): bump github.com/containers/common from 0.40.0 to 0.40.1
+ * copier.Put(): set xattrs after ownership
+ * buildah add/copy: spelling
+ * build(deps): bump github.com/containers/common from 0.39.0 to 0.40.0
+ * buildah copy and buildah add should support .containerignore
+ * Remove unused util.StartsWithValidTransport
+ * Fix documentation of the --format option of buildah push
+ * Don't use alltransports.ParseImageName with known transports
+ * build(deps): bump github.com/containers/image/v5 from 5.13.0 to 5.13.1
+ * man pages: clarify `rmi` removes dangling parents
+  * tests: make it easier to override the location of the copy helper
+ * build(deps): bump github.com/containers/image/v5 from 5.12.0 to 5.13.0
+ * [CI:DOCS] Fix links to c/image master branch
+ * imagebuildah: use the specified logger for logging preprocessing warnings
+ * Fix copy into workdir for a single file
+ * Fix docs links due to branch rename
+ * Update nix pin with `make nixpkgs`
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.2 to 1.7.3
+ * build(deps): bump github.com/opencontainers/selinux from 1.8.1 to 1.8.2
+ * build(deps): bump go.etcd.io/bbolt from 1.3.5 to 1.3.6
+ * build(deps): bump github.com/containers/storage from 1.32.1 to 1.32.2
+ * build(deps): bump github.com/mattn/go-shellwords from 1.0.11 to 1.0.12
+ * build(deps): bump github.com/onsi/ginkgo from 1.16.3 to 1.16.4
+ * fix(docs): typo
+ * Move to v1.22.0-dev
+ * Fix handling of auth.json file while in a user namespace
+ * Add rusage-logfile flag to optionally send rusage to a file
+ * imagebuildah: redo step logging
+ * build(deps): bump github.com/onsi/ginkgo from 1.16.2 to 1.16.3
+ * build(deps): bump github.com/containers/storage from 1.32.0 to 1.32.1
+ * Add volumes to make running buildah within a container easier
+ * build(deps): bump github.com/onsi/gomega from 1.12.0 to 1.13.0
+ * Add and use a "copy" helper instead of podman load/save
+ * Bump github.com/containers/common from 0.38.4 to 0.39.0
+ * containerImageRef/containerImageSource: don't buffer uncompressed layers
+ * containerImageRef(): squashed images have no parent images
+ * Sync. workflow across skopeo, buildah, and podman
+ * Bump github.com/containers/storage from 1.31.1 to 1.31.2
+ * Bump github.com/opencontainers/runc from 1.0.0-rc94 to 1.0.0-rc95
+ * Bump to v1.21.1-dev [NO TESTS NEEDED]
+
+- Changelog for v1.21.0 (2021-05-19)
+ * Don't blow up if cpp detects errors
+ * Vendor in containers/common v0.38.4
+ * Remove 'buildah run --security-opt' from completion
+ * update c/common
+ * Fix handling of --default-mounts-file
+ * update vendor of containers/storage v1.31.1
+ * Bump github.com/containers/storage from 1.30.3 to 1.31.0
+ * Send logrus messages back to caller when building
+ * github: Fix bad repo. ref in workflow config
+ * Check earlier for bad image tags name
+ * buildah bud: fix containers/podman/issues/10307
+ * Bump github.com/containers/storage from 1.30.1 to 1.30.3
+ * Cirrus: Support [CI:DOCS] test skipping
+ * Notification email for cirrus-cron build failures
+ * Bump github.com/opencontainers/runc from 1.0.0-rc93 to 1.0.0-rc94
+ * Fix race condition
+ * Fix copy race while walking paths
+ * Preserve ownership of lower directory when doing an overlay mount
+ * Bump github.com/onsi/gomega from 1.11.0 to 1.12.0
+ * Update nix pin with `make nixpkgs`
+ * codespell cleanup
+ * Multi-arch github-action workflow unification
+ * Bump github.com/containers/image/v5 from 5.11.1 to 5.12.0
+ * Bump github.com/onsi/ginkgo from 1.16.1 to 1.16.2
+ * imagebuildah: ignore signatures when tagging images
+ * update to latest libimage
+ * Bump github.com/containers/common from 0.37.0 to 0.37.1
+ * Bump github.com/containers/storage from 1.30.0 to 1.30.1
+ * Upgrade to GitHub-native Dependabot
+ * Document location of auth.json file if XDG_RUNTIME_DIR is not set
+ * run.bats: fix flake in run-user test
+ * Cirrus: Update F34beta -> F34
+ * pr-should-include-tests: try to make work in buildah
+ * runUsingRuntime: when relaying error from the runtime, mention that
+ * Run(): avoid Mkdir() into the rootfs
+ * imagebuildah: replace archive with chrootarchive
+ * imagebuildah.StageExecutor.volumeCacheSaveVFS(): set up bind mounts
+ * conformance: use :Z with transient mounts when SELinux is enabled
+ * bud.bats: fix a bats warning
+ * imagebuildah: create volume directories when using overlays
+ * imagebuildah: drop resolveSymlink()
+ * namespaces test - refactoring and cleanup
+ * Refactor 'idmapping' system test
+ * Cirrus: Update Ubuntu images to 21.04
+ * Tiny fixes in bud system tests
+  * Add compatibility wrappers for removed packages
+ * Fix expected message at pulling image
+ * Fix system tests of 'bud' subcommand
+ * [CI:DOCS] Update steps for CentOS runc users
+ * Add support for secret mounts
+ * Add buildah manifest rm command
+ * restore push/pull and util API
+ * [CI:DOCS] Remove older distro docs
+ * Rename rhel secrets to subscriptions
+ * vendor in openshift/imagebuilder
+ * Remove buildah bud --loglevel ...
+ * use new containers/common/libimage package
+ * Fix copier when using globs
+ * Test namespace flags of 'bud' subcommand
+ * Add system test of 'bud' subcommand
+ * Output names of multiple tags in buildah bud
+ * push to docker test: don't get fooled by podman
+ * copier: add Remove()
+ * build(deps): bump github.com/containers/image/v5 from 5.10.5 to 5.11.1
+ * Restore log timestamps
+ * Add system test of 'buildah help' with a tiny fix
+ * tests: copy.bats: fix infinite hang
+ * Do not force hard code to crun in rootless mode
+ * build(deps): bump github.com/openshift/imagebuilder from 1.2.0 to 1.2.1
+ * build(deps): bump github.com/containers/ocicrypt from 1.1.0 to 1.1.1
+ * build(deps): bump github.com/containers/common from 0.35.4 to 0.36.0
+ * Fix arg missing warning in bud
+ * Check without flag in 'from --cgroup-parent' test
+ * Minor fixes to Buildah as a library tutorial documentation
+ * Add system test of 'buildah version' for packaged buildah
+ * Add a few system tests of 'buildah from'
+ * Log the final error with %+v at logging level "trace"
+ * copier: add GetOptions.NoCrossDevice
+ * Update nix pin with `make nixpkgs`
+ * Bump to v1.20.2-dev
+
+- Changelog for v1.20.1 (2021-04-13)
+ * Run container with isolation type set at 'from'
+ * bats helpers.bash - minor refactoring
+ * Bump containers/storage vendor to v1.29.0
+ * build(deps): bump github.com/onsi/ginkgo from 1.16.0 to 1.16.1
+ * Cirrus: Update VMs w/ F34beta
+ * CLI add/copy: add a --from option
+ * build(deps): bump github.com/onsi/ginkgo from 1.15.2 to 1.16.0
+ * Add authentication system tests for 'commit' and 'bud'
+ * fix local image lookup for custom platform
+ * Double-check existence of OCI runtimes
+ * Cirrus: Make use of shared get_ci_vm container
+ * Add system tests of "buildah run"
+ * Update nix pin with `make nixpkgs`
+ * Remove some stuttering on returns errors
+ * Setup alias for --tty to --terminal
+ * Add conformance tests for COPY /...
+ * Put a few more minutes on the clock for the CI conformance test
+ * Add a conformance test for COPY --from $symlink
+ * Add conformance tests for COPY ""
+ * Check for symlink in builtin volume
+ * Sort all mounts by destination directory
+ * System-test cleanup
+ * Export parse.Platform string to be used by podman-remote
+ * blobcache: fix sequencing error
+ * build(deps): bump github.com/containers/common from 0.35.3 to 0.35.4
+ * Fix URL in demos/buildah_multi_stage.sh
+ * Add a few system tests
+ * [NO TESTS NEEDED] Use --recurse-modules when building git context
+ * Bump to v1.20.1-dev
+
+- Changelog for v1.20.0 (2021-03-25)
+ * vendor in containers/storage v1.28.1
+ * build(deps): bump github.com/containers/common from 0.35.2 to 0.35.3
+ * tests: prefetch: use buildah, not podman, for pulls
+ * Use faster way to check image tag existence during multi-arch build
+ * Add information about multi-arch images to the Readme
+ * COPY --chown: expand the conformance test
+ * pkg/chrootuser: use a bufio.Scanner
+ * [CI:DOCS] Fix rootful typo in docs
+ * build(deps): bump github.com/onsi/ginkgo from 1.15.1 to 1.15.2
+ * Add documentation and testing for .containerignore
+ * build(deps): bump github.com/sirupsen/logrus from 1.8.0 to 1.8.1
+ * build(deps): bump github.com/hashicorp/go-multierror from 1.1.0 to 1.1.1
+ * Lookup Containerfile if user specifies a directory
+ * Add Tag format placeholder to docs
+ * copier: ignore sockets
+ * image: propagate errors from extractRootfs
+ * Remove system test of 'buildah containers -a'
+ * Clarify userns options are usable only as root in man pages
+ * Fix system test of 'containers -a'
+ * Remove duplicated code in addcopy
+ * build(deps): bump github.com/onsi/ginkgo from 1.15.0 to 1.15.1
+ * build(deps): bump github.com/onsi/gomega from 1.10.5 to 1.11.0
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.1 to 1.7.2
+ * Update multi-arch buildah build setup with new logic
+ * Update nix pin with `make nixpkgs`
+ * overlay.bats: fix the "overlay source permissions" test
+ * imagebuildah: use overlay for volumes when using overlay
+ * Make PolicyMap and PullPolicy names align
+ * copier: add GetOptions.IgnoreUnreadable
+ * Check local image to match system context
+ * fix: Containerfiles - smaller set of userns u/gids
+ * Set upperdir permissions based on source
+ * Shrink the vendoring size of pkc/cli
+ * Clarify image name match failure message
+ * ADD/COPY: create the destination directory first, chroot to it
+ * copier.GetOptions: add NoDerefSymLinks
+ * copier: add an Eval function
+ * Update system test for 'from --cap-add/drop'
+ * copier: fix a renaming bug
+ * copier: return child process stderr if we can't JSON decode the response
+ * Add some system tests
+ * build(deps): bump github.com/containers/storage from 1.26.0 to 1.27.0
+ * complement add/copy --chmod documentation
+ * buildah login and logout, do not need to enter user namespace
+ * Add multi-arch image build
+ * chmod/chown added/fixed in bash completions
+ * OWNERS: add @lsm5
+ * buildah add/copy --chmod dockerfile implementation
+ * bump github.com/openshift/imagebuilder from 1.1.8 to 1.2.0
+ * buildah add/copy --chmod cli implementation for files and urls
+ * Make sure we set the buildah version label
+ * Isolation strings, should match user input
+ * [CI:DOCS] buildah-from.md: remove dup arch,os
+ * build(deps): bump github.com/containers/image/v5 from 5.10.2 to 5.10.3
+ * Cirrus: Temp. disable prior-fedora (F32) testing
+ * pr-should-include-tests: recognized "renamed" tests
+ * build(deps): bump github.com/sirupsen/logrus from 1.7.0 to 1.8.0
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.0 to 1.7.1
+ * build(deps): bump github.com/containers/common from 0.34.2 to 0.35.0
+ * Fix reaping of stages with no instructions
+ * add stale bot
+ * Add base image name to comment
+ * build(deps): bump github.com/spf13/cobra from 1.1.1 to 1.1.3
+ * Don't fail copy to emptydir
+ * buildah: use volatile containers
+ * vendor: update containers/storage
+ * Eliminate the use of containers/building import in pkg subdirs
+ * Add more support for removing config
+ * Improve messages about --cache-from not being supported
+ * Revert patch to allow COPY/ADD of empty dirs.
+ * Don't fail copy to emptydir
+ * Fix tutorial for rootless mode
+ * Fix caching layers with build args
+ * Vendor in containers/image v5.10.2
+ * build(deps): bump github.com/containers/common from 0.34.0 to 0.34.2
+ * build(deps): bump github.com/onsi/ginkgo from 1.14.2 to 1.15.0
+ * 'make validate': require PRs to include tests
+ * build(deps): bump github.com/onsi/gomega from 1.10.4 to 1.10.5
+ * build(deps): bump github.com/containers/storage from 1.24.5 to 1.25.0
+ * Use chown function for U volume flag from containers/common repository
+ * --iidfile: print hash prefix
+ * bump containernetworking/cni to v0.8.1 - fix for CVE-2021-20206
+ * run: fix check for host pid namespace
+ * Finish plumbing for buildah bud --manifest
+ * buildah manifest add localimage should work
+ * Stop testing directory permissions with latest docker
+ * Fix build arg check
+ * build(deps): bump github.com/containers/ocicrypt from 1.0.3 to 1.1.0
+ * [ci:docs] Fix man page for buildah push
+ * Update nix pin with `make nixpkgs`
+ * Bump to containers/image v5.10.1
+ * Rebuild layer if a change in ARG is detected
+ * Bump golang.org/x/crypto to the latest
+ * Add Ashley and Urvashi to Approvers
+ * local image lookup by digest
+ * Use build-arg ENV val from local environment if set
+ * Pick default OCI Runtime from containers.conf
+ * Added required devel packages
+ * Cirrus: Native OSX Build
+ * Cirrus: Two minor cleanup items
+ * Workaround for RHEL gating test failure
+ * build(deps): bump github.com/stretchr/testify from 1.6.1 to 1.7.0
+ * build(deps): bump github.com/mattn/go-shellwords from 1.0.10 to 1.0.11
+ * Reset upstream branch to dev version
+ * If destination does not exist, do not throw error
+
+- Changelog for v1.19.0 (2021-01-08)
+ * Update vendor of containers/storage and containers/common
+ * Buildah inspect should be able to inspect manifests
+ * Make buildah push support pushing manifests lists and digests
+ * Fix handling of TMPDIR environment variable
+ * Add support for --manifest flags
+ * Upper directory should match mode of destination directory
+ * Only grab the OS, Arch if the user actually specified them
+ * Use --arch and --os and --variant options to select architecture and os
+ * Cirrus: Track libseccomp and golang version
+ * copier.PutOptions: add an "IgnoreDevices" flag
+ * fix: `rmi --prune` when parent image is in store.
+ * build(deps): bump github.com/containers/storage from 1.24.3 to 1.24.4
+ * build(deps): bump github.com/containers/common from 0.31.1 to 0.31.2
+ * Allow users to specify stdin into containers
+ * Drop log message on failure to mount on /sys file systems to info
+ * Spelling
+ * SELinux no longer requires a tag.
+ * build(deps): bump github.com/opencontainers/selinux from 1.6.0 to 1.8.0
+ * build(deps): bump github.com/containers/common from 0.31.0 to 0.31.1
+ * Update nix pin with `make nixpkgs`
+ * Switch references of /var/run -> /run
+ * Allow FROM to be overridden with from option
+ * copier: don't assume we can chroot() on Unixy systems
+ * copier: add PutOptions.NoOverwriteDirNonDir, Get/PutOptions.Rename
+ * copier: handle replacing directories with not-directories
+ * copier: Put: skip entries with zero-length names
+ * build(deps): bump github.com/containers/storage from 1.24.2 to 1.24.3
+ * Add U volume flag to chown source volumes
+ * Turn off PRIOR_UBUNTU Test until vm is updated
+ * pkg, cli: rootless uses correct isolation
+ * build(deps): bump github.com/onsi/gomega from 1.10.3 to 1.10.4
+ * update installation doc to reflect current status
+ * Move away from using docker.io
+ * enable short-name aliasing
+ * build(deps): bump github.com/containers/storage from 1.24.1 to 1.24.2
+ * build(deps): bump github.com/containers/common from 0.30.0 to 0.31.0
+ * Throw errors when using bogus --network flags
+ * pkg/supplemented test: replace our null blobinfocache
+ * build(deps): bump github.com/containers/common from 0.29.0 to 0.30.0
+ * inserts forgotten quotation mark
+ * Not prefer use local image create/add manifest
+ * Add container information to .containerenv
+ * Add --ignorefile flag to use alternate .dockerignore flags
+ * Add a source debug build
+ * Fix crash on invalid filter commands
+ * build(deps): bump github.com/containers/common from 0.27.0 to 0.29.0
+ * Switch to using containers/common pkg's
+ * fix: non-portable shebang #2812
+ * Remove copy/paste errors that leaked `Podman` into man pages.
+ * Add suggests cpp to spec file
+ * Apply suggestions from code review
+ * update docs for debian testing and unstable
+ * imagebuildah: disable pseudo-terminals for RUN
+ * Compute diffID for mapped-layer at creating image source
+ * intermediateImageExists: ignore images whose history we can't read
+ * Bump to v1.19.0-dev
+ * build(deps): bump github.com/containers/common from 0.26.3 to 0.27.0
+
+- Changelog for v1.18.0 (2020-11-16)
+ * Fix testing error caused by simultaneous merge
+ * Vendor in containers/storage v1.24.0
+ * short-names aliasing
+ * Add --policy flag to buildah pull
+ * Stop overwrapping and stuttering
+ * copier.Get(): ignore ENOTSUP/ENOSYS when listing xattrs
+ * Run: don't forcibly disable UTS namespaces in rootless mode
+ * test: ensure non-directory in a Dockerfile path is handled correctly
+ * Add a few tests for `pull` command
+ * Fix buildah config --cmd to handle array
+ * build(deps): bump github.com/containers/storage from 1.23.8 to 1.23.9
+ * Fix NPE when Dockerfile path contains non-directory entries
+ * Update buildah bud man page from podman build man page
+ * Move declaration of decryption-keys to common cli
+ * Run: correctly call copier.Mkdir
+ * util: digging UID/GID out of os.FileInfo should work on Unix
+ * imagebuildah.getImageTypeAndHistoryAndDiffIDs: cache results
+ * Verify userns-uid-map and userns-gid-map input
+ * Use CPP, CC and flags in dep check scripts
+ * Avoid overriding LDFLAGS in Makefile
+ * ADD: handle --chown on URLs
+ * Update nix pin with `make nixpkgs`
+ * (*Builder).Run: MkdirAll: handle EEXIST error
+ * copier: try to force loading of nsswitch modules before chroot()
+ * fix MkdirAll usage
+ * build(deps): bump github.com/containers/common from 0.26.2 to 0.26.3
+ * build(deps): bump github.com/containers/storage from 1.23.7 to 1.23.8
+ * Use osusergo build tag for static build
+ * imagebuildah: cache should take image format into account
+ * Bump to v1.18.0-dev
+
+- Changelog for v1.17.0 (2020-10-29)
+ * Handle cases where other tools mount/unmount containers
+ * overlay.MountReadOnly: support RO overlay mounts
+ * overlay: use fusermount for rootless umounts
+ * overlay: fix umount
+ * Switch default log level of Buildah to Warn. Users need to see these messages
+ * Drop error messages about OCI/Docker format to Warning level
+ * build(deps): bump github.com/containers/common from 0.26.0 to 0.26.2
+ * tests/testreport: adjust for API break in storage v1.23.6
+ * build(deps): bump github.com/containers/storage from 1.23.5 to 1.23.7
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.6.5 to 1.6.6
+ * copier: put: ignore Typeflag="g"
+ * Use curl to get repo file (fix #2714)
+ * build(deps): bump github.com/containers/common from 0.25.0 to 0.26.0
+ * build(deps): bump github.com/spf13/cobra from 1.0.0 to 1.1.1
+ * Remove docs that refer to bors, since we're not using it
+ * Buildah bud should not use stdin by default
+ * bump containerd, docker, and golang.org/x/sys
+ * Makefile: cross: remove windows.386 target
+ * copier.copierHandlerPut: don't check length when there are errors
+ * Stop excessive wrapping
+ * CI: require that conformance tests pass
+ * bump(github.com/openshift/imagebuilder) to v1.1.8
+ * Skip tlsVerify insecure BUILD_REGISTRY_SOURCES
+ * Fix build path wrong https://github.com/containers/podman/issues/7993
+ * refactor pullpolicy to avoid deps
+ * build(deps): bump github.com/containers/common from 0.24.0 to 0.25.0
+ * CI: run gating tasks with a lot more memory
+ * ADD and COPY: descend into excluded directories, sometimes
+ * copier: add more context to a couple of error messages
+ * copier: check an error earlier
+ * copier: log stderr output as debug on success
+ * Update nix pin with `make nixpkgs`
+ * Set directory ownership when copied with ID mapping
+ * build(deps): bump github.com/sirupsen/logrus from 1.6.0 to 1.7.0
+ * build(deps): bump github.com/containers/common from 0.23.0 to 0.24.0
+ * Cirrus: Remove bors artifacts
+ * Sort build flag definitions alphabetically
+ * ADD: only expand archives at the right time
+ * Remove configuration for bors
+ * Shell Completion for podman build flags
+ * Bump c/common to v0.24.0
+ * New CI check: xref --help vs man pages
+ * CI: re-enable several linters
+ * Move --userns-uid-map/--userns-gid-map description into buildah man page
+ * add: preserve ownerships and permissions on ADDed archives
+ * Makefile: tweak the cross-compile target
+ * Bump containers/common to v0.23.0
+ * chroot: create bind mount targets 0755 instead of 0700
+ * Change call to Split() to safer SplitN()
+ * chroot: fix handling of errno seccomp rules
+ * build(deps): bump github.com/containers/image/v5 from 5.5.2 to 5.6.0
+ * Add In Progress section to contributing
+ * integration tests: make sure tests run in ${topdir}/tests
+ * Run(): ignore containers.conf's environment configuration
+ * Warn when setting healthcheck in OCI format
+ * Cirrus: Skip git-validate on branches
+ * tools: update git-validation to the latest commit
+ * tools: update golangci-lint to v1.18.0
+ * Add a few tests of push command
+ * Add(): fix handling of relative paths with no ContextDir
+ * build(deps): bump github.com/containers/common from 0.21.0 to 0.22.0
+ * Lint: Use same linters as podman
+ * Validate: reference HEAD
+ * Fix buildah mount to display container names not ids
+ * Update nix pin with `make nixpkgs`
+ * Add missing --format option in buildah from man page
+ * Fix up code based on codespell
+ * build(deps): bump github.com/openshift/imagebuilder from 1.1.6 to 1.1.7
+ * build(deps): bump github.com/containers/storage from 1.23.4 to 1.23.5
+ * Improve buildah completions
+ * Cirrus: Fix validate commit epoch
+ * Fix bash completion of manifest flags
+ * Uniform some man pages
+ * Update Buildah Tutorial to address BZ1867426
+ * Update bash completion of `manifest add` sub command
+ * copier.Get(): hard link targets shouldn't be relative paths
+ * build(deps): bump github.com/onsi/gomega from 1.10.1 to 1.10.2
+ * Pass timestamp down to history lines
+ * Timestamp gets updated every time you inspect an image
+ * bud.bats: use absolute paths in newly-added tests
+ * contrib/cirrus/lib.sh: don't use CN for the hostname
+ * tests: Add some tests
+ * Update `manifest add` man page
+ * Extend flags of `manifest add`
+ * build(deps): bump github.com/containers/storage from 1.23.3 to 1.23.4
+ * build(deps): bump github.com/onsi/ginkgo from 1.14.0 to 1.14.1
+ * Bump to v1.17.0-dev
+ * CI: expand cross-compile checks
+
+- Changelog for v1.16.0 (2020-09-03)
+ * fix build on 32bit arches
+ * containerImageRef.NewImageSource(): don't always force timestamps
+ * Add fuse module warning to image readme
+ * Heed our retry delay option values when retrying commit/pull/push
+ * Switch to containers/common for seccomp
+ * Use --timestamp rather then --omit-timestamp
+ * docs: remove outdated notice
+ * docs: remove outdated notice
+ * build-using-dockerfile: add a hidden --log-rusage flag
+ * build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
+ * Discard ReportWriter if user sets options.Quiet
+ * build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3
+ * Fix ownership of content copied using COPY --from
+ * newTarDigester: zero out timestamps in tar headers
+ * Update nix pin with `make nixpkgs`
+ * bud.bats: correct .dockerignore integration tests
+ * Use pipes for copying
+ * run: include stdout in error message
+ * run: use the correct error for errors.Wrapf
+ * copier: un-export internal types
+ * copier: add Mkdir()
+ * in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE
+ * docs/buildah-commit.md: tweak some wording, add a --rm example
+ * imagebuildah: don’t blank out destination names when COPYing
+ * Replace retry functions with common/pkg/retry
+ * StageExecutor.historyMatches: compare timestamps using .Equal
+ * Update vendor of containers/common
+ * Fix errors found in coverity scan
+ * Change namespace handling flags to better match podman commands
+ * conformance testing: ignore buildah.BuilderIdentityAnnotation labels
+ * Vendor in containers/storage v1.23.0
+ * Add buildah.IsContainer interface
+ * Avoid feeding run_buildah to pipe
+ * fix(buildahimage): add xz dependency in buildah image
+ * Bump github.com/containers/common from 0.15.2 to 0.18.0
+ * Howto for rootless image building from OpenShift
+ * Add --omit-timestamp flag to buildah bud
+ * Update nix pin with `make nixpkgs`
+ * Shutdown storage on failures
+ * Handle COPY --from when an argument is used
+ * Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0
+ * Cirrus: Use newly built VM images
+ * Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92
+ * Enhance the .dockerignore man pages
+ * conformance: add a test for COPY from subdirectory
+ * fix bug manifest inspect
+ * Add documentation for .dockerignore
+ * Add BuilderIdentityAnnotation to identify buildah version
+ * DOC: Add quay.io/containers/buildah image to README.md
+ * Update buildahimages readme
+ * fix spelling mistake in "info" command result display
+ * Don't bind /etc/host and /etc/resolv.conf if network is not present
+ * blobcache: avoid an unnecessary NewImage()
+ * Build static binary with `buildGoModule`
+ * copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit
+ * tarFilterer: handle multiple archives
+ * Fix a race we hit during conformance tests
+ * Rework conformance testing
+ * Update 02-registries-repositories.md
+ * test-unit: invoke cmd/buildah tests with --flags
+ * parse: fix a type mismatch in a test
+ * Fix compilation of tests/testreport/testreport
+ * build.sh: log the version of Go that we're using
+ * test-unit: increase the test timeout to 40/45 minutes
+ * Add the "copier" package
+ * Fix & add notes regarding problematic language in codebase
+ * Add dependency on github.com/stretchr/testify/require
+ * CompositeDigester: add the ability to filter tar streams
+ * BATS tests: make more robust
+ * vendor golang.org/x/text@v0.3.3
+ * Switch golang 1.12 to golang 1.13
+ * imagebuildah: wait for stages that might not have even started yet
+ * chroot, run: not fail on bind mounts from /sys
+ * chroot: do not use setgroups if it is blocked
+ * Set engine env from containers.conf
+ * imagebuildah: return the right stage's image as the "final" image
+ * Fix a help string
+ * Deduplicate environment variables
+ * switch containers/libpod to containers/podman
+ * Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
+ * Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0
+ * Mask out /sys/dev to prevent information leak
+ * linux: skip errors from the runtime kill
+ * Mask over the /sys/fs/selinux in mask branch
+ * Add VFS additional image store to container
+ * tests: add auth tests
+ * Allow "readonly" as alias to "ro" in mount options
+ * Ignore OS X specific consistency mount option
+ * Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0
+ * Bump github.com/containers/common from 0.14.0 to 0.15.2
+ * Rootless Buildah should default to IsolationOCIRootless
+ * imagebuildah: fix inheriting multi-stage builds
+ * Make imagebuildah.BuildOptions.Architecture/OS optional
+ * Make imagebuildah.BuildOptions.Jobs optional
+ * Resolve a possible race in imagebuildah.Executor.startStage()
+ * Switch scripts to use containers.conf
+ * Bump openshift/imagebuilder to v1.1.6
+ * Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5
+ * buildah, bud: support --jobs=N for parallel execution
+ * executor: refactor build code inside new function
+ * Add bud regression tests
+ * Cirrus: Fix missing htpasswd in registry img
+ * docs: clarify the 'triples' format
+ * CHANGELOG.md: Fix markdown formatting
+ * Add nix derivation for static builds
+ * Bump to v1.16.0-dev
+ * add version centos7 for compatibility
+
+- Changelog for v1.15.0 (2020-06-17)
+ * Bump github.com/containers/common from 0.12.0 to 0.13.1
+ * Bump github.com/containers/storage from 1.20.1 to 1.20.2
+ * Bump github.com/seccomp/containers-golang from 0.4.1 to 0.5.0
+ * Bump github.com/stretchr/testify from 1.6.0 to 1.6.1
+ * Bump github.com/opencontainers/runc from 1.0.0-rc9 to 1.0.0-rc90
+ * Add CVE-2020-10696 to CHANGELOG.md and changelog.txt
+ * Bump github.com/stretchr/testify from 1.5.1 to 1.6.0
+ * Bump github.com/onsi/ginkgo from 1.12.2 to 1.12.3
+ * Vendor in containers/common v0.12.0
+ * fix lighttpd example
+ * Vendor in new go.etcd.io/bbolt
+ * Bump github.com/onsi/ginkgo from 1.12.1 to 1.12.2
+ * Bump imagebuilder for ARG fix
+ * Bump github.com/containers/common from 0.11.2 to 0.11.4
+ * remove dependency on openshift struct
+ * Warn on unset build arguments
+ * vendor: update seccomp/containers-golang to v0.4.1
+ * Amended docs
+ * Updated docs
+ * clean up comments
+ * update exit code for tests
+ * Implement commit for encryption
+ * implementation of encrypt/decrypt push/pull/bud/from
+ * fix resolve docker image name as transport
+ * Bump github.com/opencontainers/go-digest from 1.0.0-rc1 to 1.0.0
+ * Bump github.com/onsi/ginkgo from 1.12.0 to 1.12.1
+ * Bump github.com/containers/storage from 1.19.1 to 1.19.2
+ * Bump github.com/containers/image/v5 from 5.4.3 to 5.4.4
+ * Add preliminary profiling support to the CLI
+ * Bump github.com/containers/common from 0.10.0 to 0.11.2
+ * Evaluate symlinks in build context directory
+ * fix error info about get signatures for containerImageSource
+ * Add Security Policy
+ * Cirrus: Fixes from review feedback
+ * Bump github.com/containers/storage from 1.19.0 to 1.19.1
+ * Bump github.com/sirupsen/logrus from 1.5.0 to 1.6.0
+ * imagebuildah: stages shouldn't count as their base images
+ * Update containers/common v0.10.0
+ * Bump github.com/fsouza/go-dockerclient from 1.6.4 to 1.6.5
+ * Add registry to buildahimage Dockerfiles
+ * Cirrus: Use pre-installed VM packages + F32
+ * Cirrus: Re-enable all distro versions
+ * Cirrus: Update to F31 + Use cache images
+ * golangci-lint: Disable gosimple
+ * Lower number of golangci-lint threads
+ * Fix permissions on containers.conf
+ * Don't force tests to use runc
+ * Bump github.com/containers/common from 0.9.1 to 0.9.5
+ * Return exit code from failed containers
+ * Bump github.com/containers/storage from 1.18.2 to 1.19.0
+ * Bump github.com/containers/common from 0.9.0 to 0.9.1
+ * cgroup_manager should be under [engine]
+ * Use c/common/pkg/auth in login/logout
+ * Cirrus: Temporarily disable Ubuntu 19 testing
+ * Add containers.conf to stablebyhand build
+ * Update gitignore to exclude test Dockerfiles
+ * Bump github.com/fsouza/go-dockerclient from 1.6.3 to 1.6.4
+ * Bump github.com/containers/common from 0.8.1 to 0.9.0
+ * Bump back to v1.15.0-dev
+ * Remove warning for systemd inside of container
+
+- Changelog for v1.14.8 (2020-04-09)
+ * Run (make vendor)
+ * Run (make -C tests/tools vendor)
+ * Run (go mod tidy) before (go mod vendor) again
+ * Fix (make vendor)
+ * Bump validation
+ * Bump back to v1.15.0-dev
+
+- Changelog for v1.14.7 (2020-04-07)
+ * Bump github.com/containers/image/v5 from 5.3.1 to 5.4.3
+ * make vendor: run `tidy` after `vendor`
+ * Do not skip the directory when the ignore pattern matches
+ * Bump github.com/containers/common from 0.7.0 to 0.8.1
+ * Downgrade sirupsen/logrus from 1.4.2
+ * Fix errorf conventions
+ * dockerignore tests : remove symlinks, rework
+ * Bump back to v1.15.0-dev
+
+- Changelog for v1.14.6 (2020-04-02)
+ * bud.bats - cleanup, refactoring
+ * vendor in latest containers/storage 1.18.0 and containers/common v0.7.0
+ * Bump github.com/spf13/cobra from 0.0.6 to 0.0.7
+ * Bump github.com/containers/storage from 1.16.5 to 1.17.0
+ * Bump github.com/containers/image/v5 from 5.2.1 to 5.3.1
+ * Fix Amazon install step
+ * Bump back to v1.15.0-dev
+ * Fix bud-build-arg-cache test
+ * Make image history work correctly with new args handling
+ * Don't add args to the RUN environment from the Builder
+ * Update github.com/openshift/imagebuilder to v1.1.4
+ * Add .swp files to .gitignore
+
+- Changelog for v1.14.5 (2020-03-26)
+ * revert #2246 FIPS mode change
+ * Bump back to v1.15.0-dev
+ * image with dup layers: we now have one on quay
+ * digest test : make more robust
+
+- Changelog for v1.14.4 (2020-03-25)
+ * Fix fips-mode check for RHEL8 boxes
+ * Fix potential CVE in tarfile w/ symlink (Edit 02-Jun-2020: Addresses CVE-2020-10696)
+ * Fix .dockerignore with globs and ! commands
+ * update install steps for Amazon Linux 2
+ * Bump github.com/openshift/imagebuilder from 1.1.2 to 1.1.3
+ * Add comment for RUN command in volume ownership test
+ * Run stat command directly for volume ownership test
+ * vendor in containers/common v0.6.1
+ * Cleanup go.sum
+ * Bump back to v1.15.0-dev
+
+- Changelog for v1.14.3 (2020-03-17)
+ * Update containers/storage to v1.16.5
+ * Bump github.com/containers/storage from 1.16.2 to 1.16.4
+ * Bump github.com/openshift/imagebuilder from 1.1.1 to 1.1.2
+ * Update github.com/openshift/imagebuilder vendoring
+ * Update unshare man page to fix script example
+ * Fix compilation errors on non linux platforms
+ * Bump containers/common and opencontainers/selinux versions
+ * Add tests for volume ownership
+ * Preserve volume uid and gid through subsequent commands
+ * Fix FORWARD_NULL errors found by Coverity
+ * Bump github.com/containers/storage from 1.16.1 to 1.16.2
+ * Fix errors found by codespell
+ * Bump back to v1.15.0-dev
+ * Add Pull Request Template
+
+- Changelog for v1.14.2 (2020-03-03)
+ * Add Buildah pull request template
+ * Bump to containers/storage v1.16.1
+ * run_linux: fix tight loop if file is not pollable
+ * Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3
+ * Bump github.com/containers/common from 0.4.1 to 0.4.2
+ * Bump back to v1.15.0-dev
+ * Add Containerfile to build a versioned stable image on quay.io
+
+- Changelog for v1.14.1 (2020-02-27)
+ * Search for local runtime per values in containers.conf
+ * Set correct ownership on working directory
+ * BATS : in teardown, umount stale mounts
+ * Bump github.com/spf13/cobra from 0.0.5 to 0.0.6
+ * Bump github.com/fsouza/go-dockerclient from 1.6.1 to 1.6.3
+ * Bump github.com/stretchr/testify from 1.4.0 to 1.5.1
+ * Replace unix with syscall to allow vendoring into libpod
+ * Update to containers/common v0.4.1
+ * Improve remote manifest retrieval
+ * Fix minor spelling errors in containertools README
+ * Clear the right variable in buildahimage
+ * Correct a couple of incorrect format specifiers
+ * Update to containers/common v0.3.0
+ * manifest push --format: force an image type, not a list type
+ * run: adjust the order in which elements are added to $PATH
+ * getDateAndDigestAndSize(): handle creation time not being set
+ * Bump github.com/containers/common from 0.2.0 to 0.2.1
+ * include installation steps for CentOS 8 and Stream
+ * include installation steps for CentOS7 and forks
+ * Adjust Ubuntu install info to also work on Pop!_OS
+ * Make the commit id clear like Docker
+ * Show error on copied file above context directory in build
+ * Bump github.com/containers/image/v5 from 5.2.0 to 5.2.1
+ * pull/from/commit/push: retry on most failures
+ * Makefile: fix install.cni.sudo
+ * Repair buildah so it can use containers.conf on the server side
+ * Bump github.com/mattn/go-shellwords from 1.0.9 to 1.0.10
+ * Bump github.com/fsouza/go-dockerclient from 1.6.0 to 1.6.1
+ * Fixing formatting & build instructions
+ * Add Code of Conduct
+ * Bors: Fix no. req. github reviews
+ * Cirrus+Bors: Simplify temp branch skipping
+ * Bors-ng: Add documentation and status-icon
+ * Bump github.com/onsi/ginkgo from 1.11.0 to 1.12.0
+ * fix XDG_RUNTIME_DIR for authfile
+ * Cirrus: Disable F29 testing
+ * Cirrus: Add jq package
+ * Cirrus: Fix lint + validation using wrong epoch
+ * Stop using fedoraproject registry
+ * Bors: Workaround ineffective required statuses
+ * Bors: Enable app + Disable Travis
+ * Cirrus: Add standardized log-collection
+ * Cirrus: Improve automated lint + validation
+ * Allow passing options to golangci-lint
+ * Cirrus: Fixes from review feedback
+ * Cirrus: Temporarily ignore VM testing failures
+ * Cirrus: Migrate off papr + implement VM testing
+ * Cirrus: Update packages + fixes for get_ci_vm.sh
+ * Show validation command-line
+ * Skip overlay test w/ vfs driver
+ * use alpine, not centos, for various tests
+ * Flake handling: cache and prefetch images
+ * Bump to v1.15.0-dev
+
+- Changelog for v1.14.0 (2020-02-05)
+ * bump github.com/mtrmac/gpgme
+ * Update containers/common to v0.1.4
+ * manifest push: add --format option
+ * Bump github.com/onsi/gomega from 1.8.1 to 1.9.0
+ * vendor github.com/containers/image/v5@v5.2.0
+ * info test: deal with random key order
+ * Bump back to v1.14.0-dev
+
+- Changelog for v1.13.2 (2020-01-29)
+ * sign.bats: set GPG_TTY=/dev/null
+ * Fix parse_unsupported.go
+ * getDateAndDigestAndSize(): use manifest.Digest
+ * Bump github.com/opencontainers/selinux from 1.3.0 to 1.3.1
+ * Bump github.com/containers/common from 0.1.0 to 0.1.2
+ * Touch up os/arch doc
+ * chroot: handle slightly broken seccomp defaults
+ * buildahimage: specify fuse-overlayfs mount options
+ * Bump github.com/mattn/go-shellwords from 1.0.7 to 1.0.9
+ * copy.bats: make sure we detect failures due to missing source
+ * parse: don't complain about not being able to rename something to itself
+ * Makefile: use a $(GO_TEST) macro, fix a typo
+ * manifests: unit test fix
+ * Fix build for 32bit platforms
+ * Allow users to set OS and architecture on bud
+ * Fix COPY in containerfile with envvar
+ * Bump c/storage to v1.15.7
+ * add --sign-by to bud/commit/push, --remove-signatures for pull/push
+ * Remove cut/paste error in CHANGELOG.md
+ * Update vendor of containers/common to v0.1.0
+ * update install instructions for Debian, Raspbian and Ubuntu
+ * Add support for containers.conf
+ * Bump back to v1.14.0-dev
+
+- Changelog for v1.13.1 (2020-01-14)
+ * Bump github.com/containers/common from 0.0.5 to 0.0.7
+ * Bump github.com/onsi/ginkgo from 1.10.3 to 1.11.0
+ * Bump github.com/pkg/errors from 0.8.1 to 0.9.0
+ * Bump github.com/onsi/gomega from 1.7.1 to 1.8.1
+ * Add codespell support
+ * copyFileWithTar: close source files at the right time
+ * copy: don't digest files that we ignore
+ * Check for .dockerignore specifically
+ * Travis: rm go 1.12.x
+ * Don't setup excludes, if their is only one pattern to match
+ * set HOME env to /root on chroot-isolation by default
+ * docs: fix references to containers-*.5
+ * update openshift/api
+ * fix bug Add check .dockerignore COPY file
+ * buildah bud --volume: run from tmpdir, not source dir
+ * Fix imageNamePrefix to give consistent names in buildah-from
+ * cpp: use -traditional and -undef flags
+ * Fix image reference in tutorial 4
+ * discard outputs coming from onbuild command on buildah-from --quiet
+ * make --format columnizing consistent with buildah images
+ * Bump to v1.14.0-dev
+
+- Changelog for v1.13.0 (2019-12-27)
+ * Bump to c/storage v1.15.5
+ * Update container/storage to v1.15.4
+ * Fix option handling for volumes in build
+ * Rework overlay pkg for use with libpod
+ * Fix buildahimage builds for buildah
+ * Add support for FIPS-Mode backends
+ * Set the TMPDIR for pulling/pushing image to $TMPDIR
+ * WIP: safer test for pull --all-tags
+ * BATS major cleanup: blobcache.bats: refactor
+ * BATS major cleanup: part 4: manual stuff
+ * BATS major cleanup, step 3: yet more run_buildah
+ * BATS major cleanup, part 2: use more run_buildah
+ * BATS major cleanup, part 1: log-level
+ * Bump github.com/containers/image/v5 from 5.0.0 to 5.1.0
+ * Bump github.com/containers/common from 0.0.3 to 0.0.5
+ * Bump to v1.13.0-dev
+
+- Changelog for v1.12.0 (2019-12-13)
+ * Allow ADD to use http src
+ * Bump to c/storage v.1.15.3
+ * install.md: update golang dependency
+ * imgtype: reset storage opts if driver overridden
+ * Start using containers/common
+ * overlay.bats typo: fuse-overlays should be fuse-overlayfs
+ * chroot: Unmount with MNT_DETACH instead of UnmountMountpoints()
+ * bind: don't complain about missing mountpoints
+ * imgtype: check earlier for expected manifest type
+ * Vendor containers/storage fix
+ * Vendor containers/storage v1.15.1
+ * Add history names support
+ * PR takeover of #1966
+ * Tests: Add inspect test check steps
+ * Tests: Add container name and id check in containers test steps
+ * Test: Get permission in add test
+ * Tests: Add a test for tag by id
+ * Tests: Add test cases for push test
+ * Tests: Add image digest test
+ * Tests: Add some buildah from tests
+ * Tests: Add two commit test
+ * Tests: Add buildah bud with --quiet test
+ * Tests: Add two test for buildah add
+ * Bump back to v1.12.0-dev
+
+- Changelog for v1.11.6 (2019-12-03)
+ * Handle missing equal sign in --from and --chown flags for COPY/ADD
+ * bud COPY does not download URL
+ * Bump github.com/onsi/gomega from 1.7.0 to 1.7.1
+ * Fix .dockerignore exclude regression
+ * Ran buildah through codespell
+ * commit(docker): always set ContainerID and ContainerConfig
+ * Touch up commit man page image parameter
+ * Add builder identity annotations.
+ * info: use util.Runtime()
+ * Bump github.com/onsi/ginkgo from 1.10.2 to 1.10.3
+ * Bump back to v1.12.0-dev
+
+- Changelog for v1.11.5 (2019-11-11)
+ * Enhance error on unsafe symbolic link targets
+ * Add OCIRuntime to info
+ * Check nonexistent authfile
+ * Only output image id if running buildah bud --quiet
+ * Fix --pull=true||false and add --pull-never to bud and from (retry)
+ * cgroups v2: tweak or skip tests
+ * Prepwork: new 'skip' helpers for tests
+ * Handle configuration blobs for manifest lists
+ * unmarshalConvertedConfig: avoid using the updated image's ref
+ * Add completions for Manifest commands
+ * Add disableFips option to secrets pkg
+ * Update bud.bats test archive test
+ * Add test for caching based on content digest
+ * Builder.untarPath(): always evaluate b.ContentDigester.Hash()
+ * Bump github.com/onsi/ginkgo from 1.10.1 to 1.10.2
+ * Fix another broken test: copy-url-mtime
+ * yet more fixes
+ * Actual bug fix for 'add' test: fix the expected mode
+ * BATS tests - lots of mostly minor cleanup
+ * build: drop support for ostree
+ * Add support for make vendor-in-container
+ * imgtype: exit with error if storage fails
+ * remove XDG_RUNTIME_DIR from default authfile path
+ * fix troubleshooting redirect instructions
+ * Bump back to v1.12.0-dev
+
+- Changelog for v1.11.4 (2019-10-28)
+ * buildah: add a "manifest" command
+ * manifests: add the module
+ * pkg/supplemented: add a package for grouping images together
+ * pkg/manifests: add a manifest list build/manipulation API
+ * Update for ErrUnauthorizedForCredentials API change in containers/image
+ * Update for manifest-lists API changes in containers/image
+ * version: also note the version of containers/image
+ * Move to containers/image v5.0.0
+ * Enable --device directory as src device
+ * Fix git build with branch specified
+ * Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1
+ * Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0
+ * Add clarification to the Tutorial for new users
+ * Silence "using cache" to ensure -q is fully quiet
+ * Add OWNERS File to Buildah
+ * Bump github.com/containers/storage from 1.13.4 to 1.13.5
+ * Move runtime flag to bud from common
+ * Commit: check for storage.ErrImageUnknown using errors.Cause()
+ * Fix crash when invalid COPY --from flag is specified.
+ * Bump back to v1.12.0-dev
+
+- Changelog for v1.11.3 (2019-10-04)
+ * Update c/image to v4.0.1
+ * Bump github.com/spf13/pflag from 1.0.3 to 1.0.5
+ * Fix --build-args handling
+ * Bump github.com/spf13/cobra from 0.0.3 to 0.0.5
+ * Bump github.com/cyphar/filepath-securejoin from 0.2.1 to 0.2.2
+ * Bump github.com/onsi/ginkgo from 1.8.0 to 1.10.1
+ * Bump github.com/fsouza/go-dockerclient from 1.3.0 to 1.4.4
+ * Add support for retrieving context from stdin "-"
+ * Ensure bud remote context cleans up on error
+ * info: add cgroups2
+ * Bump github.com/seccomp/libseccomp-golang from 0.9.0 to 0.9.1
+ * Bump github.com/mattn/go-shellwords from 1.0.5 to 1.0.6
+ * Bump github.com/stretchr/testify from 1.3.0 to 1.4.0
+ * Bump github.com/opencontainers/selinux from 1.2.2 to 1.3.0
+ * Bump github.com/etcd-io/bbolt from 1.3.2 to 1.3.3
+ * Bump github.com/onsi/gomega from 1.5.0 to 1.7.0
+ * update c/storage to v1.13.4
+ * Print build 'STEP' line to stdout, not stderr
+ * Fix travis-ci on forks
+ * Vendor c/storage v1.13.3
+ * Use Containerfile by default
+ * Added tutorial on how to include Buildah as library
+ * util/util: Fix "configuraitno" -> "configuration" log typo
+ * Bump back to v1.12.0-dev
+
+- Changelog for v1.11.2 (2019-09-13)
+ * Add some cleanup code
+ * Move devices code to unit specific directory.
+ * Bump back to v1.12.0-dev
+
+- Changelog for v1.11.1 (2019-09-11)
+ * Add --devices flag to bud and from
+ * Downgrade .papr to highest atomic version
+ * Add support for /run/.containerenv
+ * Truncate output of too long image names
+ * Preserve file and directory mount permissions
+ * Bump fedora version from 28 to 30
+ * makeImageRef: ignore EmptyLayer if Squash is set
+ * Set TMPDIR to /var/tmp by default
+ * replace --debug=false with --log-level=error
+ * Allow mounts.conf entries for equal source and destination paths
+ * fix label and annotation for 1-line Dockerfiles
+ * Enable interfacer linter and fix lints
+ * install.md: mention goproxy
+ * Makefile: use go proxy
+ * Bump to v1.12.0-dev
+
+- Changelog for v1.11.0 (2019-08-29)
+ * tests/bud.bats: add --signature-policy to some tests
+ * Vendor github.com/openshift/api
+ * pull/commit/push: pay attention to $BUILD_REGISTRY_SOURCES
+ * Add `--log-level` command line option and deprecate `--debug`
+ * add support for cgroupsV2
+ * Correctly detect ExitError values from Run()
+ * Disable empty logrus timestamps to reduce logger noise
+ * Remove outdated deps Makefile target
+ * Remove gofmt.sh in favor of golangci-lint
+ * Remove govet.sh in favor of golangci-lint
+ * Allow to override build date with SOURCE_DATE_EPOCH
+ * Update shebangs to take env into consideration
+ * Fix directory pull image names
+ * Add --digestfile and Re-add push statement as debug
+ * README: mention that Podman uses Buildah's API
+ * Use content digests in ADD/COPY history entries
+ * add: add a DryRun flag to AddAndCopyOptions
+ * Fix possible runtime panic on bud
+ * Add security-related volume options to validator
+ * use correct path for ginkgo
+ * Add bud 'without arguments' integration tests
+ * Update documentation about bud
+ * add: handle hard links when copying with .dockerignore
+ * add: teach copyFileWithTar() about symlinks and directories
+ * Allow buildah bud to be called without arguments
+ * imagebuilder: fix detection of referenced stage roots
+ * Touch up go mod instructions in install
+ * run_linux: fix mounting /sys in a userns
+ * Vendor Storage v1.13.2
+ * Cirrus: Update VM images
+ * Fix handling of /dev/null masked devices
+ * Update `bud`/`from` help to contain indicator for `--dns=none`
+ * Bump back to v1.11.0-dev
+
+- Changelog for v1.10.1 (2019-08-08)
+ * Bump containers/image to v3.0.2 to fix keyring issue
+ * Bug fix for volume minus syntax
+ * Bump container/storage v1.13.1 and containers/image v3.0.1
+ * bump github.com/containernetworking/cni to v0.7.1
+ * Add overlayfs to fuse-overlayfs tip
+ * Add automatic apparmor tag discovery
+ * Fix bug whereby --get-login has no effect
+ * Bump to v1.11.0-dev
+
+- Changelog for v1.10.0 (2019-08-02)
+ * vendor github.com/containers/image@v3.0.0
+ * Remove GO111MODULE in favor of `-mod=vendor`
+ * Vendor in containers/storage v1.12.16
+ * Add '-' minus syntax for removal of config values
+ * tests: enable overlay tests for rootless
+ * rootless, overlay: use fuse-overlayfs
+ * vendor github.com/containers/image@v2.0.1
+ * Added '-' syntax to remove volume config option
+ * delete `successfully pushed` message
+ * Add golint linter and apply fixes
+ * vendor github.com/containers/storage@v1.12.15
+ * Change wait to sleep in buildahimage readme
+ * Handle ReadOnly images when deleting images
+ * Add support for listing read/only images
+
+- Changelog for v1.9.2 (2019-07-19)
+ * from/import: record the base image's digest, if it has one
+ * Fix CNI version retrieval to not require network connection
+ * Add misspell linter and apply fixes
+ * Add goimports linter and apply fixes
+ * Add stylecheck linter and apply fixes
+ * Add unconvert linter and apply fixes
+ * image: make sure we don't try to use zstd compression
+ * run.bats: skip the "z" flag when testing --mount
+ * Update to runc v1.0.0-rc8
+ * Update to match updated runtime-tools API
+ * bump github.com/opencontainers/runtime-tools to v0.9.0
+ * Build e2e tests using the proper build tags
+ * Add unparam linter and apply fixes
+ * Run: correct a typo in the --cap-add help text
+ * unshare: add a --mount flag
+ * fix push check image name is not empty
+ * Bump to v1.9.2-dev
+
+- Changelog for v1.9.1 (2019-07-12)
+ * add: fix slow copy with no excludes
+ * Add errcheck linter and fix missing error check
+ * Improve tests/tools/Makefile parallelism and abstraction
+ * Fix response body not closed resource leak
+ * Switch to golangci-lint
+ * Add gomod instructions and mailing list links
+ * On Masked path, check if /dev/null already mounted before mounting
+ * Update to containers/storage v1.12.13
+ * Refactor code in package imagebuildah
+ * Add rootless podman with NFS issue in documentation
+ * Add --mount for buildah run
+ * import method ValidateVolumeOpts from libpod
+ * Fix typo
+ * Makefile: set GO111MODULE=off
+ * rootless: add the built-in slirp DNS server
+ * Update docker/libnetwork to get rid of outdated sctp package
+ * Update buildah-login.md
+ * migrate to go modules
+ * install.md: mention go modules
+ * tests/tools: go module for test binaries
+ * fix --volume splits comma delimited option
+ * Add bud test for RUN with a priv'd command
+ * vendor logrus v1.4.2
+ * pkg/cli: panic when flags can't be hidden
+ * pkg/unshare: check all errors
+ * pull: check error during report write
+ * run_linux.go: ignore unchecked errors
+ * conformance test: catch copy error
+ * chroot/run_test.go: export funcs to actually be executed
+ * tests/imgtype: ignore error when shutting down the store
+ * testreport: check json error
+ * bind/util.go: remove unused func
+ * rm chroot/util.go
+ * imagebuildah: remove unused `dedupeStringSlice`
+ * StageExecutor: EnsureContainerPath: catch error from SecureJoin()
+ * imagebuildah/build.go: return <expr> instead of branching
+ * rmi: avoid redundant branching
+ * conformance tests: nilness: allocate map
+ * imagebuildah/build.go: avoid redundant `filepath.Join()`
+ * imagebuildah/build.go: avoid redundant `os.Stat()`
+ * imagebuildah: omit comparison to bool
+ * fix "ineffectual assignment" lint errors
+ * docker: ignore "repeats json tag" lint error
+ * pkg/unshare: use `...` instead of iterating a slice
+ * conformance: bud test: use raw strings for regexes
+ * conformance suite: remove unused func/var
+ * buildah test suite: remove unused vars/funcs
+ * testreport: fix golangci-lint errors
+ * util: remove redundant `return` statement
+ * chroot: only log clean-up errors
+ * images_test: ignore golangci-lint error
+ * blobcache: log error when draining the pipe
+ * imagebuildah: check errors in deferred calls
+ * chroot: fix error handling in deferred funcs
+ * cmd: check all errors
+ * chroot/run_test.go: check errors
+ * chroot/run.go: check errors in deferred calls
+ * imagebuildah.Executor: remove unused onbuild field
+ * docker/types.go: remove unused struct fields
+ * util: use strings.ContainsRune instead of index check
+ * Cirrus: Initial implementation
+ * Bump to v1.9.1-dev
+
+- Changelog for v1.9.0 (2019-06-15)
+ * buildah-run: fix-out-of-range panic (2)
+ * Bump back to v1.9.0-dev
+
+- Changelog for v1.8.4 (2019-06-13)
+ Update containers/image to v2.0.0
+ run: fix hang with run and --isolation=chroot
+ run: fix hang when using run
+ chroot: drop unused function call
+ remove --> before imageID on build
+ Always close stdin pipe
+ Write deny to setgroups when doing single user mapping
+ Avoid including linux/memfd.h
+ Add a test for the symlink pointing to a directory
+ Add missing continue
+ Fix the handling of symlinks to absolute paths
+ Only set default network sysctls if not rootless
+ Support --dns=none like podman
+ fix bug --cpu-shares parsing typo
+ Fix validate complaint
+ Update vendor on containers/storage to v1.12.10
+ Create directory paths for COPY thereby ensuring correct perms
+ imagebuildah: use a stable sort for comparing build args
+ imagebuildah: tighten up cache checking
+ bud.bats: add a test verifying the order of --build-args
+ add -t to podman run
+ imagebuildah: simplify screening by top layers
+ imagebuildah: handle ID mappings for COPY --from
+ imagebuildah: apply additionalTags ourselves
+ bud.bats: test additional tags with cached images
+ bud.bats: add a test for WORKDIR and COPY with absolute destinations
+ Cleanup Overlay Mounts content
+
+- Changelog for v1.8.3 (2019-06-04)
+ * Add support for file secret mounts
+ * Add ability to skip secrets in mounts file
+ * allow 32bit builds
+ * fix tutorial instructions
+ * imagebuilder: pass the right contextDir to Add()
+ * add: use fileutils.PatternMatcher for .dockerignore
+ * bud.bats: add another .dockerignore test
+ * unshare: fallback to single usermapping
+ * addHelperSymlink: clear the destination on os.IsExist errors
+ * bud.bats: test replacing symbolic links
+ * imagebuildah: fix handling of destinations that end with '/'
+ * bud.bats: test COPY with a final "/" in the destination
+ * linux: add check for sysctl before using it
+ * unshare: set _CONTAINERS_ROOTLESS_GID
+ * Rework buildahimages
+ * build context: support https git repos
+ * Add a test for ENV special chars behaviour
+ * Check in new Dockerfiles
+ * Apply custom SHELL during build time
+ * config: expand variables only at the command line
+ * SetEnv: we only need to expand v once
+ * Add default /root if empty on chroot iso
+ * Add support for Overlay volumes into the container.
+ * Export buildah validate volume functions so it can share code with libpod
+ * Bump baseline test to F30
+ * Fix rootless handling of /dev/shm size
+ * Avoid fmt.Printf() in the library
+ * imagebuildah: tighten cache checking back up
+ * Handle WORKDIR with dangling target
+ * Default Authfile to proper path
+ * Make buildah run --isolation follow BUILDAH_ISOLATION environment
+ * Vendor in latest containers/storage and containers/image
+ * getParent/getChildren: handle layerless images
+ * imagebuildah: recognize cache images for layerless images
+ * bud.bats: test scratch images with --layers caching
+ * Get CHANGELOG.md updates
+ * Add some symlinks to test our .dockerignore logic
+ * imagebuildah: addHelper: handle symbolic links
+ * commit/push: use an everything-allowed policy
+ * Correct manpage formatting in files section
+ * Remove must be root statement from buildah doc
+ * Change image names to stable, testing and upstream
+ * Bump back to v1.9.0-dev
+
+- Changelog for v1.8.2 (2019-05-02)
+ * Vendor Storage 1.12.6
+ * Create scratch file in TESTDIR
+ * Test bud-copy-dot with --layers picks up changed file
+ * Bump back to 1.9.0-dev
+
+- Changelog for v1.8.1 (2019-05-01)
+ * Don't create directory on container
+ * Replace kubernetes/pause in tests with k8s.gcr.io/pause
+ * imagebuildah: don't remove intermediate images if we need them
+ * Rework buildahimagegit to buildahimageupstream
+ * Fix Transient Mounts
+ * Handle WORKDIRs that are symlinks
+ * allow podman to build a client for windows
+ * Touch up 1.9-dev to 1.9.0-dev
+ * Bump to 1.9-dev
+
+- Changelog for v1.8.0 (2019-04-26)
+ * Resolve symlink when checking container path
+ * commit: commit on every instruction, but not always with layers
+ * CommitOptions: drop the unused OnBuild field
+ * makeImageRef: pass in the whole CommitOptions structure
+ * cmd: API cleanup: stores before images
+ * run: check if SELinux is enabled
+ * Fix buildahimages Dockerfiles to include support for additionalimages mounted from host.
+ * Detect changes in rootdir
+ * Fix typo in buildah-pull(1)
+ * Vendor in latest containers/storage
+ * Keep track of any build-args used during buildah bud --layers
+ * commit: always set a parent ID
+ * imagebuildah: rework unused-argument detection
+ * fix bug dest path when COPY .dockerignore
+ * Move Host IDMAppings code from util to unshare
+ * Add BUILDAH_ISOLATION rootless back
+ * Travis CI: fail fast, upon error in any step
+ * imagebuildah: only commit images for intermediate stages if we have to
+ * Use errors.Cause() when checking for IsNotExist errors
+ * auto pass http_proxy to container
+ * Bump back to 1.8-dev
+
+- Changelog for v1.7.3 (2019-04-16)
+ * imagebuildah: don't leak image structs
+ * Add Dockerfiles for buildahimages
+ * Bump to Replace golang 1.10 with 1.12
+ * add --dns* flags to buildah bud
+ * Add hack/build_speed.sh test speeds on building container images
+ * Create buildahimage Dockerfile for Quay
+ * rename 'is' to 'expect_output'
+ * squash.bats: test squashing in multi-layered builds
+ * bud.bats: test COPY --from in a Dockerfile while using the cache
+ * commit: make target image names optional
+ * Fix bud-args to allow comma separation
+ * oops, missed some tests in commit.bats
+ * new helper: expect_line_count
+ * New tests for #1467 (string slices in cmdline opts)
+ * Workarounds for dealing with travis; review feedback
+ * BATS tests - extensive but minor cleanup
+ * imagebuildah: defer pulling images for COPY --from
+ * imagebuildah: centralize COMMIT and image ID output
+ * Travis: do not use traviswait
+ * imagebuildah: only initialize imagebuilder configuration once per stage
+ * Make cleaner error on Dockerfile build errors
+ * unshare: move to pkg/
+ * unshare: move some code from cmd/buildah/unshare
+ * Fix handling of Slices versus Arrays
+ * imagebuildah: reorganize stage and per-stage logic
+ * imagebuildah: add empty layers for instructions
+ * Add missing step in installing into Ubuntu
+ * fix bug in .dockerignore support
+ * imagebuildah: deduplicate prepended "FROM" instructions
+ * Touch up intro
+ * commit: set created-by to the shell if it isn't set
+ * commit: check that we always set a "created-by"
+ * docs/buildah.md: add "containers-" prefixes under "SEE ALSO"
+ * Bump back to 1.8-dev
+
+- Changelog for v1.7.2 (2019-03-28)
+ * mount: do not create automatically a namespace
+ * buildah: correctly create the userns if euid!=0
+ * imagebuildah.Build: consolidate cleanup logic
+ * CommitOptions: drop the redundant Store field
+ * Move pkg/chrootuser from libpod to buildah.
+ * imagebuildah: record image IDs and references more often
+ * vendor imagebuilder v1.1.0
+ * imagebuildah: fix requiresStart/noRunsRemaining confusion
+ * imagebuildah: check for unused args across stages
+ * bump github.com/containernetworking/cni to v0.7.0-rc2
+ * imagebuildah: use "useCache" instead of "noCache"
+ * imagebuildah.resolveNameToImageRef(): take name as a parameter
+ * Export fields of the DockerIgnore struct
+ * imagebuildah: drop the duplicate containerIDs list
+ * rootless: by default use the host network namespace
+ * imagebuildah: split Executor and per-stage execution
+ * imagebuildah: move some fields around
+ * golint: make golint happy
+ * docs: 01-intro.md: add missing . in Dockerfile examples
+ * fix bug using .dockerignore
+ * Do not create empty mounts.conf file
+ * images: suppress a spurious blank line with no images
+ * from: distinguish between ADD and COPY
+ * fix bug to not separate each --label value with comma
+ * buildah-bud.md: correct a typo, note a default
+ * Remove mistaken code that got merged in other PR
+ * add sample registries.conf to docs
+ * escape shell variables in README example
+ * slirp4netns: set mtu to 65520
+ * images: imageReposToMap() already adds <none>:<none>
+ * imagebuildah.ReposToMap: move to cmd
+ * Build: resolve copyFrom references earlier
+ * Allow rootless users to use the cache directory in homedir
+ * bud.bats: use the per-test temp directory
+ * bud.bats: log output before counting length
+ * Simplify checks for leftover args
+ * Print commitID with --layers
+ * fix bug images use the template to print results
+ * rootless: honor --net host
+ * onsi/gomega add missing files
+ * vendor latest openshift/imagebuilder
+ * Remove noop from squash help
+ * Prepend a comment to files setup in container
+ * imagebuildah resolveSymlink: fix handling of relative links
+ * Errors should be printed to stderr
+ * Add recommends for slirp4netns and fuse-overlay
+ * Update pull and pull-always flags
+ * Hide from users command options that we don't want them to use.
+ * Update secrets fipsmode patch to work on rootless containers
+ * fix unshare option handling and documentation
+ * Vendor in latest containers/storage
+ * Hard-code docker.Transport use in pull --all-tags
+ * Use a types.ImageReference instead of (transport, name) strings in pullImage etc.
+ * Move the computation of srcRef before first pullAndFindImage
+ * Don't throw away user-specified tag for pull --all-tags
+ * CHANGES BEHAVIOR: Remove the string format input to localImageNameForReference
+ * Don't try to parse imageName as transport:image in pullImage
+ * Use reference.WithTag instead of manual string manipulation in Pull
+ * Don't pass image = transport:repo:tag, transport=transport to pullImage
+ * Fix confusing variable naming in Pull
+ * Don't try to parse image name as a transport:image
+ * Fix error reporting when parsing trans+image
+ * Remove 'transport == ""' handling from the pull path
+ * Clean up "pulls" of local image IDs / ID prefixes
+ * Simplify ExpandNames
+ * Document the semantics of transport+name returned by ResolveName
+ * Update gitvalidation epoch
+ * Bump back to 1.8-dev
+
+- Changelog for v1.7.1 (2019-02-26)
+ * vendor containers/image v1.5
+ * Move secrets code from libpod into buildah
+ * Update CHANGELOG.md with the past changes
+ * README.md: fix typo
+ * Fix a few issues found by tests/validate/gometalinter.sh
+ * Neutralize buildah/unshare on non-Linux platforms
+ * Explicitly specify a directory to find(1)
+ * README.md: rephrase Buildah description
+ * Stop printing default twice in cli --help
+ * install.md: add section about vendoring
+ * Bump to 1.8-dev
+
+- Changelog for v1.7 (2019-02-21)
+ * vendor containers/image v1.4
+ * Make "images --all" faster
+ * Remove a misleading comment
+ * Remove quiet option from pull options
+ * Make sure buildah pull --all-tags only works with docker transport
+ * Support oci layout format
+ * Fix pulling of images within buildah
+ * Fix tls-verify polarity
+ * Travis: execute make vendor and hack/tree_status.sh
+ * vendor.conf: remove unused dependencies
+ * add missing vendor/github.com/containers/libpod/vendor.conf
+ * vendor.conf: remove github.com/inconshreveable/mousetrap
+ * make vendor: always fetch the latest vndr
+ * add hack/tree_status.sh script
+ * Bump c/Storage to 1.10
+ * Add --all-tags test to pull
+ * mount: make error clearer
+ * Remove global flags from cli help
+ * Set --disable-compression to true as documented
+ * Help document using buildah mount in rootless mode
+ * healthcheck start-period: update documentation
+ * Vendor in latest c/storage and c/image
+ * dumpbolt: handle nested buckets
+ * Fix buildah commit compress by default
+ * Test on xenial, not trusty
+ * unshare: reexec using a memfd copy instead of the binary
+ * Add --target to bud command
+ * Fix example for setting multiple environment variables
+ * main: fix rootless mode
+ * buildah: force umask 022
+ * pull.bats: specify registry config when using registries
+ * pull.bats: use the temporary directory, not /tmp
+ * unshare: do not set rootless mode if euid=0
+ * Touch up cli help examples and a few nits
+ * Add an undocumented dumpbolt command
+ * Move tar commands into containers/storage
+ * Fix bud issue with 2 line Dockerfile
+ * Add package install descriptions
+ * Note configuration file requirements
+ * Replace urfave/cli with cobra
+ * cleanup vendor.conf
+ * Vendor in latest containers/storage
+ * Add Quiet to PullOptions and PushOptions
+ * cmd/commit: add flag omit-timestamp to allow for deterministic builds
+ * Add options for empty-layer history entries
+ * Make CLI help descriptions and usage a bit more consistent
+ * vndr opencontainers/selinux
+ * Bump baseline test Fedora to 29
+ * Bump to v1.7-dev-1
+ * Bump to v1.6-1
+ * Add support for ADD --chown
+ * imagebuildah: make EnsureContainerPath() check/create the right one
+ * Bump 1.7-dev
+ * Fix contrib/rpm/bulidah.spec changelog date
+
+- Changelog for v1.6-1 (2019-01-18)
+ * Add support for ADD --chown
+ * imagebuildah: make EnsureContainerPath() check/create the right one
+ * Fix contrib/rpm/bulidah.spec changelog date
+ * Vendor in latest containers/storage
+ * Revendor everything
+ * Revendor in latest code by release
+ * unshare: do not set USER=root
+ * run: ignore EIO when flushing at the end, avoid double log
+ * build-using-dockerfile,commit: disable compression by default
+ * Update some comments
+ * Make rootless work under no_pivot_root
+ * Add CreatedAtRaw date field for use with Format
+ * Properly format images JSON output
+ * pull: add all-tags option
+ * Fix support for multiple Short options
+ * pkg/blobcache: add synchronization
+ * Skip empty files in file check of conformance test
+ * Use NoPivot also for RUN, not only for run
+ * Remove no longer used isReferenceInsecure / isRegistryInsecure
+ * Do not set OCIInsecureSkipTLSVerify based on registries.conf
+ * Remove duplicate entries from images JSON output
+ * vendor parallel-copy from containers/image
+ * blobcache.bats: adjust explicit push tests
+ * Handle one line Dockerfile with layers
+ * We should only warn if user actually requests Hostname be set in image
+ * Fix compiler Warning about comparing different size types
+ * imagebuildah: don't walk if rootdir and path are equal
+ * Add aliases for buildah containers, so buildah list, ls and ps work
+ * vendor: use faster version instead compress/gzip
+ * vendor: update libpod
+ * Properly handle Hostname inside of RUN command
+ * docs: mention how to mount in rootless mode
+ * tests: use fully qualified name for centos image
+ * travis.yml: use the fully qualified name for alpine
+ * mount: allow mount only when using vfs
+ * Add some tests for buildah pull
+ * Touch up images -q processing
+ * Refactor: Use library shared idtools.ParseIDMap() instead of bundling it
+ * bump GITVALIDATE_EPOCH
+ * cli.BudFlags: add `--platform` nop
+ * Makefile: allow packagers to more easily add tags
+ * Makefile: soften the requirement on git
+ * tests: add containers json test
+ * Inline blobCache.putBlob into blobCacheDestination.PutBlob
+ * Move saveStream and putBlob near blobCacheDestination.PutBlob
+ * Remove BlobCache.PutBlob
+ * Update for API changes
+ * Vendor c/image after merging c/image#536
+ * Handle 'COPY --from' in Dockerfile
+ * Vendor in latest content from github.com/containers/storage
+ * Clarify docker.io default in push with docker-daemon
+ * Test blob caching
+ * Wire in a hidden --blob-cache option
+ * Use a blob cache when we're asked to use one
+ * Add --disable-compression to 'build-using-dockerfile'
+ * Add a blob cache implementation
+ * vendor: update containers/storage
+ * Update for sysregistriesv2 API changes
+ * Update containers/image to 63a1cbdc5e6537056695cf0d627c0a33b334df53
+ * clean up makefile variables
+ * Fix file permission
+ * Complete the instructions for the command
+ * Show warning when a build arg not used
+ * Assume user 0 group 0, if /etc/passwd file in container.
+ * Add buildah info command
+ * Enable -q when --filter is used for images command
+ * Add v1.5 Release Announcement
+ * Fix dangling filter for images command
+ * Fix completions to print Names as well as IDs
+ * tests: Fix file permissions
+ * Bump 1.6-dev
+
+- Changelog for v1.5-1 (2018-11-21)
+ * Bump min go to 1.10 in install.md
+ * vendor: update ostree-go
+ * Update docker build command line in conformance test
+ * Print command in SystemExec as debug information
+ * Add some skip word for inspect check in conformance test
+ * Update regex for multi stage base test
+ * Sort CLI flags
+ * vendor: update containers/storage
+ * Add note to install about non-root on RHEL/CentOS
+ * Update imagebuild dependency to support heading ARGs in Dockerfile
+ * rootless: do not specify --rootless to the OCI runtime
+ * Export resolvesymlink function
+ * Exclude --force-rm from common bud cli flags
+ * run: bind mount /etc/hosts and /etc/resolv.conf if not in a volume
+ * rootless: use slirp4netns to setup the network namespace
+ * Instructions for completing the pull command
+ * Fix travis to not run environment variable patch
+ * rootless: only discard network configuration names
+ * run: only set up /etc/hosts or /etc/resolv.conf with network
+ * common: getFormat: match entire string not only the prefix
+ * vendor: update libpod
+ * Change validation EPOCH
+ * Fixing broken link for container-registries.conf
+ * Restore rootless isolation test for from volume ro test
+ * ostree: fix tag for build constraint
+ * Handle directories better in bud -f
+ * vndr in latest containers/storage
+ * Fix unshare gofmt issue
+ * runSetupBuiltinVolumes(): break up volume setup
+ * common: support a per-user registries conf file
+ * unshare: do not override the configuration
+ * common: honor the rootless configuration file
+ * unshare: create a new mount namespace
+ * unshare: support libpod rootless pkg
+ * Use libpod GetDefaultStorage to report proper storage config
+ * Allow container storage to manage the SELinux labels
+ * Resolve image names with default transport in from command
+ * run: When the value of isolation is set, use the set value instead of the default value.
+ * Vendor in latest containers/storage and opencontainers/selinux
+ * Remove no longer valid todo
+ * Check for empty buildTime in version
+ * Change gofmt so it runs on all but 1.10
+ * Run gofmt only on Go 1.11
+ * Walk symlinks when checking cached images for copied/added files
+ * ReserveSELinuxLabels(): handle wrapped errors from OpenBuilder
+ * Set WorkingDir to empty, not / for conformance
+ * Update calls in e2e to address 1101
+ * imagebuilder.BuildDockerfiles: return the image ID
+ * Update for changes in the containers/image API
+ * bump(github.com/containers/image)
+ * Allow setting --no-pivot default with an env var
+ * Add man page and bash completion, for --no-pivot
+ * Add the --no-pivot flag to the run command
+ * Improve reporting about individual pull failures
+ * Move the "short name but no search registries" error handling to resolveImage
+ * Return a "search registries were needed but empty" indication in util.ResolveName
+ * Simplify handling of the "tried to pull an image but found nothing" case in newBuilder
+ * Don't even invoke the pull loop if options.FromImage == ""
+ * Eliminate the long-running ref and img variables in resolveImage
+ * In resolveImage, return immediately on success
+ * Fix From As in Dockerfile
+ * Vendor latest containers/image
+ * Vendor in latest libpod
+ * Sort CLI flags of buildah bud
+ * Change from testing with golang 1.9 to 1.11.
+ * unshare: detect when unprivileged userns are disabled
+ * Optimize redundant code
+ * fix missing format param
+ * chroot: fix the args check
+ * imagebuildah: make ResolveSymLink public
+ * Update copy chown test
+ * buildah: use the same logic for XDG_RUNTIME_DIR as podman
+ * V1.4 Release Announcement
+ * Podman --privileged selinux is broken
+ * papr: mount source at gopath
+ * parse: Modify the return value
+ * parse: modify the verification of the isolation value
+ * Make sure we log or return every error
+ * pullImage(): when completing an image name, try docker://
+ * Fix up Tutorial 3 to account for format
+ * Vendor in latest containers/storage and containers/image
+ * docs/tutorials/01-intro.md: enhanced installation instructions
+ * Enforce "blocked" for registries for the "docker" transport
+ * Correctly set DockerInsecureSkipTLSVerify when pulling images
+ * chroot: set up seccomp and capabilities after supplemental groups
+ * chroot: fix capabilities list setup and application
+ * .papr.yml: log the podman version
+ * namespaces.bats: fix handling of uidmap/gidmap options in pairs
+ * chroot: only create user namespaces when we know we need them
+ * Check /proc/sys/user/max_user_namespaces on unshare(NEWUSERNS)
+ * bash/buildah: add isolation option to the from command
+
+- Changelog for v1.4 (2018-10-02)
+ * from: fix isolation option
+ * Touchup pull manpage
+ * Export buildah ReserveSELinuxLabels so podman can use it
+ * Add buildah.io to README.md and doc fixes
+ * Update rmi man for prune changes
+ * Ignore file not found removal error in bud
+ * bump(github.com/containers/{storage,image})
+ * NewImageSource(): only create one Diff() at a time
+ * Copy ExposedPorts from base image into the config
+ * tests: run conformance test suite in Travis
+ * Change rmi --prune to not accept an imageID
+ * Clear intermediate container IDs after each stage
+ * Request podman version for build issues
+ * unshare: keep the additional groups of the user
+ * Builtin volumes should be owned by the UID/GID of the container
+ * Get rid of dangling whitespace in markdown files
+ * Move buildah from projectatomic/buildah to containers/buildah
+ * nitpick: parse.validateFlags loop in bud cli
+ * bash: Completion options
+ * Add signature policy to push tests
+ * vendor in latest containers/image
+ * Fix grammar in Container Tools Guide
+ * Don't build btrfs if it is not installed
+ * new: Return image-pulling errors from resolveImage
+ * pull: Return image-pulling errors from pullImage
+ * Add more volume mount tests
+ * chroot: create missing parent directories for volume mounts
+ * Push: Allow an empty destination
+ * Add Podman relationship to readme, create container tools guide
+ * Fix arg usage in buildah-tag
+ * Add flags/arguments order verification to other commands
+ * Handle ErrDuplicateName errors from store.CreateContainer()
+ * Evaluate symbolic links on Add/Copy Commands
+ * Vendor in latest containers/image and containers/storage
+ * Retain bounding set when running containers as non root
+ * run container-diff tests in Travis
+ * buildah-images.md: Fix option contents
+ * push: show image digest after push succeed
+ * Vendor in latest containers/storage,image,libpod and runc
+ * Change references to cri-o to point at new repository
+ * Exclude --layers from the common bud cli flags
+ * demos: Increase the executable permissions
+ * run: clear default seccomp filter if not enabled
+ * Bump maximum cyclomatic complexity to 45
+ * stdin: on HUP, read everything
+ * nitpick: use tabs in tests/helpers.bash
+ * Add flags/arguments order verification to one arg commands
+ * nitpick: decrease cognitive complexity in buildah-bud
+ * rename: Avoid renaming the same name as other containers
+ * chroot isolation: chroot() before setting up seccomp
+ * Small nitpick at the "if" condition in tag.go
+ * cmd/images: Modify json option
+ * cmd/images: Disallow the input of image when using the -a option
+ * Fix examples to include context directory
+ * Update containers/image to fix commit layer issue
+ * cmd/containers: End loop early when using the json option
+ * Make buildah-from error message clear when flags are after arg
+ * Touch up README.md for conformance tests
+ * Update container/storage for lock fix
+ * cmd/rm: restore the correct containerID display
+ * Remove debug lines
+ * Remove docker build image after each test
+ * Add README for conformance test
+ * Update the MakeOptions to accept all command options for buildah
+ * Update regrex to fit the docker output in test "run with JSON"
+ * cmd/buildah: Remove redundant variable declarations
+ * Warn about using Commands in Dockerfile that are not supported by OCI.
+ * Add buildah bud conformance test
+ * Fix rename to also change container name in builder
+ * Makefile: use $(GO) env-var everywhere
+ * Cleanup code to more closely match Docker Build images
+ * Document BUILDAH_* environment variables in buildah bud --help output
+ * Return error immediately if error occurs in Prepare step
+ * Fix --layers ADD from url issue
+ * Add "Sign your PRs" TOC item to contributing.md.
+ * Display the correct ID after deleting image
+ * rmi: Modify the handling of errors
+ * Let util.ResolveName() return parsing errors
+ * Explain Open Container Initiative (OCI) acronym, add link
+ * Update vendor for urfave/cli back to master
+ * Handle COPY --chown in Dockerfile
+ * Switch to Recommends container-selinux
+ * Update vendor for containernetworking, imagebuildah and podman
+ * Document STORAGE_DRIVER and STORAGE_OPTS environment variable
+ * Change references to projectatomic/libpod to containers/libpod
+ * Add container PATH retrieval example
+ * Expand variables names for --env
+ * imagebuildah: provide a way to provide stdin for RUN
+ * Remove an unused srcRef.NewImageSource in pullImage
+ * chroot: correct a comment
+ * chroot: bind mount an empty directory for masking
+ * Don't bother with --no-pivot for rootless isolation
+ * CentOS need EPEL repo
+ * Export a Pull() function
+ * Remove stream options, since docker build does not have it
+ * release v1.3: mention openSUSE
+ * Add Release Announcements directory
+ * Bump to v1.4-dev
+
+- Changelog for v1.3 (2018-08-04)
+ * Revert pull error handling from 881
+ * bud should not search context directory for Dockerfile
+ * Set BUILDAH_ISOLATION=rootless when running unprivileged
+ * .papr.sh: Also test with BUILDAH_ISOLATION=rootless
+ * Skip certain tests when we're using "rootless" isolation
+ * .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot
+ * Add and implement IsolationOCIRootless
+ * Add a value for IsolationOCIRootless
+ * Fix rmi to remove intermediate images associated with an image
+ * Return policy error on pull
+ * Update containers/image to 216acb1bcd2c1abef736ee322e17147ee2b7d76c
+ * Switch to github.com/containers/image/pkg/sysregistriesv2
+ * unshare: make adjusting the OOM score optional
+ * Add flags validation
+ * chroot: handle raising process limits
+ * chroot: make the resource limits name map module-global
+ * Remove rpm.bats, we need to run this manually
+ * Set the default ulimits to match Docker
+ * buildah: no args is out of bounds
+ * unshare: error message missed the pid
+ * preprocess ".in" suffixed Dockerfiles
+ * Fix the the in buildah-config man page
+ * Only test rpmbuild on latest fedora
+ * Add support for multiple Short options
+ * Update to latest urvave/cli
+ * Add additional SELinux tests
+ * Vendor in latest github.com/containers/{image;storage}
+ * Stop testing with golang 1.8
+ * Fix volume cache issue with buildah bud --layers
+ * Create buildah pull command
+ * Increase the deadline for gometalinter during 'make validate'
+ * .papr.sh: Also test with BUILDAH_ISOLATION=chroot
+ * .travis.yml: run integration tests with BUILDAH_ISOLATION=chroot
+ * Add a Dockerfile
+ * Set BUILDAH_ISOLATION=chroot when running unprivileged
+ * Add and implement IsolationChroot
+ * Update github.com/opencontainers/runc
+ * maybeReexecUsingUserNamespace: add a default for root
+ * Allow ping command without NET_RAW Capabilities
+ * rmi.storageImageID: fix Wrapf format warning
+ * Allow Dockerfile content to come from stdin
+ * Vendor latest container/storage to fix overlay mountopt
+ * userns: assign additional IDs sequentially
+ * Remove default dev/pts
+ * Add OnBuild test to baseline test
+ * tests/run.bats(volumes): use :z when SELinux is enabled
+ * Avoid a stall in runCollectOutput()
+ * Use manifest from container/image
+ * Vendor in latest containers/image and containers/storage
+ * add rename command
+ * Completion command
+ * Update CHANGELOG.md
+ * Update vendor for runc to fix 32 bit builds
+ * bash completion: remove shebang
+ * Update vendor for runc to fix 32 bit builds
+
+- Changelog for v1.2 (2018-07-14)
+ * Vendor in lates containers/image
+ * build-using-dockerfile: let -t include transports again
+ * Block use of /proc/acpi and /proc/keys from inside containers
+ * Fix handling of --registries-conf
+ * Fix becoming a maintainer link
+ * add optional CI test fo darwin
+ * Don't pass a nil error to errors.Wrapf()
+ * image filter test: use kubernetes/pause as a "since"
+ * Add --cidfile option to from
+ * vendor: update containers/storage
+ * Contributors need to find the CONTRIBUTOR.md file easier
+ * Add a --loglevel option to build-with-dockerfile
+ * Create Development plan
+ * cmd: Code improvement
+ * allow buildah cross compile for a darwin target
+ * Add unused function param lint check
+ * docs: Follow man-pages(7) suggestions for SYNOPSIS
+ * Start using github.com/seccomp/containers-golang
+ * umount: add all option to umount all mounted containers
+ * runConfigureNetwork(): remove an unused parameter
+ * Update github.com/opencontainers/selinux
+ * Fix buildah bud --layers
+ * Force ownership of /etc/hosts and /etc/resolv.conf to 0:0
+ * main: if unprivileged, reexec in a user namespace
+ * Vendor in latest imagebuilder
+ * Reduce the complexity of the buildah.Run function
+ * mount: output it before replacing lastError
+ * Vendor in latest selinux-go code
+ * Implement basic recognition of the "--isolation" option
+ * Run(): try to resolve non-absolute paths using $PATH
+ * Run(): don't include any default environment variables
+ * build without seccomp
+ * vendor in latest runtime-tools
+ * bind/mount_unsupported.go: remove import errors
+ * Update github.com/opencontainers/runc
+ * Add Capabilities lists to BuilderInfo
+ * Tweaks for commit tests
+ * commit: recognize committing to second storage locations
+ * Fix ARGS parsing for run commands
+ * Add info on registries.conf to from manpage
+ * Switch from using docker to podman for testing in .papr
+ * buildah: set the HTTP User-Agent
+ * ONBUILD tutorial
+ * Add information about the configuration files to the install docs
+ * Makefile: add uninstall
+ * Add tilde info for push to troubleshooting
+ * mount: support multiple inputs
+ * Use the right formatting when adding entries to /etc/hosts
+ * Vendor in latest go-selinux bindings
+ * Allow --userns-uid-map/--userns-gid-map to be global options
+ * bind: factor out UnmountMountpoints
+ * Run(): simplify runCopyStdio()
+ * Run(): handle POLLNVAL results
+ * Run(): tweak terminal mode handling
+ * Run(): rename 'copyStdio' to 'copyPipes'
+ * Run(): don't set a Pdeathsig for the runtime
+ * Run(): add options for adding and removing capabilities
+ * Run(): don't use a callback when a slice will do
+ * setupSeccomp(): refactor
+ * Change RunOptions.Stdin/Stdout/Stderr to just be Reader/Writers
+ * Escape use of '_' in .md docs
+ * Break out getProcIDMappings()
+ * Break out SetupIntermediateMountNamespace()
+ * Add Multi From Demo
+ * Use the c/image conversion code instead of converting configs manually
+ * Don't throw away the manifest MIME type and guess again
+ * Consolidate loading manifest and config in initConfig
+ * Pass a types.Image to Builder.initConfig
+ * Require an image ID in importBuilderDataFromImage
+ * Use c/image/manifest.GuessMIMEType instead of a custom heuristic
+ * Do not ignore any parsing errors in initConfig
+ * Explicitly handle "from scratch" images in Builder.initConfig
+ * Fix parsing of OCI images
+ * Simplify dead but dangerous-looking error handling
+ * Don't ignore v2s1 history if docker_version is not set
+ * Add --rm and --force-rm to buildah bud
+ * Add --all,-a flag to buildah images
+ * Separate stdio buffering from writing
+ * Remove tty check from images --format
+ * Add environment variable BUILDAH_RUNTIME
+ * Add --layers and --no-cache to buildah bud
+ * Touch up images man
+ * version.md: fix DESCRIPTION
+ * tests: add containers test
+ * tests: add images test
+ * images: fix usage
+ * fix make clean error
+ * Change 'registries' to 'container registries' in man
+ * add commit test
+ * Add(): learn to record hashes of what we add
+ * Minor update to buildah config documentation for entrypoint
+ * Bump to v1.2-dev
+ * Add registries.conf link to a few man pages
+
+- Changelog for v1.1 (2018-06-08)
+ * Drop capabilities if running container processes as non root
+ * Print Warning message if cmd will not be used based on entrypoint
+ * Update 01-intro.md
+ * Shouldn't add insecure registries to list of search registries
+ * Report errors on bad transports specification when pushing images
+ * Move parsing code out of common for namespaces and into pkg/parse.go
+ * Add disable-content-trust noop flag to bud
+ * Change freenode chan to buildah
+ * runCopyStdio(): don't close stdin unless we saw POLLHUP
+ * Add registry errors for pull
+ * runCollectOutput(): just read until the pipes are closed on us
+ * Run(): provide redirection for stdio
+ * rmi, rm: add test
+ * add mount test
+ * Add parameter judgment for commands that do not require parameters
+ * Add context dir to bud command in baseline test
+ * run.bats: check that we can run with symlinks in the bundle path
+ * Give better messages to users when image can not be found
+ * use absolute path for bundlePath
+ * Add environment variable to buildah --format
+ * rm: add validation to args and all option
+ * Accept json array input for config entrypoint
+ * Run(): process RunOptions.Mounts, and its flags
+ * Run(): only collect error output from stdio pipes if we created some
+ * Add OnBuild support for Dockerfiles
+ * Quick fix on demo readme
+ * run: fix validate flags
+ * buildah bud should require a context directory or URL
+ * Touchup tutorial for run changes
+ * Validate common bud and from flags
+ * images: Error if the specified imagename does not exist
+ * inspect: Increase err judgments to avoid panic
+ * add test to inspect
+ * buildah bud picks up ENV from base image
+ * Extend the amount of time travis_wait should wait
+ * Add a make target for Installing CNI plugins
+ * Add tests for namespace control flags
+ * copy.bats: check ownerships in the container
+ * Fix SELinux test errors when SELinux is enabled
+ * Add example CNI configurations
+ * Run: set supplemental group IDs
+ * Run: use a temporary mount namespace
+ * Use CNI to configure container networks
+ * add/secrets/commit: Use mappings when setting permissions on added content
+ * Add CLI options for specifying namespace and cgroup setup
+ * Always set mappings when using user namespaces
+ * Run(): break out creation of stdio pipe descriptors
+ * Read UID/GID mapping information from containers and images
+ * Additional bud CI tests
+ * Run integration tests under travis_wait in Travis
+ * build-using-dockerfile: add --annotation
+ * Implement --squash for build-using-dockerfile and commit
+ * Vendor in latest container/storage for devicemapper support
+ * add test to inspect
+ * Vendor github.com/onsi/ginkgo and github.com/onsi/gomega
+ * Test with Go 1.10, too
+ * Add console syntax highlighting to troubleshooting page
+ * bud.bats: print "$output" before checking its contents
+ * Manage "Run" containers more closely
+ * Break Builder.Run()'s "run runc" bits out
+ * util.ResolveName(): handle completion for tagged/digested image names
+ * Handle /etc/hosts and /etc/resolv.conf properly in container
+ * Documentation fixes
+ * Make it easier to parse our temporary directory as an image name
+ * Makefile: list new pkg/ subdirectoris as dependencies for buildah
+ * containerImageSource: return more-correct errors
+ * API cleanup: PullPolicy and TerminalPolicy should be types
+ * Make "run --terminal" and "run -t" aliases for "run --tty"
+ * Vendor github.com/containernetworking/cni v0.6.0
+ * Update github.com/containers/storage
+ * Update github.com/projectatomic/libpod
+ * Add support for buildah bud --label
+ * buildah push/from can push and pull images with no reference
+ * Vendor in latest containers/image
+ * Update gometalinter to fix install.tools error
+ * Update troubleshooting with new run workaround
+ * Added a bud demo and tidied up
+ * Attempt to download file from url, if fails assume Dockerfile
+ * Add buildah bud CI tests for ENV variables
+ * Re-enable rpm .spec version check and new commit test
+ * Update buildah scratch demo to support el7
+ * Added Docker compatibility demo
+ * Update to F28 and new run format in baseline test
+ * Touchup man page short options across man pages
+ * Added demo dir and a demo. chged distrorlease
+ * builder-inspect: fix format option
+ * Add cpu-shares short flag (-c) and cpu-shares CI tests
+ * Minor fixes to formatting in rpm spec changelog
+ * Fix rpm .spec changelog formatting
+ * CI tests and minor fix for cache related noop flags
+ * buildah-from: add effective value to mount propagation
+
+- Changelog for v1.0 (2018-05-06)
+ * Declare Buildah 1.0
+ * Add cache-from and no-cache noops, and fix doco
+ * Update option and documentation for --force-rm
+ * Adding noop for --force-rm to match --rm
+ * Add buildah bud ENTRYPOINT,CMD,RUN tests
+ * Adding buildah bud RUN test scenarios
+ * Extend tests for empty buildah run command
+ * Fix formatting error in run.go
+ * Update buildah run to make command required
+ * Expanding buildah run cmd/entrypoint tests
+ * Update test cases for buildah run behaviour
+ * Remove buildah run cmd and entrypoint execution
+ * Add Files section with registries.conf to pertinent man pages
+ * tests/config: perfect test
+ * tests/from: add name test
+ * Do not print directly to stdout in Commit()
+ * Touch up auth test commands
+ * Force "localhost" as a default registry
+ * Drop util.GetLocalTime()
+ * Vendor in latest containers/image
+ * Validate host and container paths passed to --volume
+ * test/from: add add-host test
+ * Add --compress, --rm, --squash flags as a noop for bud
+ * Add FIPS mode secret to buildah run and bud
+ * Add config --comment/--domainname/--history-comment/--hostname
+ * 'buildah config': stop replacing Created-By whenever it's not specified
+ * Modify man pages so they compile correctly in mandb
+ * Add description on how to do --isolation to buildah-bud man page
+ * Add support for --iidfile to bud and commit
+ * Refactor buildah bud for vendoring
+ * Fail if date or git not installed
+ * Revert update of entrypoint behaviour to match docker
+ * Vendor in latest imagebuilder code to fix multiple stage builds
+ * Add /bin/sh -c to entrypoint in config
+ * image_test: Improve the test
+ * Fix README example of buildah config
+ * buildah-image: add validation to 'format'
+ * Simple changes to allow buildah to pass make validate
+ * Clarify the use of buildah config options
+ * containers_test: Perfect testing
+ * buildah images and podman images are listing different sizes
+ * buildah-containers: add tests and example to the man page
+ * buildah-containers: add validation to 'format'
+ * Clarify the use of buildah config options
+ * Minor fix for lighttpd example in README
+ * Add tls-verification to troubleshooting
+ * Modify buildah rmi to account for changes in containers/storage
+ * Vendor in latest containers/image and containers/storage
+ * addcopy: add src validation
+ * Remove tarball as an option from buildah push --help
+ * Fix secrets patch
+ * Update entrypoint behaviour to match docker
+ * Display imageId after commit
+ * config: add support for StopSignal
+ * Fix docker login issue in travis.yml
+ * Allow referencing stages as index and names
+ * Add multi-stage builds tests
+ * Add multi-stage builds support
+ * Add accessor functions for comment and stop signal
+ * Vendor in latest imagebuilder, to get mixed case AS support
+ * Allow umount to have multi-containers
+ * Update buildah push doc
+ * buildah bud walks symlinks
+ * Imagename is required for commit atm, update manpage
+
+- Changelog for v0.16.0 (2018-04-08)
+ * Bump to v0.16.0
+ * Remove requires for ostree-lib in rpm spec file
+ * Add support for shell
+ * buildah.spec should require ostree-libs
+ * Vendor in latest containers/image
+ * bash: prefer options
+ * Change image time to locale, add troubleshooting.md, add logo to other mds
+ * buildah-run.md: fix error SYNOPSIS
+ * docs: fix error example
+ * Allow --cmd parameter to have commands as values
+ * Touchup README to re-enable logo
+ * Clean up README.md
+ * Make default-mounts-file a hidden option
+ * Document the mounts.conf file
+ * Fix man pages to format correctly
+ * Add various transport support to buildah from
+ * Add unit tests to run.go
+ * If the user overrides the storage driver, the options should be dropped
+ * Show Config/Manifest as JSON string in inspect when format is not set
+ * Switch which for that in README.md
+ * Remove COPR
+ * Fix wrong order of parameters
+ * Vendor in latest containers/image
+ * Remove shallowCopy(), which shouldn't be saving us time any more
+ * shallowCopy: avoid a second read of the container's layer
diff --git a/chroot/pty_posix.go b/chroot/pty_posix.go
new file mode 100644
index 0000000..97c1b65
--- /dev/null
+++ b/chroot/pty_posix.go
@@ -0,0 +1,64 @@
+//go:build freebsd && cgo
+// +build freebsd,cgo
+
+package chroot
+
+// #include <fcntl.h>
+// #include <stdlib.h>
+import "C"
+
+import (
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+func openpt() (int, error) {
+ fd, err := C.posix_openpt(C.O_RDWR)
+ if err != nil {
+ return -1, err
+ }
+ if _, err := C.grantpt(fd); err != nil {
+ return -1, err
+ }
+ return int(fd), nil
+}
+
+func ptsname(fd int) (string, error) {
+ path, err := C.ptsname(C.int(fd))
+ if err != nil {
+ return "", err
+ }
+ return C.GoString(path), nil
+}
+
+func unlockpt(fd int) error {
+ if _, err := C.unlockpt(C.int(fd)); err != nil {
+ return err
+ }
+ return nil
+}
+
+func getPtyDescriptors() (int, int, error) {
+ // Create a pseudo-terminal and open the control side
+ controlFd, err := openpt()
+ if err != nil {
+ logrus.Errorf("error opening PTY control side using posix_openpt: %v", err)
+ return -1, -1, err
+ }
+ if err = unlockpt(controlFd); err != nil {
+ logrus.Errorf("error unlocking PTY: %v", err)
+ return -1, -1, err
+ }
+ // Get a handle for the other end.
+ ptyName, err := ptsname(controlFd)
+ if err != nil {
+ logrus.Errorf("error getting PTY name: %v", err)
+ return -1, -1, err
+ }
+ ptyFd, err := unix.Open(ptyName, unix.O_RDWR, 0)
+ if err != nil {
+ logrus.Errorf("error opening PTY: %v", err)
+ return -1, -1, err
+ }
+ return controlFd, ptyFd, nil
+}
diff --git a/chroot/pty_ptmx.go b/chroot/pty_ptmx.go
new file mode 100644
index 0000000..b1ba96b
--- /dev/null
+++ b/chroot/pty_ptmx.go
@@ -0,0 +1,47 @@
+//go:build linux
+// +build linux
+
+package chroot
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Open a PTY using the /dev/ptmx device. The main advantage of using
+// this instead of posix_openpt is that it avoids cgo.
+func getPtyDescriptors() (int, int, error) {
+ // Create a pseudo-terminal -- open a copy of the master side.
+ controlFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600)
+ if err != nil {
+ return -1, -1, fmt.Errorf("opening PTY master using /dev/ptmx: %v", err)
+ }
+ // Set the kernel's lock to "unlocked".
+ locked := 0
+ if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(controlFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 {
+ return -1, -1, fmt.Errorf("unlocking PTY descriptor: %v", err)
+ }
+ // Get a handle for the other end.
+ ptyFd, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(controlFd), unix.TIOCGPTPEER, unix.O_RDWR|unix.O_NOCTTY)
+ if int(ptyFd) == -1 {
+ if errno, isErrno := err.(syscall.Errno); !isErrno || (errno != syscall.EINVAL && errno != syscall.ENOTTY) {
+ return -1, -1, fmt.Errorf("getting PTY descriptor: %v", err)
+ }
+ // EINVAL means the kernel's too old to understand TIOCGPTPEER. Try TIOCGPTN.
+ ptyN, err := unix.IoctlGetInt(controlFd, unix.TIOCGPTN)
+ if err != nil {
+ return -1, -1, fmt.Errorf("getting PTY number: %v", err)
+ }
+ ptyName := fmt.Sprintf("/dev/pts/%d", ptyN)
+ fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0620)
+ if err != nil {
+ return -1, -1, fmt.Errorf("opening PTY %q: %v", ptyName, err)
+ }
+ ptyFd = uintptr(fd)
+ }
+ return controlFd, int(ptyFd), nil
+}
diff --git a/chroot/pty_unsupported.go b/chroot/pty_unsupported.go
new file mode 100644
index 0000000..55ea597
--- /dev/null
+++ b/chroot/pty_unsupported.go
@@ -0,0 +1,13 @@
+//go:build !linux && !(freebsd && cgo)
+// +build !linux
+// +build !freebsd !cgo
+
+package chroot
+
+import (
+ "errors"
+)
+
// getPtyDescriptors is the stub for platforms on which we have no way of
// allocating a pseudo-terminal; it always fails, returning placeholder
// descriptor values along with the error.
func getPtyDescriptors() (int, int, error) {
	err := errors.New("getPtyDescriptors not supported on this platform")
	return -1, -1, err
}
diff --git a/chroot/run_common.go b/chroot/run_common.go
new file mode 100644
index 0000000..deda64f
--- /dev/null
+++ b/chroot/run_common.go
@@ -0,0 +1,831 @@
+//go:build linux || freebsd
+// +build linux freebsd
+
+package chroot
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "os/signal"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/containers/buildah/bind"
+ "github.com/containers/buildah/util"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+ "golang.org/x/term"
+)
+
const (
	// runUsingChrootCommand is a command we use as a key for reexec
	// (the grandparent subprocess entry point).
	runUsingChrootCommand = "buildah-chroot-runtime"
	// runUsingChrootExecCommand is a command we use as a key for reexec
	// (the exec subprocess entry point).
	runUsingChrootExecCommand = "buildah-chroot-exec"
)
+
// init registers the reexec entry points for the chroot runtime and exec
// subprocesses, and builds the number->name reverse lookup for resource
// limits from rlimitsMap (declared elsewhere in this package).
func init() {
	reexec.Register(runUsingChrootCommand, runUsingChrootMain)
	reexec.Register(runUsingChrootExecCommand, runUsingChrootExecMain)
	// Invert the name->number map so diagnostics can report limit names.
	for limitName, limitNumber := range rlimitsMap {
		rlimitsReverseMap[limitNumber] = limitName
	}
}
+
// runUsingChrootExecSubprocOptions bundles the settings handed to the
// reexec'ed exec subprocess (presumably serialized over a pipe like
// runUsingChrootSubprocOptions is in RunUsingChroot — confirm at the caller).
type runUsingChrootExecSubprocOptions struct {
	// Spec is the runtime configuration for the process to run.
	Spec *specs.Spec
	// BundlePath holds temporary files, directories, and mountpoints.
	BundlePath string
}
+
+// RunUsingChroot runs a chrooted process, using some of the settings from the
+// passed-in spec, and using the specified bundlePath to hold temporary files,
+// directories, and mountpoints.
+func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
+ var confwg sync.WaitGroup
+ var homeFound bool
+ for _, env := range spec.Process.Env {
+ if strings.HasPrefix(env, "HOME=") {
+ homeFound = true
+ break
+ }
+ }
+ if !homeFound {
+ spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOME=%s", homeDir))
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // Write the runtime configuration, mainly for debugging.
+ specbytes, err := json.Marshal(spec)
+ if err != nil {
+ return err
+ }
+ if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
+ return fmt.Errorf("storing runtime configuration: %w", err)
+ }
+ logrus.Debugf("config = %v", string(specbytes))
+
+ // Default to using stdin/stdout/stderr if we weren't passed objects to use.
+ if stdin == nil {
+ stdin = os.Stdin
+ }
+ if stdout == nil {
+ stdout = os.Stdout
+ }
+ if stderr == nil {
+ stderr = os.Stderr
+ }
+
+ // Create a pipe for passing configuration down to the next process.
+ preader, pwriter, err := os.Pipe()
+ if err != nil {
+ return fmt.Errorf("creating configuration pipe: %w", err)
+ }
+ config, conferr := json.Marshal(runUsingChrootSubprocOptions{
+ Spec: spec,
+ BundlePath: bundlePath,
+ })
+ if conferr != nil {
+ return fmt.Errorf("encoding configuration for %q: %w", runUsingChrootCommand, conferr)
+ }
+
+ // Set our terminal's mode to raw, to pass handling of special
+ // terminal input to the terminal in the container.
+ if spec.Process.Terminal && term.IsTerminal(unix.Stdin) {
+ state, err := term.MakeRaw(unix.Stdin)
+ if err != nil {
+ logrus.Warnf("error setting terminal state: %v", err)
+ } else {
+ defer func() {
+ if err = term.Restore(unix.Stdin, state); err != nil {
+ logrus.Errorf("unable to restore terminal state: %v", err)
+ }
+ }()
+ }
+ }
+
+ // Raise any resource limits that are higher than they are now, before
+ // we drop any more privileges.
+ if err = setRlimits(spec, false, true); err != nil {
+ return err
+ }
+
+ // Start the grandparent subprocess.
+ cmd := unshare.Command(runUsingChrootCommand)
+ setPdeathsig(cmd.Cmd)
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr
+ cmd.Dir = "/"
+ cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}
+
+ interrupted := make(chan os.Signal, 100)
+ cmd.Hook = func(int) error {
+ signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+ go func() {
+ for receivedSignal := range interrupted {
+ if err := cmd.Process.Signal(receivedSignal); err != nil {
+ logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
+ }
+ }
+ }()
+ return nil
+ }
+
+ logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd)
+ confwg.Add(1)
+ go func() {
+ _, conferr = io.Copy(pwriter, bytes.NewReader(config))
+ pwriter.Close()
+ confwg.Done()
+ }()
+ cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
+ err = cmd.Run()
+ confwg.Wait()
+ signal.Stop(interrupted)
+ close(interrupted)
+ if err == nil {
+ return conferr
+ }
+ return err
+}
+
+// main() for grandparent subprocess. Its main job is to shuttle stdio back
+// and forth, managing a pseudo-terminal if we want one, for our child, the
+// parent subprocess.
+func runUsingChrootMain() {
+ var options runUsingChrootSubprocOptions
+
+ runtime.LockOSThread()
+
+ // Set logging.
+ if level := os.Getenv("LOGLEVEL"); level != "" {
+ if ll, err := strconv.Atoi(level); err == nil {
+ logrus.SetLevel(logrus.Level(ll))
+ }
+ os.Unsetenv("LOGLEVEL")
+ }
+
+ // Unpack our configuration.
+ confPipe := os.NewFile(3, "confpipe")
+ if confPipe == nil {
+ fmt.Fprintf(os.Stderr, "error reading options pipe\n")
+ os.Exit(1)
+ }
+ defer confPipe.Close()
+ if err := json.NewDecoder(confPipe).Decode(&options); err != nil {
+ fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
+ os.Exit(1)
+ }
+
+ if options.Spec == nil || options.Spec.Process == nil {
+ fmt.Fprintf(os.Stderr, "invalid options spec in runUsingChrootMain\n")
+ os.Exit(1)
+ }
+
+ // Prepare to shuttle stdio back and forth.
+ rootUID32, rootGID32, err := util.GetHostRootIDs(options.Spec)
+ if err != nil {
+ logrus.Errorf("error determining ownership for container stdio")
+ os.Exit(1)
+ }
+ rootUID := int(rootUID32)
+ rootGID := int(rootGID32)
+ relays := make(map[int]int)
+ closeOnceRunning := []*os.File{}
+ var ctty *os.File
+ var stdin io.Reader
+ var stdinCopy io.WriteCloser
+ var stdout io.Writer
+ var stderr io.Writer
+ fdDesc := make(map[int]string)
+ if options.Spec.Process.Terminal {
+ ptyMasterFd, ptyFd, err := getPtyDescriptors()
+ if err != nil {
+ logrus.Errorf("error opening PTY descriptors: %v", err)
+ os.Exit(1)
+ }
+ // Make notes about what's going where.
+ relays[ptyMasterFd] = unix.Stdout
+ relays[unix.Stdin] = ptyMasterFd
+ fdDesc[ptyMasterFd] = "container terminal"
+ fdDesc[unix.Stdin] = "stdin"
+ fdDesc[unix.Stdout] = "stdout"
+ winsize := &unix.Winsize{}
+ // Set the pseudoterminal's size to the configured size, or our own.
+ if options.Spec.Process.ConsoleSize != nil {
+ // Use configured sizes.
+ winsize.Row = uint16(options.Spec.Process.ConsoleSize.Height)
+ winsize.Col = uint16(options.Spec.Process.ConsoleSize.Width)
+ } else {
+ if term.IsTerminal(unix.Stdin) {
+ // Use the size of our terminal.
+ winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ)
+ if err != nil {
+ logrus.Debugf("error reading current terminal's size")
+ winsize.Row = 0
+ winsize.Col = 0
+ }
+ }
+ }
+ if winsize.Row != 0 && winsize.Col != 0 {
+ if err = unix.IoctlSetWinsize(ptyFd, unix.TIOCSWINSZ, winsize); err != nil {
+ logrus.Warnf("error setting terminal size for pty")
+ }
+ // FIXME - if we're connected to a terminal, we should
+ // be passing the updated terminal size down when we
+ // receive a SIGWINCH.
+ }
+ // Open an *os.File object that we can pass to our child.
+ ctty = os.NewFile(uintptr(ptyFd), "/dev/tty")
+ // Set ownership for the PTY.
+ if err = ctty.Chown(rootUID, rootGID); err != nil {
+ var cttyInfo unix.Stat_t
+ err2 := unix.Fstat(ptyFd, &cttyInfo)
+ from := ""
+ op := "setting"
+ if err2 == nil {
+ op = "changing"
+ from = fmt.Sprintf("from %d/%d ", cttyInfo.Uid, cttyInfo.Gid)
+ }
+ logrus.Warnf("error %s ownership of container PTY %sto %d/%d: %v", op, from, rootUID, rootGID, err)
+ }
+ // Set permissions on the PTY.
+ if err = ctty.Chmod(0620); err != nil {
+ logrus.Errorf("error setting permissions of container PTY: %v", err)
+ os.Exit(1)
+ }
+ // Make a note that our child (the parent subprocess) should
+ // have the PTY connected to its stdio, and that we should
+ // close it once it's running.
+ stdin = ctty
+ stdout = ctty
+ stderr = ctty
+ closeOnceRunning = append(closeOnceRunning, ctty)
+ } else {
+ // Create pipes for stdio.
+ stdinRead, stdinWrite, err := os.Pipe()
+ if err != nil {
+ logrus.Errorf("error opening pipe for stdin: %v", err)
+ }
+ stdoutRead, stdoutWrite, err := os.Pipe()
+ if err != nil {
+ logrus.Errorf("error opening pipe for stdout: %v", err)
+ }
+ stderrRead, stderrWrite, err := os.Pipe()
+ if err != nil {
+ logrus.Errorf("error opening pipe for stderr: %v", err)
+ }
+ // Make notes about what's going where.
+ relays[unix.Stdin] = int(stdinWrite.Fd())
+ relays[int(stdoutRead.Fd())] = unix.Stdout
+ relays[int(stderrRead.Fd())] = unix.Stderr
+ fdDesc[int(stdinWrite.Fd())] = "container stdin pipe"
+ fdDesc[int(stdoutRead.Fd())] = "container stdout pipe"
+ fdDesc[int(stderrRead.Fd())] = "container stderr pipe"
+ fdDesc[unix.Stdin] = "stdin"
+ fdDesc[unix.Stdout] = "stdout"
+ fdDesc[unix.Stderr] = "stderr"
+ // Set ownership for the pipes.
+ if err = stdinRead.Chown(rootUID, rootGID); err != nil {
+ logrus.Errorf("error setting ownership of container stdin pipe: %v", err)
+ os.Exit(1)
+ }
+ if err = stdoutWrite.Chown(rootUID, rootGID); err != nil {
+ logrus.Errorf("error setting ownership of container stdout pipe: %v", err)
+ os.Exit(1)
+ }
+ if err = stderrWrite.Chown(rootUID, rootGID); err != nil {
+ logrus.Errorf("error setting ownership of container stderr pipe: %v", err)
+ os.Exit(1)
+ }
+ // Make a note that our child (the parent subprocess) should
+ // have the pipes connected to its stdio, and that we should
+ // close its ends of them once it's running.
+ stdin = stdinRead
+ stdout = stdoutWrite
+ stderr = stderrWrite
+ closeOnceRunning = append(closeOnceRunning, stdinRead, stdoutWrite, stderrWrite)
+ stdinCopy = stdinWrite
+ defer stdoutRead.Close()
+ defer stderrRead.Close()
+ }
+ for readFd, writeFd := range relays {
+ if err := unix.SetNonblock(readFd, true); err != nil {
+ logrus.Errorf("error setting descriptor %d (%s) non-blocking: %v", readFd, fdDesc[readFd], err)
+ return
+ }
+ if err := unix.SetNonblock(writeFd, false); err != nil {
+ logrus.Errorf("error setting descriptor %d (%s) blocking: %v", relays[writeFd], fdDesc[writeFd], err)
+ return
+ }
+ }
+ if err := unix.SetNonblock(relays[unix.Stdin], true); err != nil {
+ logrus.Errorf("error setting %d to nonblocking: %v", relays[unix.Stdin], err)
+ }
+ go func() {
+ buffers := make(map[int]*bytes.Buffer)
+ for _, writeFd := range relays {
+ buffers[writeFd] = new(bytes.Buffer)
+ }
+ pollTimeout := -1
+ stdinClose := false
+ for len(relays) > 0 {
+ fds := make([]unix.PollFd, 0, len(relays))
+ for fd := range relays {
+ fds = append(fds, unix.PollFd{Fd: int32(fd), Events: unix.POLLIN | unix.POLLHUP})
+ }
+ _, err := unix.Poll(fds, pollTimeout)
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("poll: %v", err)) {
+ return
+ }
+ removeFds := make(map[int]struct{})
+ for _, rfd := range fds {
+ if rfd.Revents&unix.POLLHUP == unix.POLLHUP {
+ removeFds[int(rfd.Fd)] = struct{}{}
+ }
+ if rfd.Revents&unix.POLLNVAL == unix.POLLNVAL {
+ logrus.Debugf("error polling descriptor %s: closed?", fdDesc[int(rfd.Fd)])
+ removeFds[int(rfd.Fd)] = struct{}{}
+ }
+ if rfd.Revents&unix.POLLIN == 0 {
+ if stdinClose && stdinCopy == nil {
+ continue
+ }
+ continue
+ }
+ b := make([]byte, 8192)
+ nread, err := unix.Read(int(rfd.Fd), b)
+ util.LogIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
+ if nread > 0 {
+ if wfd, ok := relays[int(rfd.Fd)]; ok {
+ nwritten, err := buffers[wfd].Write(b[:nread])
+ if err != nil {
+ logrus.Debugf("buffer: %v", err)
+ continue
+ }
+ if nwritten != nread {
+ logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nread, nwritten)
+ continue
+ }
+ }
+ // If this is the last of the data we'll be able to read
+ // from this descriptor, read as much as there is to read.
+ for rfd.Revents&unix.POLLHUP == unix.POLLHUP {
+ nr, err := unix.Read(int(rfd.Fd), b)
+ util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
+ if nr <= 0 {
+ break
+ }
+ if wfd, ok := relays[int(rfd.Fd)]; ok {
+ nwritten, err := buffers[wfd].Write(b[:nr])
+ if err != nil {
+ logrus.Debugf("buffer: %v", err)
+ break
+ }
+ if nwritten != nr {
+ logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten)
+ break
+ }
+ }
+ }
+ }
+ if nread == 0 {
+ removeFds[int(rfd.Fd)] = struct{}{}
+ }
+ }
+ pollTimeout = -1
+ for wfd, buffer := range buffers {
+ if buffer.Len() > 0 {
+ nwritten, err := unix.Write(wfd, buffer.Bytes())
+ util.LogIfNotRetryable(err, fmt.Sprintf("write %s: %v", fdDesc[wfd], err))
+ if nwritten >= 0 {
+ _ = buffer.Next(nwritten)
+ }
+ }
+ if buffer.Len() > 0 {
+ pollTimeout = 100
+ }
+ if wfd == relays[unix.Stdin] && stdinClose && buffer.Len() == 0 {
+ stdinCopy.Close()
+ delete(relays, unix.Stdin)
+ }
+ }
+ for rfd := range removeFds {
+ if rfd == unix.Stdin {
+ buffer, found := buffers[relays[unix.Stdin]]
+ if found && buffer.Len() > 0 {
+ stdinClose = true
+ continue
+ }
+ }
+ if !options.Spec.Process.Terminal && rfd == unix.Stdin {
+ stdinCopy.Close()
+ }
+ delete(relays, rfd)
+ }
+ }
+ }()
+
+ // Set up mounts and namespaces, and run the parent subprocess.
+ status, err := runUsingChroot(options.Spec, options.BundlePath, ctty, stdin, stdout, stderr, closeOnceRunning)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error running subprocess: %v\n", err)
+ os.Exit(1)
+ }
+
+ // Pass the process's exit status back to the caller by exiting with the same status.
+ if status.Exited() {
+ if status.ExitStatus() != 0 {
+ fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", status.ExitStatus())
+ }
+ os.Exit(status.ExitStatus())
+ } else if status.Signaled() {
+ fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", status.Signal())
+ os.Exit(1)
+ }
+}
+
+// runUsingChroot, still in the grandparent process, sets up various bind
+// mounts and then runs the parent process in its own user namespace with the
+// necessary ID mappings.
+func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io.Reader, stdout, stderr io.Writer, closeOnceRunning []*os.File) (wstatus unix.WaitStatus, err error) {
+ var confwg sync.WaitGroup
+
+ // Create a new mount namespace for ourselves and bind mount everything to a new location.
+ undoIntermediates, err := bind.SetupIntermediateMountNamespace(spec, bundlePath)
+ if err != nil {
+ return 1, err
+ }
+ defer func() {
+ if undoErr := undoIntermediates(); undoErr != nil {
+ logrus.Debugf("error cleaning up intermediate mount NS: %v", err)
+ }
+ }()
+
+ // Bind mount in our filesystems.
+ undoChroots, err := setupChrootBindMounts(spec, bundlePath)
+ if err != nil {
+ return 1, err
+ }
+ defer func() {
+ if undoErr := undoChroots(); undoErr != nil {
+ logrus.Debugf("error cleaning up intermediate chroot bind mounts: %v", err)
+ }
+ }()
+
+ // Create a pipe for passing configuration down to the next process.
+ preader, pwriter, err := os.Pipe()
+ if err != nil {
+ return 1, fmt.Errorf("creating configuration pipe: %w", err)
+ }
+ config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{
+ Spec: spec,
+ BundlePath: bundlePath,
+ })
+ if conferr != nil {
+ fmt.Fprintf(os.Stderr, "error re-encoding configuration for %q", runUsingChrootExecCommand)
+ os.Exit(1)
+ }
+
+ // Apologize for the namespace configuration that we're about to ignore.
+ logNamespaceDiagnostics(spec)
+
+ // We need to lock the thread so that PR_SET_PDEATHSIG won't trigger if the current thread exits.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // Start the parent subprocess.
+ cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...)
+ setPdeathsig(cmd.Cmd)
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr
+ cmd.Dir = "/"
+ cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}
+ if ctty != nil {
+ cmd.Setsid = true
+ cmd.Ctty = ctty
+ }
+ cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
+ if err := setPlatformUnshareOptions(spec, cmd); err != nil {
+ return 1, fmt.Errorf("setting platform unshare options: %w", err)
+
+ }
+ interrupted := make(chan os.Signal, 100)
+ cmd.Hook = func(int) error {
+ for _, f := range closeOnceRunning {
+ f.Close()
+ }
+ signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+ go func() {
+ for receivedSignal := range interrupted {
+ if err := cmd.Process.Signal(receivedSignal); err != nil {
+ logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
+ }
+ }
+ }()
+ return nil
+ }
+
+ logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd)
+ confwg.Add(1)
+ go func() {
+ _, conferr = io.Copy(pwriter, bytes.NewReader(config))
+ pwriter.Close()
+ confwg.Done()
+ }()
+ err = cmd.Run()
+ confwg.Wait()
+ signal.Stop(interrupted)
+ close(interrupted)
+ if err != nil {
+ if exitError, ok := err.(*exec.ExitError); ok {
+ if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+ if waitStatus.Exited() {
+ if waitStatus.ExitStatus() != 0 {
+ fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", waitStatus.ExitStatus())
+ }
+ os.Exit(waitStatus.ExitStatus())
+ } else if waitStatus.Signaled() {
+ fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal())
+ os.Exit(1)
+ }
+ }
+ }
+ fmt.Fprintf(os.Stderr, "process exited with error: %v", err)
+ os.Exit(1)
+ }
+
+ return 0, nil
+}
+
+// main() for parent subprocess. Its main job is to try to make our
+// environment look like the one described by the runtime configuration blob,
+// and then launch the intended command as a child.
+func runUsingChrootExecMain() {
+ args := os.Args[1:]
+ var options runUsingChrootExecSubprocOptions
+ var err error
+
+ runtime.LockOSThread()
+
+ // Set logging.
+ if level := os.Getenv("LOGLEVEL"); level != "" {
+ if ll, err := strconv.Atoi(level); err == nil {
+ logrus.SetLevel(logrus.Level(ll))
+ }
+ os.Unsetenv("LOGLEVEL")
+ }
+
+ // Unpack our configuration.
+ confPipe := os.NewFile(3, "confpipe")
+ if confPipe == nil {
+ fmt.Fprintf(os.Stderr, "error reading options pipe\n")
+ os.Exit(1)
+ }
+ defer confPipe.Close()
+ if err := json.NewDecoder(confPipe).Decode(&options); err != nil {
+ fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
+ os.Exit(1)
+ }
+
+ // Set the hostname. We're already in a distinct UTS namespace and are admins in the user
+ // namespace which created it, so we shouldn't get a permissions error, but seccomp policy
+ // might deny our attempt to call sethostname() anyway, so log a debug message for that.
+ if options.Spec == nil || options.Spec.Process == nil {
+ fmt.Fprintf(os.Stderr, "invalid options spec passed in\n")
+ os.Exit(1)
+ }
+
+ if options.Spec.Hostname != "" {
+ setContainerHostname(options.Spec.Hostname)
+ }
+
+ // Try to chroot into the root. Do this before we potentially
+ // block the syscall via the seccomp profile. Allow the
+ // platform to override this - on FreeBSD, we use a simple
+ // jail to set the hostname in the container
+ if err := createPlatformContainer(options); err != nil {
+ var oldst, newst unix.Stat_t
+ if err := unix.Stat(options.Spec.Root.Path, &oldst); err != nil {
+ fmt.Fprintf(os.Stderr, "error stat()ing intended root directory %q: %v\n", options.Spec.Root.Path, err)
+ os.Exit(1)
+ }
+ if err := unix.Chdir(options.Spec.Root.Path); err != nil {
+ fmt.Fprintf(os.Stderr, "error chdir()ing to intended root directory %q: %v\n", options.Spec.Root.Path, err)
+ os.Exit(1)
+ }
+ if err := unix.Chroot(options.Spec.Root.Path); err != nil {
+ fmt.Fprintf(os.Stderr, "error chroot()ing into directory %q: %v\n", options.Spec.Root.Path, err)
+ os.Exit(1)
+ }
+ if err := unix.Stat("/", &newst); err != nil {
+ fmt.Fprintf(os.Stderr, "error stat()ing current root directory: %v\n", err)
+ os.Exit(1)
+ }
+ if oldst.Dev != newst.Dev || oldst.Ino != newst.Ino {
+ fmt.Fprintf(os.Stderr, "unknown error chroot()ing into directory %q: %v\n", options.Spec.Root.Path, err)
+ os.Exit(1)
+ }
+ logrus.Debugf("chrooted into %q", options.Spec.Root.Path)
+ }
+
+ // not doing because it's still shared: creating devices
+ // not doing because it's not applicable: setting annotations
+ // not doing because it's still shared: setting sysctl settings
+ // not doing because cgroupfs is read only: configuring control groups
+ // -> this means we can use the freezer to make sure there aren't any lingering processes
+ // -> this means we ignore cgroups-based controls
+ // not doing because we don't set any in the config: running hooks
+ // not doing because we don't set it in the config: setting rootfs read-only
+ // not doing because we don't set it in the config: setting rootfs propagation
+ logrus.Debugf("setting apparmor profile")
+ if err = setApparmorProfile(options.Spec); err != nil {
+ fmt.Fprintf(os.Stderr, "error setting apparmor profile for process: %v\n", err)
+ os.Exit(1)
+ }
+ if err = setSelinuxLabel(options.Spec); err != nil {
+ fmt.Fprintf(os.Stderr, "error setting SELinux label for process: %v\n", err)
+ os.Exit(1)
+ }
+
+ logrus.Debugf("setting resource limits")
+ if err = setRlimits(options.Spec, false, false); err != nil {
+ fmt.Fprintf(os.Stderr, "error setting process resource limits for process: %v\n", err)
+ os.Exit(1)
+ }
+
+ // Try to change to the directory.
+ cwd := options.Spec.Process.Cwd
+ if !filepath.IsAbs(cwd) {
+ cwd = "/" + cwd
+ }
+ cwd = filepath.Clean(cwd)
+ if err := unix.Chdir("/"); err != nil {
+ fmt.Fprintf(os.Stderr, "error chdir()ing into new root directory %q: %v\n", options.Spec.Root.Path, err)
+ os.Exit(1)
+ }
+ if err := unix.Chdir(cwd); err != nil {
+ fmt.Fprintf(os.Stderr, "error chdir()ing into directory %q under root %q: %v\n", cwd, options.Spec.Root.Path, err)
+ os.Exit(1)
+ }
+ logrus.Debugf("changed working directory to %q", cwd)
+
+ // Drop privileges.
+ user := options.Spec.Process.User
+ if len(user.AdditionalGids) > 0 {
+ gids := make([]int, len(user.AdditionalGids))
+ for i := range user.AdditionalGids {
+ gids[i] = int(user.AdditionalGids[i])
+ }
+ logrus.Debugf("setting supplemental groups")
+ if err = syscall.Setgroups(gids); err != nil {
+ fmt.Fprintf(os.Stderr, "error setting supplemental groups list: %v", err)
+ os.Exit(1)
+ }
+ } else {
+ setgroups, _ := os.ReadFile("/proc/self/setgroups")
+ if strings.Trim(string(setgroups), "\n") != "deny" {
+ logrus.Debugf("clearing supplemental groups")
+ if err = syscall.Setgroups([]int{}); err != nil {
+ fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v", err)
+ os.Exit(1)
+ }
+ }
+ }
+
+ logrus.Debugf("setting gid")
+ if err = unix.Setresgid(int(user.GID), int(user.GID), int(user.GID)); err != nil {
+ fmt.Fprintf(os.Stderr, "error setting GID: %v", err)
+ os.Exit(1)
+ }
+
+ if err = setSeccomp(options.Spec); err != nil {
+ fmt.Fprintf(os.Stderr, "error setting seccomp filter for process: %v\n", err)
+ os.Exit(1)
+ }
+
+ logrus.Debugf("setting capabilities")
+ var keepCaps []string
+ if user.UID != 0 {
+ keepCaps = []string{"CAP_SETUID"}
+ }
+ if err := setCapabilities(options.Spec, keepCaps...); err != nil {
+ fmt.Fprintf(os.Stderr, "error setting capabilities for process: %v\n", err)
+ os.Exit(1)
+ }
+
+ logrus.Debugf("setting uid")
+ if err = unix.Setresuid(int(user.UID), int(user.UID), int(user.UID)); err != nil {
+ fmt.Fprintf(os.Stderr, "error setting UID: %v", err)
+ os.Exit(1)
+ }
+
+ // Actually run the specified command.
+ cmd := exec.Command(args[0], args[1:]...)
+ setPdeathsig(cmd)
+ cmd.Env = options.Spec.Process.Env
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+ cmd.Dir = cwd
+ logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH"))
+ interrupted := make(chan os.Signal, 100)
+ if err = cmd.Start(); err != nil {
+ fmt.Fprintf(os.Stderr, "process failed to start with error: %v", err)
+ }
+ go func() {
+ for range interrupted {
+ if err := cmd.Process.Signal(syscall.SIGKILL); err != nil {
+ logrus.Infof("%v while attempting to send SIGKILL to child process", err)
+ }
+ }
+ }()
+ signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+ err = cmd.Wait()
+ signal.Stop(interrupted)
+ close(interrupted)
+ if err != nil {
+ if exitError, ok := err.(*exec.ExitError); ok {
+ if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+ if waitStatus.Exited() {
+ if waitStatus.ExitStatus() != 0 {
+ fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", waitStatus.ExitStatus())
+ }
+ os.Exit(waitStatus.ExitStatus())
+ } else if waitStatus.Signaled() {
+ fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal())
+ os.Exit(1)
+ }
+ }
+ }
+ fmt.Fprintf(os.Stderr, "process exited with error: %v", err)
+ os.Exit(1)
+ }
+}
+
+// parses the resource limits for ourselves and any processes that
+// we'll start into a format that's more in line with the kernel APIs
+func parseRlimits(spec *specs.Spec) (map[int]unix.Rlimit, error) {
+ if spec.Process == nil {
+ return nil, nil
+ }
+ parsed := make(map[int]unix.Rlimit)
+ for _, limit := range spec.Process.Rlimits {
+ resource, recognized := rlimitsMap[strings.ToUpper(limit.Type)]
+ if !recognized {
+ return nil, fmt.Errorf("parsing limit type %q", limit.Type)
+ }
+ parsed[resource] = makeRlimit(limit)
+ }
+ return parsed, nil
+}
+
+// setRlimits sets any resource limits that we want to apply to processes that
+// we'll start.
+func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
+ limits, err := parseRlimits(spec)
+ if err != nil {
+ return err
+ }
+ for resource, desired := range limits {
+ var current unix.Rlimit
+ if err := unix.Getrlimit(resource, &current); err != nil {
+ return fmt.Errorf("reading %q limit: %w", rlimitsReverseMap[resource], err)
+ }
+ if desired.Max > current.Max && onlyLower {
+ // this would raise a hard limit, and we're only here to lower them
+ continue
+ }
+ if desired.Max < current.Max && onlyRaise {
+ // this would lower a hard limit, and we're only here to raise them
+ continue
+ }
+ if err := unix.Setrlimit(resource, &desired); err != nil {
+ return fmt.Errorf("setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d): %w", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max, err)
+ }
+ }
+ return nil
+}
+
+func isDevNull(dev os.FileInfo) bool {
+ if dev.Mode()&os.ModeCharDevice != 0 {
+ stat, _ := dev.Sys().(*syscall.Stat_t)
+ nullStat := syscall.Stat_t{}
+ if err := syscall.Stat(os.DevNull, &nullStat); err != nil {
+ logrus.Warnf("unable to stat /dev/null: %v", err)
+ return false
+ }
+ if stat.Rdev == nullStat.Rdev {
+ return true
+ }
+ }
+ return false
+}
diff --git a/chroot/run_freebsd.go b/chroot/run_freebsd.go
new file mode 100644
index 0000000..52763ee
--- /dev/null
+++ b/chroot/run_freebsd.go
@@ -0,0 +1,269 @@
+//go:build freebsd
+// +build freebsd
+
+package chroot
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containers/buildah/pkg/jail"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
var (
	// rlimitsMap maps the RLIMIT_* names used in runtime spec files to the
	// resource numbers that the setrlimit/getrlimit wrappers expect.
	rlimitsMap = map[string]int{
		"RLIMIT_AS":      unix.RLIMIT_AS,
		"RLIMIT_CORE":    unix.RLIMIT_CORE,
		"RLIMIT_CPU":     unix.RLIMIT_CPU,
		"RLIMIT_DATA":    unix.RLIMIT_DATA,
		"RLIMIT_FSIZE":   unix.RLIMIT_FSIZE,
		"RLIMIT_MEMLOCK": unix.RLIMIT_MEMLOCK,
		"RLIMIT_NOFILE":  unix.RLIMIT_NOFILE,
		"RLIMIT_NPROC":   unix.RLIMIT_NPROC,
		"RLIMIT_RSS":     unix.RLIMIT_RSS,
		"RLIMIT_STACK":   unix.RLIMIT_STACK,
	}
	// rlimitsReverseMap maps resource numbers back to names for error
	// messages; it starts empty and is presumably filled in elsewhere at
	// runtime — TODO confirm where it gets populated.
	rlimitsReverseMap = map[int]string{}
)
+
// runUsingChrootSubprocOptions bundles the settings that are passed (as JSON,
// presumably — verify against the caller, which is outside this file's
// FreeBSD-specific portion) to the intermediate subprocess.
type runUsingChrootSubprocOptions struct {
	Spec       *specs.Spec
	BundlePath string
}
+
// setPlatformUnshareOptions configures platform-specific options on the
// command that will run the parent subprocess; FreeBSD needs none.
func setPlatformUnshareOptions(spec *specs.Spec, cmd *unshare.Cmd) error {
	return nil
}
+
// setContainerHostname would set the container's hostname; on FreeBSD this is
// deliberately a no-op.
func setContainerHostname(name string) {
	// On FreeBSD, we have to set this later when we create the
	// jail below in createPlatformContainer
}
+
// setSelinuxLabel would apply the spec's SELinux process label; SELinux is a
// Linux feature, so this is a no-op here.
func setSelinuxLabel(spec *specs.Spec) error {
	// Ignore this on FreeBSD
	return nil
}
+
// setApparmorProfile would apply the spec's AppArmor profile; AppArmor is a
// Linux feature, so this is a no-op here.
func setApparmorProfile(spec *specs.Spec) error {
	// FreeBSD doesn't have apparmor
	return nil
}
+
// setCapabilities would set process capabilities from the spec; FreeBSD's
// capability model differs from Linux's, so this is a no-op here.
func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
	// FreeBSD capabilities are nothing like Linux
	return nil
}
+
// makeRlimit converts a spec rlimit into the unix.Rlimit shape used by
// Setrlimit; FreeBSD's Rlimit fields are int64, hence the conversions.
func makeRlimit(limit specs.POSIXRlimit) unix.Rlimit {
	return unix.Rlimit{Cur: int64(limit.Soft), Max: int64(limit.Hard)}
}
+
// createPlatformContainer creates a FreeBSD jail rooted at the spec's root
// path and attaches the current process to it. The jail inherits the host's
// IPv4/IPv6 configuration, and its hostname is taken from the spec (this is
// where the container hostname actually gets set on FreeBSD).
func createPlatformContainer(options runUsingChrootExecSubprocOptions) error {
	path := options.Spec.Root.Path
	jconf := jail.NewConfig()
	jconf.Set("name", filepath.Base(path)+"-chroot")
	jconf.Set("host.hostname", options.Spec.Hostname)
	// Non-persistent: the jail goes away when its last process exits.
	jconf.Set("persist", false)
	jconf.Set("path", path)
	jconf.Set("ip4", jail.INHERIT)
	jconf.Set("ip6", jail.INHERIT)
	jconf.Set("allow.raw_sockets", true)
	jconf.Set("enforce_statfs", 1)
	_, err := jail.CreateAndAttach(jconf)
	if err != nil {
		return fmt.Errorf("creating jail: %w", err)
	}
	return nil
}
+
// logNamespaceDiagnostics knows which namespaces we want to create.
// Output debug messages when that differs from what we're being asked to do.
func logNamespaceDiagnostics(spec *specs.Spec) {
	// Nothing here for FreeBSD — the Linux-style namespace requests this
	// reports on don't apply to the jail-based implementation.
}
+
// makeReadOnly is expected to ensure that mntpoint is read-only.
// NOTE(review): on FreeBSD this currently only verifies that mntpoint can be
// statfs()ed and never actually remounts anything; "flags" is unused — confirm
// whether that is intentional.
func makeReadOnly(mntpoint string, flags uintptr) error {
	var fs unix.Statfs_t
	// Make sure it's read-only.
	if err := unix.Statfs(mntpoint, &fs); err != nil {
		return fmt.Errorf("checking if directory %q was bound read-only: %w", mntpoint, err)
	}
	return nil
}
+
// saveDir returns a per-container stash directory (".save-<rootfs name>")
// next to path, used to park original files while they are temporarily
// replaced by emulated file mounts.
func saveDir(spec *specs.Spec, path string) string {
	id := filepath.Base(spec.Root.Path)
	return filepath.Join(filepath.Dir(path), ".save-"+id)
}
+
+func copyFile(source, dest string) error {
+ in, err := os.Open(source)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+
+ out, err := os.Create(dest)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ _, err = io.Copy(out, in)
+ if err != nil {
+ return err
+ }
+ return out.Close()
+}
+
// rename records a rename that has already been performed (from -> to) so
// that cleanup code can reverse it by renaming "to" back to "from".
type rename struct {
	from, to string
}
+
// setupChrootBindMounts actually bind mounts things under the rootfs, and returns a
// callback that will clean up its work.
func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func() error, err error) {
	// Bookkeeping for undoBinds: renames to reverse, mountpoints to
	// unmount, and created paths to remove, in that order.
	renames := []rename{}
	unmounts := []string{}
	removes := []string{}
	undoBinds = func() error {
		// Best-effort cleanup: log each failure, but remember only the
		// first error (or keep any error already stored in the named
		// return value).
		for _, r := range renames {
			if err2 := os.Rename(r.to, r.from); err2 != nil {
				logrus.Warnf("pkg/chroot: error renaming %q to %q: %v", r.to, r.from, err2)
				if err == nil {
					err = err2
				}
			}
		}
		for _, path := range unmounts {
			if err2 := mount.Unmount(path); err2 != nil {
				logrus.Warnf("pkg/chroot: error unmounting %q: %v", spec.Root.Path, err2)
				if err == nil {
					err = err2
				}
			}
		}
		for _, path := range removes {
			if err2 := os.Remove(path); err2 != nil {
				logrus.Warnf("pkg/chroot: error removing %q: %v", path, err2)
				if err == nil {
					err = err2
				}
			}
		}
		return err
	}

	// Now mount all of those things to be under the rootfs's location in this
	// mount namespace.
	for _, m := range spec.Mounts {
		// If the target is there, we can just mount it.
		var srcinfo os.FileInfo
		switch m.Type {
		case "nullfs":
			srcinfo, err = os.Stat(m.Source)
			if err != nil {
				return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", m.Source, err)
			}
		}
		target := filepath.Join(spec.Root.Path, m.Destination)
		if _, err := os.Stat(target); err != nil {
			// If the target can't be stat()ted, check the error.
			if !os.IsNotExist(err) {
				return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", target, err)
			}
			// The target isn't there yet, so create it, and make a
			// note to remove it later.
			// XXX: This was copied from the linux version which supports bind mounting files.
			// Leaving it here since I plan to add this to FreeBSD's nullfs.
			if m.Type != "nullfs" || srcinfo.IsDir() {
				if err = os.MkdirAll(target, 0111); err != nil {
					return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
				}
				removes = append(removes, target)
			} else {
				if err = os.MkdirAll(filepath.Dir(target), 0111); err != nil {
					return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
				}
				// Don't do this until we can support file mounts in nullfs
				/*var file *os.File
				if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0); err != nil {
					return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target)
				}
				file.Close()
				removes = append(removes, target)*/
			}
		}
		logrus.Debugf("mount: %v", m)
		switch m.Type {
		case "nullfs":
			// Do the bind mount.
			if !srcinfo.IsDir() {
				// The source is a single file: emulate a file bind mount
				// by stashing any existing target in the save directory
				// and copying the source into place; undoBinds reverses
				// both steps.
				logrus.Debugf("emulating file mount %q on %q", m.Source, target)
				_, err := os.Stat(target)
				if err == nil {
					save := saveDir(spec, target)
					if _, err := os.Stat(save); err != nil {
						if os.IsNotExist(err) {
							err = os.MkdirAll(save, 0111)
						}
						if err != nil {
							return undoBinds, fmt.Errorf("creating file mount save directory %q: %w", save, err)
						}
						removes = append(removes, save)
					}
					savePath := filepath.Join(save, filepath.Base(target))
					if _, err := os.Stat(target); err == nil {
						logrus.Debugf("moving %q to %q", target, savePath)
						if err := os.Rename(target, savePath); err != nil {
							return undoBinds, fmt.Errorf("moving %q to %q: %w", target, savePath, err)
						}
						renames = append(renames, rename{
							from: target,
							to:   savePath,
						})
					}
				} else {
					removes = append(removes, target)
				}
				if err := copyFile(m.Source, target); err != nil {
					return undoBinds, fmt.Errorf("copying %q to %q: %w", m.Source, target, err)
				}
			} else {
				logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
				if err := mount.Mount(m.Source, target, "nullfs", strings.Join(m.Options, ",")); err != nil {
					return undoBinds, fmt.Errorf("bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
				}
				logrus.Debugf("bind mounted %q to %q", m.Source, target)
				unmounts = append(unmounts, target)
			}
		case "devfs", "fdescfs", "tmpfs":
			// Mount /dev, /dev/fd.
			if err := mount.Mount(m.Source, target, m.Type, strings.Join(m.Options, ",")); err != nil {
				return undoBinds, fmt.Errorf("mounting %q to %q in mount namespace (%q, %q): %w", m.Type, m.Destination, target, strings.Join(m.Options, ","), err)
			}
			logrus.Debugf("mounted a %q to %q", m.Type, target)
			unmounts = append(unmounts, target)
		}
	}
	return undoBinds, nil
}
+
+// setPdeathsig sets a parent-death signal for the process
+func setPdeathsig(cmd *exec.Cmd) {
+ if cmd.SysProcAttr == nil {
+ cmd.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+}
diff --git a/chroot/run_linux.go b/chroot/run_linux.go
new file mode 100644
index 0000000..dae4b71
--- /dev/null
+++ b/chroot/run_linux.go
@@ -0,0 +1,711 @@
+//go:build linux
+// +build linux
+
+package chroot
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/containers/buildah/copier"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runc/libcontainer/apparmor"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "github.com/syndtr/gocapability/capability"
+ "golang.org/x/sys/unix"
+)
+
var (
	// rlimitsMap maps the RLIMIT_* names used in runtime spec files to the
	// resource numbers that the setrlimit/getrlimit wrappers expect.
	rlimitsMap = map[string]int{
		"RLIMIT_AS":         unix.RLIMIT_AS,
		"RLIMIT_CORE":       unix.RLIMIT_CORE,
		"RLIMIT_CPU":        unix.RLIMIT_CPU,
		"RLIMIT_DATA":       unix.RLIMIT_DATA,
		"RLIMIT_FSIZE":      unix.RLIMIT_FSIZE,
		"RLIMIT_LOCKS":      unix.RLIMIT_LOCKS,
		"RLIMIT_MEMLOCK":    unix.RLIMIT_MEMLOCK,
		"RLIMIT_MSGQUEUE":   unix.RLIMIT_MSGQUEUE,
		"RLIMIT_NICE":       unix.RLIMIT_NICE,
		"RLIMIT_NOFILE":     unix.RLIMIT_NOFILE,
		"RLIMIT_NPROC":      unix.RLIMIT_NPROC,
		"RLIMIT_RSS":        unix.RLIMIT_RSS,
		"RLIMIT_RTPRIO":     unix.RLIMIT_RTPRIO,
		"RLIMIT_RTTIME":     unix.RLIMIT_RTTIME,
		"RLIMIT_SIGPENDING": unix.RLIMIT_SIGPENDING,
		"RLIMIT_STACK":      unix.RLIMIT_STACK,
	}
	// rlimitsReverseMap maps resource numbers back to names for error
	// messages; it starts empty and is presumably filled in elsewhere at
	// runtime — TODO confirm where it gets populated.
	rlimitsReverseMap = map[int]string{}
)
+
// runUsingChrootSubprocOptions bundles the settings that are passed
// (presumably as JSON — verify against the caller, which is outside this
// Linux-specific portion) to the intermediate subprocess, including the ID
// mappings to apply in its user namespace.
type runUsingChrootSubprocOptions struct {
	Spec        *specs.Spec
	BundlePath  string
	UIDMappings []syscall.SysProcIDMap
	GIDMappings []syscall.SysProcIDMap
}
+
// setPlatformUnshareOptions configures the unshare command that will run the
// parent subprocess: it always unshares the UTS and mount namespaces, and
// additionally creates a user namespace — installing either the spec's ID
// mappings or identity-style copies of the host's — whenever the spec
// requests a user namespace or supplies mappings.
func setPlatformUnshareOptions(spec *specs.Spec, cmd *unshare.Cmd) error {
	// If we have configured ID mappings, set them here so that they can apply to the child.
	hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
	if err != nil {
		return err
	}
	uidmap, gidmap := spec.Linux.UIDMappings, spec.Linux.GIDMappings
	if len(uidmap) == 0 {
		// No UID mappings are configured for the container. Borrow our parent's mappings.
		uidmap = append([]specs.LinuxIDMapping{}, hostUidmap...)
		for i := range uidmap {
			// Map each ID to itself relative to our own namespace.
			uidmap[i].HostID = uidmap[i].ContainerID
		}
	}
	if len(gidmap) == 0 {
		// No GID mappings are configured for the container. Borrow our parent's mappings.
		gidmap = append([]specs.LinuxIDMapping{}, hostGidmap...)
		for i := range gidmap {
			// Map each ID to itself relative to our own namespace.
			gidmap[i].HostID = gidmap[i].ContainerID
		}
	}

	// New UTS and mount namespaces are always created.
	cmd.UnshareFlags = syscall.CLONE_NEWUTS | syscall.CLONE_NEWNS
	requestedUserNS := false
	for _, ns := range spec.Linux.Namespaces {
		if ns.Type == specs.UserNamespace {
			requestedUserNS = true
		}
	}
	if len(spec.Linux.UIDMappings) > 0 || len(spec.Linux.GIDMappings) > 0 || requestedUserNS {
		cmd.UnshareFlags = cmd.UnshareFlags | syscall.CLONE_NEWUSER
		cmd.UidMappings = uidmap
		cmd.GidMappings = gidmap
		cmd.GidMappingsEnableSetgroups = true
	}
	cmd.OOMScoreAdj = spec.Process.OOMScoreAdj
	return nil
}
+
// setContainerHostname sets the hostname inside the container's UTS
// namespace; failure (e.g. a seccomp denial) is only logged at debug level.
func setContainerHostname(name string) {
	if err := unix.Sethostname([]byte(name)); err != nil {
		logrus.Debugf("failed to set hostname %q for process: %v", name, err)
	}
}
+
// logNamespaceDiagnostics knows which namespaces we want to create.
// Output debug messages when that differs from what we're being asked to do.
// (This implementation only ever creates new mount and UTS namespaces, so
// every other namespace request in the spec is acknowledged with an apology.)
func logNamespaceDiagnostics(spec *specs.Spec) {
	sawMountNS := false
	sawUTSNS := false
	for _, ns := range spec.Linux.Namespaces {
		switch ns.Type {
		case specs.CgroupNamespace:
			if ns.Path != "" {
				logrus.Debugf("unable to join cgroup namespace, sorry about that")
			} else {
				logrus.Debugf("unable to create cgroup namespace, sorry about that")
			}
		case specs.IPCNamespace:
			if ns.Path != "" {
				logrus.Debugf("unable to join IPC namespace, sorry about that")
			} else {
				logrus.Debugf("unable to create IPC namespace, sorry about that")
			}
		case specs.MountNamespace:
			if ns.Path != "" {
				logrus.Debugf("unable to join mount namespace %q, creating a new one", ns.Path)
			}
			sawMountNS = true
		case specs.NetworkNamespace:
			if ns.Path != "" {
				logrus.Debugf("unable to join network namespace, sorry about that")
			} else {
				logrus.Debugf("unable to create network namespace, sorry about that")
			}
		case specs.PIDNamespace:
			if ns.Path != "" {
				logrus.Debugf("unable to join PID namespace, sorry about that")
			} else {
				logrus.Debugf("unable to create PID namespace, sorry about that")
			}
		case specs.UserNamespace:
			if ns.Path != "" {
				logrus.Debugf("unable to join user namespace, sorry about that")
			}
		case specs.UTSNamespace:
			if ns.Path != "" {
				logrus.Debugf("unable to join UTS namespace %q, creating a new one", ns.Path)
			}
			sawUTSNS = true
		}
	}
	// Mount and UTS namespaces get created whether the spec asked or not.
	if !sawMountNS {
		logrus.Debugf("mount namespace not requested, but creating a new one anyway")
	}
	if !sawUTSNS {
		logrus.Debugf("UTS namespace not requested, but creating a new one anyway")
	}
}
+
// setApparmorProfile sets the apparmor profile for ourselves, and hopefully any child processes that we'll start.
// It is a no-op when AppArmor is disabled on the host or the spec names no profile.
func setApparmorProfile(spec *specs.Spec) error {
	if !apparmor.IsEnabled() || spec.Process.ApparmorProfile == "" {
		return nil
	}
	if err := apparmor.ApplyProfile(spec.Process.ApparmorProfile); err != nil {
		return fmt.Errorf("setting apparmor profile to %q: %w", spec.Process.ApparmorProfile, err)
	}
	return nil
}
+
+// setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start.
+func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
+ currentCaps, err := capability.NewPid2(0)
+ if err != nil {
+ return fmt.Errorf("reading capabilities of current process: %w", err)
+ }
+ if err := currentCaps.Load(); err != nil {
+ return fmt.Errorf("loading capabilities: %w", err)
+ }
+ caps, err := capability.NewPid2(0)
+ if err != nil {
+ return fmt.Errorf("reading capabilities of current process: %w", err)
+ }
+ capMap := map[capability.CapType][]string{
+ capability.BOUNDING: spec.Process.Capabilities.Bounding,
+ capability.EFFECTIVE: spec.Process.Capabilities.Effective,
+ capability.INHERITABLE: []string{},
+ capability.PERMITTED: spec.Process.Capabilities.Permitted,
+ capability.AMBIENT: spec.Process.Capabilities.Ambient,
+ }
+ knownCaps := capability.List()
+ noCap := capability.Cap(-1)
+ for capType, capList := range capMap {
+ for _, capToSet := range capList {
+ cap := noCap
+ for _, c := range knownCaps {
+ if strings.EqualFold("CAP_"+c.String(), capToSet) {
+ cap = c
+ break
+ }
+ }
+ if cap == noCap {
+ return fmt.Errorf("mapping capability %q to a number", capToSet)
+ }
+ caps.Set(capType, cap)
+ }
+ for _, capToSet := range keepCaps {
+ cap := noCap
+ for _, c := range knownCaps {
+ if strings.EqualFold("CAP_"+c.String(), capToSet) {
+ cap = c
+ break
+ }
+ }
+ if cap == noCap {
+ return fmt.Errorf("mapping capability %q to a number", capToSet)
+ }
+ if currentCaps.Get(capType, cap) {
+ caps.Set(capType, cap)
+ }
+ }
+ }
+ if err = caps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS); err != nil {
+ return fmt.Errorf("setting capabilities: %w", err)
+ }
+ return nil
+}
+
// makeRlimit converts a spec rlimit into the unix.Rlimit shape used by
// Setrlimit; on Linux both sides use unsigned 64-bit fields, so the values
// carry over directly.
func makeRlimit(limit specs.POSIXRlimit) unix.Rlimit {
	return unix.Rlimit{Cur: limit.Soft, Max: limit.Hard}
}
+
// createPlatformContainer always fails on Linux; the caller treats the error
// as a signal to fall back to the chdir()/chroot() sequence instead.
func createPlatformContainer(options runUsingChrootExecSubprocOptions) error {
	return errors.New("unsupported createPlatformContainer")
}
+
+func mountFlagsForFSFlags(fsFlags uintptr) uintptr {
+ var mountFlags uintptr
+ for _, mapping := range []struct {
+ fsFlag uintptr
+ mountFlag uintptr
+ }{
+ {unix.ST_MANDLOCK, unix.MS_MANDLOCK},
+ {unix.ST_NOATIME, unix.MS_NOATIME},
+ {unix.ST_NODEV, unix.MS_NODEV},
+ {unix.ST_NODIRATIME, unix.MS_NODIRATIME},
+ {unix.ST_NOEXEC, unix.MS_NOEXEC},
+ {unix.ST_NOSUID, unix.MS_NOSUID},
+ {unix.ST_RDONLY, unix.MS_RDONLY},
+ {unix.ST_RELATIME, unix.MS_RELATIME},
+ {unix.ST_SYNCHRONOUS, unix.MS_SYNCHRONOUS},
+ } {
+ if fsFlags&mapping.fsFlag == mapping.fsFlag {
+ mountFlags |= mapping.mountFlag
+ }
+ }
+ return mountFlags
+}
+
// makeReadOnly checks whether mntpoint is already read-only and, if it is
// not, remounts it in place as a read-only bind mount using the given extra
// mount flags.
func makeReadOnly(mntpoint string, flags uintptr) error {
	var fs unix.Statfs_t
	// Make sure it's read-only.
	if err := unix.Statfs(mntpoint, &fs); err != nil {
		return fmt.Errorf("checking if directory %q was bound read-only: %w", mntpoint, err)
	}
	if fs.Flags&unix.ST_RDONLY == 0 {
		// All callers currently pass MS_RDONLY in "flags", but in case they stop doing
		// that at some point in the future...
		if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_RDONLY|unix.MS_REMOUNT|unix.MS_BIND, ""); err != nil {
			return fmt.Errorf("remounting %s in mount namespace read-only: %w", mntpoint, err)
		}
	}
	return nil
}
+
+// setupChrootBindMounts actually bind mounts things under the rootfs, and returns a
+// callback that will clean up its work.
+//
+// It bind mounts /dev, /proc, and /sys from the host (read-only where
+// possible), applies the bind/tmpfs/overlay mounts from the spec (verifying
+// that nodev/noexec/nosuid/ro requests actually took effect), then handles
+// the spec's read-only and masked paths.
+func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func() error, err error) {
+	var fs unix.Statfs_t
+	// The cleanup callback lazily detaches everything below the rootfs in
+	// one go, retrying for a bounded time because the kernel can transiently
+	// report EBUSY/EAGAIN right after the container process exits.
+	undoBinds = func() error {
+		if err2 := unix.Unmount(spec.Root.Path, unix.MNT_DETACH); err2 != nil {
+			retries := 0
+			for (err2 == unix.EBUSY || err2 == unix.EAGAIN) && retries < 50 {
+				time.Sleep(50 * time.Millisecond)
+				err2 = unix.Unmount(spec.Root.Path, unix.MNT_DETACH)
+				retries++
+			}
+			if err2 != nil {
+				logrus.Warnf("pkg/chroot: error unmounting %q (retried %d times): %v", spec.Root.Path, retries, err2)
+				if err == nil {
+					err = err2
+				}
+			}
+		}
+		return err
+	}
+
+	// Now bind mount all of those things to be under the rootfs's location in this
+	// mount namespace.
+	commonFlags := uintptr(unix.MS_BIND | unix.MS_REC | unix.MS_PRIVATE)
+	bindFlags := commonFlags
+	devFlags := commonFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY
+	procFlags := devFlags | unix.MS_NODEV
+	sysFlags := devFlags | unix.MS_NODEV
+
+	// Bind /dev read-only.
+	subDev := filepath.Join(spec.Root.Path, "/dev")
+	if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			err = os.Mkdir(subDev, 0755)
+			if err == nil {
+				err = unix.Mount("/dev", subDev, "bind", devFlags, "")
+			}
+		}
+		if err != nil {
+			return undoBinds, fmt.Errorf("bind mounting /dev from host into mount namespace: %w", err)
+		}
+	}
+	// Make sure it's read-only.
+	if err = unix.Statfs(subDev, &fs); err != nil {
+		return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", subDev, err)
+	}
+	if fs.Flags&unix.ST_RDONLY == 0 {
+		if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT|unix.MS_BIND, ""); err != nil {
+			return undoBinds, fmt.Errorf("remounting /dev in mount namespace read-only: %w", err)
+		}
+	}
+	logrus.Debugf("bind mounted %q to %q", "/dev", filepath.Join(spec.Root.Path, "/dev"))
+
+	// Bind /proc read-only.
+	subProc := filepath.Join(spec.Root.Path, "/proc")
+	if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			err = os.Mkdir(subProc, 0755)
+			if err == nil {
+				err = unix.Mount("/proc", subProc, "bind", procFlags, "")
+			}
+		}
+		if err != nil {
+			return undoBinds, fmt.Errorf("bind mounting /proc from host into mount namespace: %w", err)
+		}
+	}
+	logrus.Debugf("bind mounted %q to %q", "/proc", filepath.Join(spec.Root.Path, "/proc"))
+
+	// Bind /sys read-only.
+	subSys := filepath.Join(spec.Root.Path, "/sys")
+	if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			err = os.Mkdir(subSys, 0755)
+			if err == nil {
+				err = unix.Mount("/sys", subSys, "bind", sysFlags, "")
+			}
+		}
+		if err != nil {
+			return undoBinds, fmt.Errorf("bind mounting /sys from host into mount namespace: %w", err)
+		}
+	}
+	if err := makeReadOnly(subSys, sysFlags); err != nil {
+		return undoBinds, err
+	}
+
+	// Best-effort: mirror mounts under /sys into the rootfs. If the mount
+	// table can't be read we just skip them, and individual failures below
+	// are logged rather than fatal.
+	mnts, _ := mount.GetMounts()
+	for _, m := range mnts {
+		if !strings.HasPrefix(m.Mountpoint, "/sys/") &&
+			m.Mountpoint != "/sys" {
+			continue
+		}
+		subSys := filepath.Join(spec.Root.Path, m.Mountpoint)
+		if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil {
+			msg := fmt.Sprintf("could not bind mount %q, skipping: %v", m.Mountpoint, err)
+			// Use the non-formatting loggers: msg is already formatted and
+			// may contain '%' characters from the error text.
+			if strings.HasPrefix(m.Mountpoint, "/sys") {
+				logrus.Info(msg)
+			} else {
+				logrus.Warning(msg)
+			}
+			continue
+		}
+		if err := makeReadOnly(subSys, sysFlags); err != nil {
+			return undoBinds, err
+		}
+	}
+	logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys"))
+
+	// Bind, overlay, or tmpfs mount everything we've been asked to mount.
+	for _, m := range spec.Mounts {
+		// Skip anything that we just mounted.
+		switch m.Destination {
+		case "/dev", "/proc", "/sys":
+			logrus.Debugf("already bind mounted %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
+			continue
+		default:
+			if strings.HasPrefix(m.Destination, "/dev/") {
+				continue
+			}
+			if strings.HasPrefix(m.Destination, "/proc/") {
+				continue
+			}
+			if strings.HasPrefix(m.Destination, "/sys/") {
+				continue
+			}
+		}
+		// Skip anything that isn't a bind or overlay or tmpfs mount.
+		if m.Type != "bind" && m.Type != "tmpfs" && m.Type != "overlay" {
+			logrus.Debugf("skipping mount of type %q on %q", m.Type, m.Destination)
+			continue
+		}
+		// If the target is already there, we can just mount over it.
+		var srcinfo os.FileInfo
+		switch m.Type {
+		case "bind":
+			srcinfo, err = os.Stat(m.Source)
+			if err != nil {
+				return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", m.Source, err)
+			}
+		case "overlay", "tmpfs":
+			srcinfo, err = os.Stat("/")
+			if err != nil {
+				return undoBinds, fmt.Errorf("examining / to use as a template for a %s mount: %w", m.Type, err)
+			}
+		}
+		target := filepath.Join(spec.Root.Path, m.Destination)
+		// Check if target is a symlink.
+		stat, err := os.Lstat(target)
+		// If target is a symlink, follow the link and ensure the destination exists.
+		if err == nil && stat != nil && (stat.Mode()&os.ModeSymlink != 0) {
+			target, err = copier.Eval(spec.Root.Path, m.Destination, copier.EvalOptions{})
+			if err != nil {
+				// Return the undo callback, not nil: mounts made so far
+				// still need to be cleaned up by the caller.
+				return undoBinds, fmt.Errorf("evaluating symlink %q: %w", target, err)
+			}
+			// Stat the destination of the evaluated symlink.
+			_, err = os.Stat(target)
+		}
+		if err != nil {
+			// If the target can't be stat()ted, check the error.
+			if !errors.Is(err, os.ErrNotExist) {
+				return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", target, err)
+			}
+			// The target isn't there yet, so create it. If the source is a directory,
+			// we need a directory, otherwise we need a non-directory (i.e., a file).
+			if srcinfo.IsDir() {
+				if err = os.MkdirAll(target, 0755); err != nil {
+					return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
+				}
+			} else {
+				if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+					return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
+				}
+				var file *os.File
+				if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil {
+					return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
+				}
+				file.Close()
+			}
+		}
+		// Sort out which flags we're asking for, and what statfs() should be telling us
+		// if we successfully mounted with them.
+		requestFlags := uintptr(0)
+		expectedImportantFlags := uintptr(0)
+		importantFlags := uintptr(0)
+		possibleImportantFlags := uintptr(unix.ST_NODEV | unix.ST_NOEXEC | unix.ST_NOSUID | unix.ST_RDONLY)
+		for _, option := range m.Options {
+			switch option {
+			case "nodev":
+				requestFlags |= unix.MS_NODEV
+				importantFlags |= unix.ST_NODEV
+				expectedImportantFlags |= unix.ST_NODEV
+			case "dev":
+				requestFlags &= ^uintptr(unix.MS_NODEV)
+				importantFlags |= unix.ST_NODEV
+				expectedImportantFlags &= ^uintptr(unix.ST_NODEV)
+			case "noexec":
+				requestFlags |= unix.MS_NOEXEC
+				importantFlags |= unix.ST_NOEXEC
+				expectedImportantFlags |= unix.ST_NOEXEC
+			case "exec":
+				requestFlags &= ^uintptr(unix.MS_NOEXEC)
+				importantFlags |= unix.ST_NOEXEC
+				expectedImportantFlags &= ^uintptr(unix.ST_NOEXEC)
+			case "nosuid":
+				requestFlags |= unix.MS_NOSUID
+				importantFlags |= unix.ST_NOSUID
+				expectedImportantFlags |= unix.ST_NOSUID
+			case "suid":
+				requestFlags &= ^uintptr(unix.MS_NOSUID)
+				importantFlags |= unix.ST_NOSUID
+				expectedImportantFlags &= ^uintptr(unix.ST_NOSUID)
+			case "ro":
+				requestFlags |= unix.MS_RDONLY
+				importantFlags |= unix.ST_RDONLY
+				expectedImportantFlags |= unix.ST_RDONLY
+			case "rw":
+				requestFlags &= ^uintptr(unix.MS_RDONLY)
+				importantFlags |= unix.ST_RDONLY
+				expectedImportantFlags &= ^uintptr(unix.ST_RDONLY)
+			}
+		}
+		switch m.Type {
+		case "bind":
+			// Do the initial bind mount. We'll worry about the flags in a bit.
+			logrus.Debugf("bind mounting %q on %q %v", m.Destination, filepath.Join(spec.Root.Path, m.Destination), m.Options)
+			if err = unix.Mount(m.Source, target, "", bindFlags|requestFlags, ""); err != nil {
+				return undoBinds, fmt.Errorf("bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
+			}
+			logrus.Debugf("bind mounted %q to %q", m.Source, target)
+		case "tmpfs":
+			// Mount a tmpfs. We'll worry about the flags in a bit.
+			if err = mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
+				return undoBinds, fmt.Errorf("mounting tmpfs to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(append(m.Options, "private"), ","), err)
+			}
+			logrus.Debugf("mounted a tmpfs to %q", target)
+		case "overlay":
+			// Mount an overlay. We'll worry about the flags in a bit.
+			if err = mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
+				return undoBinds, fmt.Errorf("mounting overlay to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(append(m.Options, "private"), ","), err)
+			}
+			logrus.Debugf("mounted a overlay to %q", target)
+		}
+		// Time to worry about the flags.
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, fmt.Errorf("checking if volume %q was mounted with requested flags: %w", target, err)
+		}
+		effectiveImportantFlags := uintptr(fs.Flags) & importantFlags
+		if effectiveImportantFlags != expectedImportantFlags {
+			// Do a remount to try to get the desired flags to stick.
+			effectiveUnimportantFlags := uintptr(fs.Flags) & ^possibleImportantFlags
+			if err = unix.Mount(target, target, m.Type, unix.MS_REMOUNT|bindFlags|requestFlags|mountFlagsForFSFlags(effectiveUnimportantFlags), ""); err != nil {
+				return undoBinds, fmt.Errorf("remounting %q in mount namespace with flags %#x instead of %#x: %w", target, requestFlags, effectiveImportantFlags, err)
+			}
+			// Check if the desired flags stuck.
+			if err = unix.Statfs(target, &fs); err != nil {
+				return undoBinds, fmt.Errorf("checking if directory %q was remounted with requested flags %#x instead of %#x: %w", target, requestFlags, effectiveImportantFlags, err)
+			}
+			newEffectiveImportantFlags := uintptr(fs.Flags) & importantFlags
+			if newEffectiveImportantFlags != expectedImportantFlags {
+				return undoBinds, fmt.Errorf("unable to remount %q with requested flags %#x instead of %#x, just got %#x back", target, requestFlags, effectiveImportantFlags, newEffectiveImportantFlags)
+			}
+		}
+	}
+
+	// Set up any read-only paths that we need to. If we're running inside
+	// of a container, some of these locations will already be read-only, in
+	// which case can declare victory and move on.
+	for _, roPath := range spec.Linux.ReadonlyPaths {
+		r := filepath.Join(spec.Root.Path, roPath)
+		target, err := filepath.EvalSymlinks(r)
+		if err != nil {
+			if errors.Is(err, os.ErrNotExist) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, fmt.Errorf("checking %q for symlinks before marking it read-only: %w", r, err)
+		}
+		// Check if the location is already read-only.
+		var fs unix.Statfs_t
+		if err = unix.Statfs(target, &fs); err != nil {
+			if errors.Is(err, os.ErrNotExist) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, fmt.Errorf("checking if directory %q is already read-only: %w", target, err)
+		}
+		if fs.Flags&unix.ST_RDONLY == unix.ST_RDONLY {
+			continue
+		}
+		// Mount the location over itself, so that we can remount it as read-only, making
+		// sure to preserve any combination of nodev/noexec/nosuid that's already in play.
+		roFlags := mountFlagsForFSFlags(uintptr(fs.Flags)) | unix.MS_RDONLY
+		if err := unix.Mount(target, target, "", bindFlags|roFlags, ""); err != nil {
+			if errors.Is(err, os.ErrNotExist) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, fmt.Errorf("bind mounting %q onto itself in preparation for making it read-only: %w", target, err)
+		}
+		// Remount the location read-only.
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", target, err)
+		}
+		if fs.Flags&unix.ST_RDONLY == 0 {
+			if err := unix.Mount(target, target, "", unix.MS_REMOUNT|unix.MS_RDONLY|bindFlags|mountFlagsForFSFlags(uintptr(fs.Flags)), ""); err != nil {
+				return undoBinds, fmt.Errorf("remounting %q in mount namespace read-only: %w", target, err)
+			}
+		}
+		// Check again.
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, fmt.Errorf("checking if directory %q was remounted read-only: %w", target, err)
+		}
+		if fs.Flags&unix.ST_RDONLY == 0 {
+			// Still not read only. There's no underlying error here (the
+			// remount "succeeded"), so don't wrap a nil error with %w.
+			return undoBinds, fmt.Errorf("verifying that %q in mount namespace was remounted read-only", target)
+		}
+	}
+
+	// Create an empty directory for to use for masking directories.
+	roEmptyDir := filepath.Join(bundlePath, "empty")
+	if len(spec.Linux.MaskedPaths) > 0 {
+		if err := os.Mkdir(roEmptyDir, 0700); err != nil {
+			return undoBinds, fmt.Errorf("creating empty directory %q: %w", roEmptyDir, err)
+		}
+	}
+
+	// Set up any masked paths that we need to. If we're running inside of
+	// a container, some of these locations will already be read-only tmpfs
+	// filesystems or bind mounted to os.DevNull. If we're not running
+	// inside of a container, and nobody else has done that, we'll do it.
+	for _, masked := range spec.Linux.MaskedPaths {
+		t := filepath.Join(spec.Root.Path, masked)
+		target, err := filepath.EvalSymlinks(t)
+		if err != nil {
+			target = t
+		}
+		// Get some info about the target.
+		targetinfo, err := os.Stat(target)
+		if err != nil {
+			if errors.Is(err, os.ErrNotExist) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, fmt.Errorf("examining %q for masking in mount namespace: %w", target, err)
+		}
+		if targetinfo.IsDir() {
+			// The target's a directory. Check if it's a read-only filesystem.
+			var statfs unix.Statfs_t
+			if err = unix.Statfs(target, &statfs); err != nil {
+				return undoBinds, fmt.Errorf("checking if directory %q is a mountpoint: %w", target, err)
+			}
+			isReadOnly := statfs.Flags&unix.ST_RDONLY == unix.ST_RDONLY
+			// Check if any of the IDs we're mapping could read it.
+			// NOTE: the bit tests below need parentheses; without them
+			// "a&b|c != 0" parses as "(a&b)|c != 0", which is always true.
+			var stat unix.Stat_t
+			if err = unix.Stat(target, &stat); err != nil {
+				return undoBinds, fmt.Errorf("checking permissions on directory %q: %w", target, err)
+			}
+			isAccessible := false
+			if stat.Mode&(unix.S_IROTH|unix.S_IXOTH) != 0 {
+				isAccessible = true
+			}
+			// Group bits, checked against the mapped GIDs.
+			if !isAccessible && stat.Mode&(unix.S_IRGRP|unix.S_IXGRP) != 0 {
+				if len(spec.Linux.GIDMappings) > 0 {
+					for _, mapping := range spec.Linux.GIDMappings {
+						if stat.Gid >= mapping.ContainerID && stat.Gid < mapping.ContainerID+mapping.Size {
+							isAccessible = true
+							break
+						}
+					}
+				}
+			}
+			// Owner bits, checked against the mapped UIDs.
+			if !isAccessible && stat.Mode&(unix.S_IRUSR|unix.S_IXUSR) != 0 {
+				if len(spec.Linux.UIDMappings) > 0 {
+					for _, mapping := range spec.Linux.UIDMappings {
+						if stat.Uid >= mapping.ContainerID && stat.Uid < mapping.ContainerID+mapping.Size {
+							isAccessible = true
+							break
+						}
+					}
+				}
+			}
+			// Check if it's empty.
+			hasContent := false
+			directory, err := os.Open(target)
+			if err != nil {
+				if !os.IsPermission(err) {
+					return undoBinds, fmt.Errorf("opening directory %q: %w", target, err)
+				}
+			} else {
+				names, err := directory.Readdirnames(0)
+				directory.Close()
+				if err != nil {
+					return undoBinds, fmt.Errorf("reading contents of directory %q: %w", target, err)
+				}
+				hasContent = false
+				for _, name := range names {
+					switch name {
+					case ".", "..":
+						continue
+					default:
+						hasContent = true
+					}
+					if hasContent {
+						break
+					}
+				}
+			}
+			// The target's a directory, so read-only bind mount an empty directory on it.
+			roFlags := uintptr(syscall.MS_BIND | syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY)
+			if !isReadOnly || (hasContent && isAccessible) {
+				if err = unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil {
+					return undoBinds, fmt.Errorf("masking directory %q in mount namespace: %w", target, err)
+				}
+				if err = unix.Statfs(target, &fs); err != nil {
+					return undoBinds, fmt.Errorf("checking if masked directory %q was mounted read-only in mount namespace: %w", target, err)
+				}
+				if fs.Flags&unix.ST_RDONLY == 0 {
+					if err = unix.Mount(target, target, "", syscall.MS_REMOUNT|roFlags|mountFlagsForFSFlags(uintptr(fs.Flags)), ""); err != nil {
+						return undoBinds, fmt.Errorf("making sure masked directory %q in mount namespace is read only: %w", target, err)
+					}
+				}
+			}
+		} else {
+			// If the target's is not a directory or os.DevNull, bind mount os.DevNull over it.
+			if !isDevNull(targetinfo) {
+				if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil {
+					return undoBinds, fmt.Errorf("masking non-directory %q in mount namespace: %w", target, err)
+				}
+			}
+		}
+	}
+	return undoBinds, nil
+}
+
+// setPdeathsig arranges for cmd's process to receive SIGKILL if this
+// (parent) process dies before the child exits, preserving any SysProcAttr
+// settings the caller already made.
+func setPdeathsig(cmd *exec.Cmd) {
+	if cmd.SysProcAttr == nil {
+		cmd.SysProcAttr = new(syscall.SysProcAttr)
+	}
+	cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+}
diff --git a/chroot/run_test.go b/chroot/run_test.go
new file mode 100644
index 0000000..bb883e5
--- /dev/null
+++ b/chroot/run_test.go
@@ -0,0 +1,610 @@
+//go:build linux
+// +build linux
+
+package chroot
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/containers/buildah/tests/testreport/types"
+ "github.com/containers/buildah/util"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/reexec"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ reportCommand = "testreport"
+)
+
+// TestMain gives reexec a chance to take over when the test binary is
+// re-invoked as a subprocess helper; otherwise it runs the test suite.
+func TestMain(m *testing.M) {
+	if !reexec.Init() {
+		os.Exit(m.Run())
+	}
+}
+
+// testMinimal builds a throwaway rootfs containing just the testreport
+// helper binary, lets the caller tweak the generated runtime spec via
+// modify, runs the container with RunUsingChroot, and hands the decoded
+// JSON report to verify.
+func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundleDir string), verify func(t *testing.T, report *types.TestReport)) {
+	t.Helper()
+	g, err := generate.New("linux")
+	if err != nil {
+		t.Fatalf("generate.New(%q): %v", "linux", err)
+	}
+	if err = setupSeccomp(g.Config, ""); err != nil {
+		t.Fatalf("setupSeccomp(%q): %v", "", err)
+	}
+
+	// t.TempDir returns /tmp/TestName/001.
+	// /tmp/TestName/001 has permission 0777, but /tmp/TestName is 0700, so
+	// loosen the parent so non-root in-container users can traverse it.
+	tempDir := t.TempDir()
+	if err = os.Chmod(filepath.Dir(tempDir), 0711); err != nil {
+		// Report the directory we actually chmodded, not its child.
+		t.Fatalf("error loosening permissions on %q: %v", filepath.Dir(tempDir), err)
+	}
+
+	// The rootfs for the container.
+	rootDir := filepath.Join(tempDir, "root")
+	if err := os.Mkdir(rootDir, 0711); err != nil {
+		t.Fatalf("os.Mkdir(%q): %v", rootDir, err)
+	}
+
+	rootTmpDir := filepath.Join(rootDir, "tmp")
+	if err := os.Mkdir(rootTmpDir, 01777); err != nil {
+		t.Fatalf("os.Mkdir(%q): %v", rootTmpDir, err)
+	}
+
+	// Copy the testreport helper into the rootfs; it will be the container's
+	// entry point, and it prints its view of the spec as JSON on stdout.
+	specPath := filepath.Join("..", "tests", reportCommand, reportCommand)
+	specBinarySource, err := os.Open(specPath)
+	if err != nil {
+		t.Fatalf("open(%q): %v", specPath, err)
+	}
+	defer specBinarySource.Close()
+	specBinary, err := os.OpenFile(filepath.Join(rootDir, reportCommand), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0711)
+	if err != nil {
+		t.Fatalf("open(%q): %v", filepath.Join(rootDir, reportCommand), err)
+	}
+	if _, err := io.Copy(specBinary, specBinarySource); err != nil {
+		specBinary.Close()
+		t.Fatalf("io.Copy error: %v", err)
+	}
+	// Check the Close error: a failed close can mean a short write.
+	if err := specBinary.Close(); err != nil {
+		t.Fatalf("close(%q): %v", filepath.Join(rootDir, reportCommand), err)
+	}
+
+	g.SetRootPath(rootDir)
+	g.SetProcessArgs([]string{"/" + reportCommand})
+
+	bundleDir := filepath.Join(tempDir, "bundle")
+	if err := os.Mkdir(bundleDir, 0700); err != nil {
+		t.Fatalf("os.Mkdir(%q): %v", bundleDir, err)
+	}
+
+	// Let the caller customize the spec before we run it.
+	if modify != nil {
+		modify(&g, rootDir, bundleDir)
+	}
+
+	// Make the rootfs owned by whichever host IDs container-root maps to.
+	uid, gid, err := util.GetHostRootIDs(g.Config)
+	if err != nil {
+		t.Fatalf("GetHostRootIDs: %v", err)
+	}
+	if err := os.Chown(rootDir, int(uid), int(gid)); err != nil {
+		t.Fatalf("os.Chown(%q): %v", rootDir, err)
+	}
+
+	output := new(bytes.Buffer)
+	if err := RunUsingChroot(g.Config, bundleDir, "/", new(bytes.Buffer), output, output); err != nil {
+		t.Fatalf("run: %v: %s", err, output.String())
+	}
+
+	var report types.TestReport
+	if err := json.Unmarshal(output.Bytes(), &report); err != nil {
+		t.Fatalf("decode: %v", err)
+	}
+
+	if verify != nil {
+		verify(t, &report)
+	}
+}
+
+// TestNoop exercises the harness with no spec changes and no verification.
+func TestNoop(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	testMinimal(t, nil, nil)
+}
+
+// TestMinimalSkeleton exercises the harness with no-op modify/verify hooks.
+func TestMinimalSkeleton(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	noopModify := func(g *generate.Generator, rootDir, bundleDir string) {}
+	noopVerify := func(t *testing.T, report *types.TestReport) {}
+	testMinimal(t, noopModify, noopVerify)
+}
+
+// TestProcessTerminal checks that the spec's Terminal flag is passed through
+// to the container process unchanged.
+func TestProcessTerminal(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	for _, wantTerminal := range []bool{false, true} {
+		modify := func(g *generate.Generator, rootDir, bundleDir string) {
+			g.SetProcessTerminal(wantTerminal)
+		}
+		verify := func(t *testing.T, report *types.TestReport) {
+			if report.Spec.Process.Terminal != wantTerminal {
+				t.Fatalf("expected terminal = %v, got %v", wantTerminal, report.Spec.Process.Terminal)
+			}
+		}
+		testMinimal(t, modify, verify)
+	}
+}
+
+// TestProcessConsoleSize checks that the configured console dimensions are
+// reported back by the container process.
+func TestProcessConsoleSize(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	for _, wh := range [][2]uint{{80, 25}, {132, 50}} {
+		width, height := wh[0], wh[1]
+		modify := func(g *generate.Generator, rootDir, bundleDir string) {
+			g.SetProcessTerminal(true)
+			g.SetProcessConsoleSize(width, height)
+		}
+		verify := func(t *testing.T, report *types.TestReport) {
+			if report.Spec.Process.ConsoleSize.Width != width {
+				t.Fatalf("expected console width = %v, got %v", width, report.Spec.Process.ConsoleSize.Width)
+			}
+			if report.Spec.Process.ConsoleSize.Height != height {
+				t.Fatalf("expected console height = %v, got %v", height, report.Spec.Process.ConsoleSize.Height)
+			}
+		}
+		testMinimal(t, modify, verify)
+	}
+}
+
+// TestProcessUser checks that the UID/GID configured in the spec are applied
+// to the container process.
+func TestProcessUser(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	for _, baseID := range []uint32{0, 1000} {
+		modify := func(g *generate.Generator, rootDir, bundleDir string) {
+			g.SetProcessUID(baseID)
+			g.SetProcessGID(baseID + 1)
+			g.AddProcessAdditionalGid(baseID + 2)
+		}
+		verify := func(t *testing.T, report *types.TestReport) {
+			if report.Spec.Process.User.UID != baseID {
+				t.Fatalf("expected UID %v, got %v", baseID, report.Spec.Process.User.UID)
+			}
+			if report.Spec.Process.User.GID != baseID+1 {
+				t.Fatalf("expected GID %v, got %v", baseID+1, report.Spec.Process.User.GID)
+			}
+		}
+		testMinimal(t, modify, verify)
+	}
+}
+
+// TestProcessEnv checks that an environment variable set in the spec shows
+// up in the container process's environment.
+func TestProcessEnv(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	wanted := fmt.Sprintf("PARENT_TEST_PID=%d", unix.Getpid())
+	modify := func(g *generate.Generator, rootDir, bundleDir string) {
+		g.ClearProcessEnv()
+		g.AddProcessEnv("PARENT_TEST_PID", strconv.Itoa(unix.Getpid()))
+	}
+	verify := func(t *testing.T, report *types.TestReport) {
+		found := false
+		for _, ev := range report.Spec.Process.Env {
+			if ev == wanted {
+				found = true
+				break
+			}
+		}
+		if !found {
+			t.Fatalf("expected environment variable %q", wanted)
+		}
+	}
+	testMinimal(t, modify, verify)
+}
+
+func TestProcessCwd(t *testing.T) {
+ if unix.Getuid() != 0 {
+ t.Skip("tests need to be run as root")
+ }
+ testMinimal(t,
+ func(g *generate.Generator, rootDir, bundleDir string) {
+ if err := os.Mkdir(filepath.Join(rootDir, "/no-such-directory"), 0700); err != nil {
+ t.Fatalf("mkdir(%q): %v", filepath.Join(rootDir, "/no-such-directory"), err)
+ }
+ g.SetProcessCwd("/no-such-directory")
+ },
+ func(t *testing.T, report *types.TestReport) {
+ if report.Spec.Process.Cwd != "/no-such-directory" {
+ t.Fatalf("expected %q, got %q", "/no-such-directory", report.Spec.Process.Cwd)
+ }
+ },
+ )
+}
+
+// TestProcessCapabilities checks that clearing capabilities yields an empty
+// permitted set, and that adding a single capability to every set shows up
+// as exactly that one permitted capability.
+func TestProcessCapabilities(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	testMinimal(t,
+		func(g *generate.Generator, rootDir, bundleDir string) {
+			g.ClearProcessCapabilities()
+		},
+		func(t *testing.T, report *types.TestReport) {
+			if len(report.Spec.Process.Capabilities.Permitted) != 0 {
+				t.Fatalf("expected no permitted capabilities, got %#v", report.Spec.Process.Capabilities.Permitted)
+			}
+		},
+	)
+	testMinimal(t,
+		func(g *generate.Generator, rootDir, bundleDir string) {
+			g.ClearProcessCapabilities()
+			// Add CAP_IPC_LOCK to each of the five capability sets in turn.
+			for _, add := range []func(string) error{
+				g.AddProcessCapabilityEffective,
+				g.AddProcessCapabilityPermitted,
+				g.AddProcessCapabilityInheritable,
+				g.AddProcessCapabilityBounding,
+				g.AddProcessCapabilityAmbient,
+			} {
+				if err := add("CAP_IPC_LOCK"); err != nil {
+					t.Fatalf("%v", err)
+				}
+			}
+		},
+		func(t *testing.T, report *types.TestReport) {
+			if len(report.Spec.Process.Capabilities.Permitted) != 1 {
+				t.Fatalf("expected one permitted capability, got %#v", report.Spec.Process.Capabilities.Permitted)
+			}
+			if report.Spec.Process.Capabilities.Permitted[0] != "CAP_IPC_LOCK" {
+				t.Fatalf("expected one capability CAP_IPC_LOCK, got %#v", report.Spec.Process.Capabilities.Permitted)
+			}
+		},
+	)
+}
+
+// TestProcessRlimits verifies that an RLIMIT_AS (address space) limit in the
+// spec is applied inside the container, and that omitting it leaves the
+// limit at infinity.
+func TestProcessRlimits(t *testing.T) {
+	if unix.Getuid() != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	for _, limit := range []uint64{100 * 1024 * 1024 * 1024, 200 * 1024 * 1024 * 1024, unix.RLIM_INFINITY} {
+		testMinimal(t,
+			func(g *generate.Generator, rootDir, bundleDir string) {
+				g.ClearProcessRlimits()
+				// RLIM_INFINITY stands in for "don't set a limit at all".
+				if limit != unix.RLIM_INFINITY {
+					g.AddProcessRlimits("rlimit_as", limit, limit)
+				}
+			},
+			func(t *testing.T, report *types.TestReport) {
+				var rlim *specs.POSIXRlimit
+				for i := range report.Spec.Process.Rlimits {
+					if strings.ToUpper(report.Spec.Process.Rlimits[i].Type) == "RLIMIT_AS" {
+						rlim = &report.Spec.Process.Rlimits[i]
+					}
+				}
+				// The old messages said "number of open files" (RLIMIT_NOFILE),
+				// but this test exercises RLIMIT_AS.
+				if limit == unix.RLIM_INFINITY && !(rlim == nil || (rlim.Soft == unix.RLIM_INFINITY && rlim.Hard == unix.RLIM_INFINITY)) {
+					t.Fatalf("wasn't supposed to set limit on address space size: %#v", rlim)
+				}
+				if limit != unix.RLIM_INFINITY && rlim == nil {
+					t.Fatalf("was supposed to set limit on address space size")
+				}
+				if rlim != nil {
+					if rlim.Soft != limit {
+						t.Fatalf("soft limit was set to %d, not %d", rlim.Soft, limit)
+					}
+					if rlim.Hard != limit {
+						t.Fatalf("hard limit was set to %d, not %d", rlim.Hard, limit)
+					}
+				}
+			},
+		)
+	}
+}
+
+// TestProcessNoNewPrivileges checks that the no-new-privileges flag is
+// propagated to the container process; it requires seccomp support.
+func TestProcessNoNewPrivileges(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	if !seccompAvailable {
+		t.Skip("not built with seccomp support")
+	}
+	for _, noNewPrivs := range []bool{false, true} {
+		modify := func(g *generate.Generator, rootDir, bundleDir string) {
+			g.SetProcessNoNewPrivileges(noNewPrivs)
+		}
+		verify := func(t *testing.T, report *types.TestReport) {
+			if report.Spec.Process.NoNewPrivileges != noNewPrivs {
+				t.Fatalf("expected no-new-privs to be %v, got %v", noNewPrivs, report.Spec.Process.NoNewPrivileges)
+			}
+		}
+		testMinimal(t, modify, verify)
+	}
+}
+
+// TestProcessOOMScoreAdj checks that the OOM score adjustment configured in
+// the spec is visible to the container process (absent is treated as 0).
+func TestProcessOOMScoreAdj(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	for _, wantAdj := range []int{0, 1, 2, 3} {
+		modify := func(g *generate.Generator, rootDir, bundleDir string) {
+			g.SetProcessOOMScoreAdj(wantAdj)
+		}
+		verify := func(t *testing.T, report *types.TestReport) {
+			adjusted := 0
+			if report.Spec.Process.OOMScoreAdj != nil {
+				adjusted = *report.Spec.Process.OOMScoreAdj
+			}
+			if adjusted != wantAdj {
+				t.Fatalf("expected oom-score-adj to be %v, got %v", wantAdj, adjusted)
+			}
+		}
+		testMinimal(t, modify, verify)
+	}
+}
+
+// TestHostname checks that the hostname configured in the spec is set inside
+// the container.
+func TestHostname(t *testing.T) {
+	if uid := unix.Getuid(); uid != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	// Use a PID-derived name so concurrent test runs don't collide.
+	hostname := fmt.Sprintf("host%d", unix.Getpid())
+	modify := func(g *generate.Generator, rootDir, bundleDir string) {
+		g.SetHostname(hostname)
+	}
+	verify := func(t *testing.T, report *types.TestReport) {
+		if report.Spec.Hostname != hostname {
+			t.Fatalf("expected %q, got %q", hostname, report.Spec.Hostname)
+		}
+	}
+	testMinimal(t, modify, verify)
+}
+
+// TestMounts checks that tmpfs and bind mounts requested in the spec show up
+// inside the container with the requested nodev/noexec/nosuid/ro(/rw) flags,
+// and that mounting into the container does not disturb the flags on the
+// host-side source mountpoint.
+func TestMounts(t *testing.T) {
+	if unix.Getuid() != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	t.Run("tmpfs", func(t *testing.T) {
+		testMinimal(t,
+			func(g *generate.Generator, rootDir, bundleDir string) {
+				g.AddMount(specs.Mount{
+					Source:      "tmpfs",
+					Destination: "/was-not-there-before",
+					Type:        "tmpfs",
+					Options:     []string{"ro", "size=0"},
+				})
+			},
+			func(t *testing.T, report *types.TestReport) {
+				// The tmpfs should appear in the container's mount table.
+				found := false
+				for _, mount := range report.Spec.Mounts {
+					if mount.Destination == "/was-not-there-before" && mount.Type == "tmpfs" {
+						found = true
+					}
+				}
+				if !found {
+					t.Errorf("added mount for /was-not-there-before not found in %#v", report.Spec.Mounts)
+				}
+			},
+		)
+	})
+	// apparently we can do anything except turn read-only into read-write
+	//
+	// Each case bind mounts a freshly-created tmpfs (mounted with
+	// tmpfsOptions) into the container with the given bind options, then
+	// requires/rejects specific flags in the container's view of the mount.
+	binds := []struct {
+		name         string   // subtest name
+		tmpfsOptions string   // extra options for the host-side tmpfs
+		destination  string   // bind mount destination inside the container
+		fsType       string   // mount type; empty means "bind"
+		options      []string // options requested for the bind mount
+		require      []string // flags that must be present (defaults to options)
+		reject       []string // flags that must be absent
+	}{
+		{
+			name:        "nodev",
+			destination: "/nodev",
+			options:     []string{"nodev"},
+			reject:      []string{"dev"},
+		},
+		{
+			name:        "noexec",
+			destination: "/noexec",
+			options:     []string{"noexec"},
+			reject:      []string{"exec"},
+		},
+		{
+			name:        "nosuid",
+			destination: "/nosuid",
+			options:     []string{"nosuid"},
+			reject:      []string{"suid"}},
+		{
+			name:        "nodev,noexec",
+			destination: "/nodev,noexec",
+			options:     []string{"nodev", "noexec"},
+			reject:      []string{"dev", "exec"},
+		},
+		{
+			name:        "nodev,noexec,nosuid",
+			destination: "/nodev,noexec,nosuid",
+			options:     []string{"nodev", "noexec", "nosuid"},
+			reject:      []string{"dev", "exec", "suid"},
+		},
+		{
+			name:        "nodev,noexec,nosuid,ro",
+			destination: "/nodev,noexec,nosuid,ro",
+			options:     []string{"nodev", "noexec", "nosuid", "ro"},
+			reject:      []string{"dev", "exec", "suid", "rw"},
+		},
+		{
+			name:        "nodev,noexec,nosuid,rw",
+			destination: "/nodev,noexec,nosuid,rw",
+			options:     []string{"nodev", "noexec", "nosuid", "rw"},
+			reject:      []string{"dev", "exec", "suid", "ro"},
+		},
+		{
+			name:         "dev,exec,suid,rw",
+			tmpfsOptions: "nodev,noexec,nosuid",
+			destination:  "/dev,exec,suid,rw",
+			options:      []string{"dev", "exec", "suid", "rw"},
+			require:      []string{"rw"},
+			reject:       []string{"nodev", "noexec", "nosuid", "ro"},
+		},
+		{
+			name:         "nodev,noexec,nosuid,ro,flip",
+			tmpfsOptions: "dev,exec,suid,rw",
+			destination:  "/nodev,noexec,nosuid,ro",
+			options:      []string{"nodev", "noexec", "nosuid", "ro"},
+			reject:       []string{"dev", "exec", "suid", "rw"},
+		},
+	}
+	for _, bind := range binds {
+		t.Run(bind.name, func(t *testing.T) {
+			// mount a tmpfs over the temp dir, which may be on a nodev/noexec/nosuid filesystem
+			tmpfsMount := t.TempDir()
+			t.Cleanup(func() { _ = unix.Unmount(tmpfsMount, unix.MNT_FORCE|unix.MNT_DETACH) })
+			tmpfsOptions := "rw,size=1m"
+			if bind.tmpfsOptions != "" {
+				tmpfsOptions += ("," + bind.tmpfsOptions)
+			}
+			tmpfsFlags, tmpfsOptions := mount.ParseOptions(tmpfsOptions)
+			require.NoErrorf(t, unix.Mount("none", tmpfsMount, "tmpfs", uintptr(tmpfsFlags), tmpfsOptions), "error mounting a tmpfs with flags=%#x,options=%q at %s", tmpfsFlags, tmpfsOptions, tmpfsMount)
+			testMinimal(t,
+				func(g *generate.Generator, rootDir, bundleDir string) {
+					fsType := bind.fsType
+					if fsType == "" {
+						fsType = "bind"
+					}
+					g.AddMount(specs.Mount{
+						Source:      tmpfsMount,
+						Destination: bind.destination,
+						Type:        fsType,
+						Options:     bind.options,
+					})
+				},
+				func(t *testing.T, report *types.TestReport) {
+					// Record whether the destination was seen with all
+					// required flags and none of the rejected ones.
+					foundMounts := make(map[string]bool)
+					for _, mount := range report.Spec.Mounts {
+						if mount.Destination == bind.destination {
+							allRequired := true
+							requiredFlags := bind.require
+							if len(requiredFlags) == 0 {
+								requiredFlags = bind.options
+							}
+							for _, required := range requiredFlags {
+								if !util.StringInSlice(required, mount.Options) {
+									allRequired = false
+								}
+							}
+							anyRejected := false
+							for _, rejected := range bind.reject {
+								if util.StringInSlice(rejected, mount.Options) {
+									anyRejected = true
+								}
+							}
+							foundMounts[mount.Destination] = allRequired && !anyRejected
+						}
+					}
+					if !foundMounts[bind.destination] {
+						t.Errorf("added mount for %s not found with the right flags (%v) in %+v", bind.destination, bind.options, report.Spec.Mounts)
+					}
+				},
+			)
+			// okay, just make sure we didn't change anything about the tmpfs mount point outside of the chroot
+			var fs unix.Statfs_t
+			require.NoErrorf(t, unix.Statfs(tmpfsMount, &fs), "fstat")
+			assert.Equalf(t, tmpfsFlags&unix.MS_NODEV == unix.MS_NODEV, fs.Flags&unix.ST_NODEV == unix.ST_NODEV, "nodev flag")
+			assert.Equalf(t, tmpfsFlags&unix.MS_NOEXEC == unix.MS_NOEXEC, fs.Flags&unix.ST_NOEXEC == unix.ST_NOEXEC, "noexec flag")
+			assert.Equalf(t, tmpfsFlags&unix.MS_NOSUID == unix.MS_NOSUID, fs.Flags&unix.ST_NOSUID == unix.ST_NOSUID, "nosuid flag")
+			assert.Equalf(t, tmpfsFlags&unix.MS_RDONLY == unix.MS_RDONLY, fs.Flags&unix.ST_RDONLY == unix.ST_RDONLY, "readonly flag")
+		})
+	}
+}
+
+// TestLinuxIDMapping verifies that a single 1:1 UID/GID mapping for the
+// current (root) user is passed through to the container's spec unchanged.
+func TestLinuxIDMapping(t *testing.T) {
+	if unix.Getuid() != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	testMinimal(t,
+		func(g *generate.Generator, rootDir, bundleDir string) {
+			g.ClearLinuxUIDMappings()
+			g.ClearLinuxGIDMappings()
+			g.AddLinuxUIDMapping(uint32(unix.Getuid()), 0, 1)
+			g.AddLinuxGIDMapping(uint32(unix.Getgid()), 0, 1)
+		},
+		func(t *testing.T, report *types.TestReport) {
+			// %d, not %q: len() yields an int, and %q formats an int as a quoted rune.
+			if len(report.Spec.Linux.UIDMappings) != 1 {
+				t.Fatalf("expected 1 uid mapping, got %d", len(report.Spec.Linux.UIDMappings))
+			}
+			if report.Spec.Linux.UIDMappings[0].HostID != uint32(unix.Getuid()) {
+				t.Fatalf("expected host uid mapping to be %d, got %d", unix.Getuid(), report.Spec.Linux.UIDMappings[0].HostID)
+			}
+			if report.Spec.Linux.UIDMappings[0].ContainerID != 0 {
+				t.Fatalf("expected container uid mapping to be 0, got %d", report.Spec.Linux.UIDMappings[0].ContainerID)
+			}
+			if report.Spec.Linux.UIDMappings[0].Size != 1 {
+				t.Fatalf("expected container uid map size to be 1, got %d", report.Spec.Linux.UIDMappings[0].Size)
+			}
+			// Check the GID mapping count before indexing into it, mirroring the UID checks.
+			if len(report.Spec.Linux.GIDMappings) != 1 {
+				t.Fatalf("expected 1 gid mapping, got %d", len(report.Spec.Linux.GIDMappings))
+			}
+			if report.Spec.Linux.GIDMappings[0].HostID != uint32(unix.Getgid()) {
+				t.Fatalf("expected host gid mapping to be %d, got %d", unix.Getgid(), report.Spec.Linux.GIDMappings[0].HostID)
+			}
+			if report.Spec.Linux.GIDMappings[0].ContainerID != 0 {
+				t.Fatalf("expected container gid mapping to be 0, got %d", report.Spec.Linux.GIDMappings[0].ContainerID)
+			}
+			if report.Spec.Linux.GIDMappings[0].Size != 1 {
+				t.Fatalf("expected container gid map size to be 1, got %d", report.Spec.Linux.GIDMappings[0].Size)
+			}
+		},
+	)
+}
+
+// TestLinuxIDMappingShift verifies that a shifted (host id + 1) UID/GID
+// mapping is passed through to the container's spec unchanged.
+func TestLinuxIDMappingShift(t *testing.T) {
+	if unix.Getuid() != 0 {
+		t.Skip("tests need to be run as root")
+	}
+	testMinimal(t,
+		func(g *generate.Generator, rootDir, bundleDir string) {
+			g.ClearLinuxUIDMappings()
+			g.ClearLinuxGIDMappings()
+			g.AddLinuxUIDMapping(uint32(unix.Getuid())+1, 0, 1)
+			g.AddLinuxGIDMapping(uint32(unix.Getgid())+1, 0, 1)
+		},
+		func(t *testing.T, report *types.TestReport) {
+			// %d, not %q: len() yields an int, and %q formats an int as a quoted rune.
+			if len(report.Spec.Linux.UIDMappings) != 1 {
+				t.Fatalf("expected 1 uid mapping, got %d", len(report.Spec.Linux.UIDMappings))
+			}
+			if report.Spec.Linux.UIDMappings[0].HostID != uint32(unix.Getuid())+1 {
+				t.Fatalf("expected host uid mapping to be %d, got %d", unix.Getuid()+1, report.Spec.Linux.UIDMappings[0].HostID)
+			}
+			if report.Spec.Linux.UIDMappings[0].ContainerID != 0 {
+				t.Fatalf("expected container uid mapping to be 0, got %d", report.Spec.Linux.UIDMappings[0].ContainerID)
+			}
+			if report.Spec.Linux.UIDMappings[0].Size != 1 {
+				t.Fatalf("expected container uid map size to be 1, got %d", report.Spec.Linux.UIDMappings[0].Size)
+			}
+			// Check the GID mapping count before indexing into it, mirroring the UID checks.
+			if len(report.Spec.Linux.GIDMappings) != 1 {
+				t.Fatalf("expected 1 gid mapping, got %d", len(report.Spec.Linux.GIDMappings))
+			}
+			if report.Spec.Linux.GIDMappings[0].HostID != uint32(unix.Getgid())+1 {
+				t.Fatalf("expected host gid mapping to be %d, got %d", unix.Getgid()+1, report.Spec.Linux.GIDMappings[0].HostID)
+			}
+			if report.Spec.Linux.GIDMappings[0].ContainerID != 0 {
+				t.Fatalf("expected container gid mapping to be 0, got %d", report.Spec.Linux.GIDMappings[0].ContainerID)
+			}
+			if report.Spec.Linux.GIDMappings[0].Size != 1 {
+				t.Fatalf("expected container gid map size to be 1, got %d", report.Spec.Linux.GIDMappings[0].Size)
+			}
+		},
+	)
+}
diff --git a/chroot/seccomp.go b/chroot/seccomp.go
new file mode 100644
index 0000000..e875f33
--- /dev/null
+++ b/chroot/seccomp.go
@@ -0,0 +1,205 @@
+//go:build linux && seccomp
+// +build linux,seccomp
+
+package chroot
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/containers/common/pkg/seccomp"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ libseccomp "github.com/seccomp/libseccomp-golang"
+ "github.com/sirupsen/logrus"
+)
+
+// seccompAvailable indicates that this build (linux with the seccomp
+// build tag) is able to apply seccomp filters.
+const seccompAvailable = true
+
+// setSeccomp sets the seccomp filter for ourselves and any processes that we'll start.
+// It translates the runtime-spec seccomp configuration in spec into a
+// libseccomp filter and loads it into the kernel; a nil configuration is
+// a no-op.
+func setSeccomp(spec *specs.Spec) error {
+ logrus.Debugf("setting seccomp configuration")
+ // No filter configured: nothing to apply.
+ if spec.Linux.Seccomp == nil {
+ return nil
+ }
+ // mapAction translates a runtime-spec seccomp action (plus optional
+ // errno return value) into its libseccomp equivalent; unknown actions
+ // are logged and mapped to ActInvalid.
+ mapAction := func(specAction specs.LinuxSeccompAction, errnoRet *uint) libseccomp.ScmpAction {
+ switch specAction {
+ case specs.ActKill:
+ return libseccomp.ActKillThread
+ case specs.ActTrap:
+ return libseccomp.ActTrap
+ case specs.ActErrno:
+ action := libseccomp.ActErrno
+ if errnoRet != nil {
+ // Attach the rule's requested errno value to the action.
+ action = action.SetReturnCode(int16(*errnoRet))
+ }
+ return action
+ case specs.ActTrace:
+ return libseccomp.ActTrace
+ case specs.ActAllow:
+ return libseccomp.ActAllow
+ case specs.ActLog:
+ return libseccomp.ActLog
+ case specs.ActKillProcess:
+ return libseccomp.ActKillProcess
+ default:
+ logrus.Errorf("unmappable action %v", specAction)
+ }
+ return libseccomp.ActInvalid
+ }
+ // mapArch translates a runtime-spec architecture name into its
+ // libseccomp equivalent; unknown values are logged and mapped to
+ // ArchInvalid.
+ mapArch := func(specArch specs.Arch) libseccomp.ScmpArch {
+ switch specArch {
+ case specs.ArchX86:
+ return libseccomp.ArchX86
+ case specs.ArchX86_64:
+ return libseccomp.ArchAMD64
+ case specs.ArchX32:
+ return libseccomp.ArchX32
+ case specs.ArchARM:
+ return libseccomp.ArchARM
+ case specs.ArchAARCH64:
+ return libseccomp.ArchARM64
+ case specs.ArchMIPS:
+ return libseccomp.ArchMIPS
+ case specs.ArchMIPS64:
+ return libseccomp.ArchMIPS64
+ case specs.ArchMIPS64N32:
+ return libseccomp.ArchMIPS64N32
+ case specs.ArchMIPSEL:
+ return libseccomp.ArchMIPSEL
+ case specs.ArchMIPSEL64:
+ return libseccomp.ArchMIPSEL64
+ case specs.ArchMIPSEL64N32:
+ return libseccomp.ArchMIPSEL64N32
+ case specs.ArchPPC:
+ return libseccomp.ArchPPC
+ case specs.ArchPPC64:
+ return libseccomp.ArchPPC64
+ case specs.ArchPPC64LE:
+ return libseccomp.ArchPPC64LE
+ case specs.ArchS390:
+ return libseccomp.ArchS390
+ case specs.ArchS390X:
+ return libseccomp.ArchS390X
+ case specs.ArchPARISC:
+ return libseccomp.ArchPARISC
+ case specs.ArchPARISC64:
+ return libseccomp.ArchPARISC64
+ case specs.ArchRISCV64:
+ return libseccomp.ArchRISCV64
+ default:
+ logrus.Errorf("unmappable arch %v", specArch)
+ }
+ return libseccomp.ArchInvalid
+ }
+ // mapOp translates a runtime-spec argument comparison operator into
+ // its libseccomp equivalent; unknown values are logged and mapped to
+ // CompareInvalid.
+ mapOp := func(op specs.LinuxSeccompOperator) libseccomp.ScmpCompareOp {
+ switch op {
+ case specs.OpNotEqual:
+ return libseccomp.CompareNotEqual
+ case specs.OpLessThan:
+ return libseccomp.CompareLess
+ case specs.OpLessEqual:
+ return libseccomp.CompareLessOrEqual
+ case specs.OpEqualTo:
+ return libseccomp.CompareEqual
+ case specs.OpGreaterEqual:
+ return libseccomp.CompareGreaterEqual
+ case specs.OpGreaterThan:
+ return libseccomp.CompareGreater
+ case specs.OpMaskedEqual:
+ return libseccomp.CompareMaskedEqual
+ default:
+ logrus.Errorf("unmappable op %v", op)
+ }
+ return libseccomp.CompareInvalid
+ }
+
+ // Create the filter from the default action, then register each
+ // architecture the filter should apply to.
+ filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, spec.Linux.Seccomp.DefaultErrnoRet))
+ if err != nil {
+ return fmt.Errorf("creating seccomp filter with default action %q: %w", spec.Linux.Seccomp.DefaultAction, err)
+ }
+ for _, arch := range spec.Linux.Seccomp.Architectures {
+ if err = filter.AddArch(mapArch(arch)); err != nil {
+ return fmt.Errorf("adding architecture %q(%q) to seccomp filter: %w", arch, mapArch(arch), err)
+ }
+ }
+ for _, rule := range spec.Linux.Seccomp.Syscalls {
+ // Resolve this rule's syscall names to numbers; names libseccomp
+ // does not recognize are skipped rather than treated as fatal.
+ scnames := make(map[libseccomp.ScmpSyscall]string)
+ for _, name := range rule.Names {
+ scnum, err := libseccomp.GetSyscallFromName(name)
+ if err != nil {
+ logrus.Debugf("error mapping syscall %q to a syscall, ignoring %q rule for %q", name, rule.Action, name)
+ continue
+ }
+ scnames[scnum] = name
+ }
+ for scnum := range scnames {
+ // Unconditional rule: no argument conditions to build.
+ if len(rule.Args) == 0 {
+ if err = filter.AddRule(scnum, mapAction(rule.Action, rule.ErrnoRet)); err != nil {
+ return fmt.Errorf("adding a rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
+ }
+ continue
+ }
+ // Conditional rule: build one condition per argument check, and
+ // note whether every check is a plain equality test (used below).
+ var conditions []libseccomp.ScmpCondition
+ opsAreAllEquality := true
+ for _, arg := range rule.Args {
+ condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo)
+ if err != nil {
+ return fmt.Errorf("building a seccomp condition %d:%v:%d:%d: %w", arg.Index, arg.Op, arg.Value, arg.ValueTwo, err)
+ }
+ if arg.Op != specs.OpEqualTo {
+ opsAreAllEquality = false
+ }
+ conditions = append(conditions, condition)
+ }
+ if err = filter.AddRuleConditional(scnum, mapAction(rule.Action, rule.ErrnoRet), conditions); err != nil {
+ // Okay, if the rules specify multiple equality
+ // checks, assume someone thought that they
+ // were OR'd, when in fact they're ordinarily
+ // supposed to be AND'd. Break them up into
+ // different rules to get that OR effect.
+ if len(rule.Args) > 1 && opsAreAllEquality && err.Error() == "two checks on same syscall argument" {
+ for i := range conditions {
+ if err = filter.AddRuleConditional(scnum, mapAction(rule.Action, rule.ErrnoRet), conditions[i:i+1]); err != nil {
+ return fmt.Errorf("adding a conditional rule (%q:%q[%d]) to seccomp filter: %w", scnames[scnum], rule.Action, i, err)
+ }
+ }
+ } else {
+ return fmt.Errorf("adding a conditional rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
+ }
+ }
+ }
+ }
+ // Honor the spec's no-new-privileges setting, then load the filter
+ // into the kernel.  Release frees the library handle whether or not
+ // the load succeeded.
+ if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil {
+ return fmt.Errorf("setting no-new-privileges bit to %v: %w", spec.Process.NoNewPrivileges, err)
+ }
+ err = filter.Load()
+ filter.Release()
+ if err != nil {
+ return fmt.Errorf("activating seccomp filter: %w", err)
+ }
+ return nil
+}
+
+// setupSeccomp populates spec.Linux.Seccomp from seccompProfilePath:
+// "unconfined" removes any configured filter, "" loads the default
+// profile from containers/common, and any other value is treated as the
+// path of a JSON seccomp profile file to load.
+func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
+ switch seccompProfilePath {
+ case "unconfined":
+ spec.Linux.Seccomp = nil
+ case "":
+ // No profile named: fall back to the built-in default profile.
+ seccompConfig, err := seccomp.GetDefaultProfile(spec)
+ if err != nil {
+ return fmt.Errorf("loading default seccomp profile failed: %w", err)
+ }
+ spec.Linux.Seccomp = seccompConfig
+ default:
+ // Treat the value as a path to a profile file.
+ seccompProfile, err := os.ReadFile(seccompProfilePath)
+ if err != nil {
+ return fmt.Errorf("opening seccomp profile failed: %w", err)
+ }
+ seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
+ if err != nil {
+ return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
+ }
+ spec.Linux.Seccomp = seccompConfig
+ }
+ return nil
+}
diff --git a/chroot/seccomp_freebsd.go b/chroot/seccomp_freebsd.go
new file mode 100644
index 0000000..90e9f14
--- /dev/null
+++ b/chroot/seccomp_freebsd.go
@@ -0,0 +1,15 @@
+//go:build freebsd && seccomp
+// +build freebsd,seccomp
+
+package chroot
+
+import (
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// seccompAvailable: seccomp is a Linux kernel facility, so it is never
+// available on FreeBSD even when built with the seccomp tag.
+const seccompAvailable = false
+
+// setSeccomp is a no-op on FreeBSD: any seccomp configuration present
+// in the spec is deliberately ignored rather than treated as an error.
+func setSeccomp(spec *specs.Spec) error {
+ // Ignore this on FreeBSD
+ return nil
+}
diff --git a/chroot/seccomp_unsupported.go b/chroot/seccomp_unsupported.go
new file mode 100644
index 0000000..dc80dcd
--- /dev/null
+++ b/chroot/seccomp_unsupported.go
@@ -0,0 +1,27 @@
+//go:build (!linux && !freebsd) || !seccomp
+// +build !linux,!freebsd !seccomp
+
+package chroot
+
+import (
+ "errors"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// seccompAvailable indicates that this build has no seccomp support.
+const seccompAvailable = false
+
+// setSeccomp rejects any configured seccomp filter: with no seccomp
+// support compiled in, a filter in the spec is a configuration error
+// rather than something we can silently apply.
+func setSeccomp(spec *specs.Spec) error {
+	// spec.Linux may be nil on non-Linux platforms; guard it before
+	// dereferencing, as setupSeccomp in this file already does.
+	if spec.Linux != nil && spec.Linux.Seccomp != nil {
+		return errors.New("configured a seccomp filter without seccomp support?")
+	}
+	return nil
+}
+
+// setupSeccomp strips any seccomp configuration from the spec; this
+// build has no seccomp support to apply it with, and runtime-tools may
+// have supplied us with a default filter.
+func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
+	if linux := spec.Linux; linux != nil {
+		linux.Seccomp = nil
+	}
+	return nil
+}
diff --git a/chroot/selinux.go b/chroot/selinux.go
new file mode 100644
index 0000000..bba4b82
--- /dev/null
+++ b/chroot/selinux.go
@@ -0,0 +1,24 @@
+//go:build linux
+// +build linux
+
+package chroot
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+ selinux "github.com/opencontainers/selinux/go-selinux"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/sirupsen/logrus"
+)
+
+// setSelinuxLabel sets the process label for child processes that we'll start.
+// It is a no-op when the spec carries no process label or when SELinux
+// is not enabled on the host.
+func setSelinuxLabel(spec *specs.Spec) error {
+ logrus.Debugf("setting selinux label")
+ if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() {
+ if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil {
+ return fmt.Errorf("setting process label to %q: %w", spec.Process.SelinuxLabel, err)
+ }
+ }
+ return nil
+}
diff --git a/chroot/selinux_unsupported.go b/chroot/selinux_unsupported.go
new file mode 100644
index 0000000..826b920
--- /dev/null
+++ b/chroot/selinux_unsupported.go
@@ -0,0 +1,20 @@
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
+
+package chroot
+
+import (
+ "errors"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// setSelinuxLabel rejects SELinux labels on platforms without SELinux
+// support, treating them as configuration errors rather than silently
+// ignoring them.
+func setSelinuxLabel(spec *specs.Spec) error {
+	// Both the Linux and Process sections of the spec are optional on
+	// these platforms; guard against nil before dereferencing them.
+	if spec.Linux != nil && spec.Linux.MountLabel != "" {
+		return errors.New("configured an SELinux mount label without SELinux support?")
+	}
+	if spec.Process != nil && spec.Process.SelinuxLabel != "" {
+		return errors.New("configured an SELinux process label without SELinux support?")
+	}
+	return nil
+}
diff --git a/chroot/unsupported.go b/chroot/unsupported.go
new file mode 100644
index 0000000..677a0a2
--- /dev/null
+++ b/chroot/unsupported.go
@@ -0,0 +1,16 @@
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
+
+package chroot
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// RunUsingChroot is not supported on this platform; the stub exists so
+// callers compile everywhere and fail at runtime with a clear error.
+// NOTE(review): fmt.Errorf is called with no format verbs — errors.New
+// would be idiomatic, but "fmt" would then be unused in this file.
+func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
+ return fmt.Errorf("--isolation chroot is not supported on this platform")
+}
diff --git a/cmd/buildah/addcopy.go b/cmd/buildah/addcopy.go
new file mode 100644
index 0000000..cf6e238
--- /dev/null
+++ b/cmd/buildah/addcopy.go
@@ -0,0 +1,284 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/storage"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+// addCopyResults holds the parsed command-line flag values shared by
+// the "add" and "copy" subcommands.
+type addCopyResults struct {
+ addHistory bool
+ chmod string
+ chown string
+ checksum string
+ quiet bool
+ ignoreFile string
+ contextdir string
+ // from optionally names a container or image whose root directory is
+ // used as the source root directory instead of the host's.
+ from string
+ // The fields below configure pulling the --from image when no
+ // matching container already exists in local storage.
+ blobCache string
+ decryptionKeys []string
+ removeSignatures bool
+ signaturePolicy string
+ authfile string
+ creds string
+ tlsVerify bool
+ certDir string
+ retry int
+ retryDelay string
+}
+
+// createCommand builds the cobra.Command shared by "add" and "copy".
+// addCopy is the verb ("add" or "copy"), which is also upcased and
+// passed on to addAndCopyCmd; desc and short are the long/short help
+// texts; parsed flag values are stored in *opts.
+func createCommand(addCopy string, desc string, short string, opts *addCopyResults) *cobra.Command {
+ return &cobra.Command{
+ Use: addCopy,
+ Short: short,
+ Long: desc,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return addAndCopyCmd(cmd, args, strings.ToUpper(addCopy), *opts)
+ },
+ Example: `buildah ` + addCopy + ` containerID '/myapp/app.conf'
+ buildah ` + addCopy + ` containerID '/myapp/app.conf' '/myapp/app.conf'`,
+ Args: cobra.MinimumNArgs(1),
+ }
+}
+
+// applyFlagVars registers the command-line flags shared by "add" and
+// "copy" on flags, binding each one to a field of opts.  Flags that are
+// only meaningful when pulling the --from image are registered but
+// hidden from help output.
+func applyFlagVars(flags *pflag.FlagSet, opts *addCopyResults) {
+	// hide marks a just-registered flag as hidden.  Registration happens
+	// at init time, so a failure here is a programmer error and worth a
+	// panic; the message matches the one previously repeated inline.
+	hide := func(name string) {
+		if err := flags.MarkHidden(name); err != nil {
+			panic(fmt.Sprintf("error marking %s as hidden: %v", name, err))
+		}
+	}
+	flags.SetInterspersed(false)
+	flags.BoolVar(&opts.addHistory, "add-history", false, "add an entry for this operation to the image's history. Use BUILDAH_HISTORY environment variable to override. (default false)")
+	flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	hide("authfile")
+	flags.StringVar(&opts.blobCache, "blob-cache", "", "store copies of pulled image blobs in the specified directory")
+	hide("blob-cache")
+	flags.StringVar(&opts.certDir, "cert-dir", "", "use certificates at the specified path to access registries")
+	hide("cert-dir")
+	flags.StringVar(&opts.checksum, "checksum", "", "checksum the HTTP source content")
+	flags.StringVar(&opts.chown, "chown", "", "set the user and group ownership of the destination content")
+	flags.StringVar(&opts.chmod, "chmod", "", "set the access permissions of the destination content")
+	flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing registries when pulling images")
+	hide("creds")
+	flags.StringVar(&opts.from, "from", "", "use the specified container's or image's root directory as the source root directory")
+	flags.StringSliceVar(&opts.decryptionKeys, "decryption-key", nil, "key needed to decrypt a pulled image")
+	hide("decryption-key")
+	flags.StringVar(&opts.ignoreFile, "ignorefile", "", "path to .containerignore file")
+	flags.StringVar(&opts.contextdir, "contextdir", "", "context directory path")
+	flags.IntVar(&opts.retry, "retry", cli.MaxPullPushRetries, "number of times to retry in case of failure when performing pull")
+	flags.StringVar(&opts.retryDelay, "retry-delay", cli.PullPushRetryDelay.String(), "delay between retries in case of pull failures")
+	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output a digest of the newly-added/copied content")
+	flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing registries when pulling images. TLS verification cannot be used when talking to an insecure registry.")
+	hide("tls-verify")
+	flags.BoolVarP(&opts.removeSignatures, "remove-signatures", "", false, "don't copy signatures when pulling image")
+	hide("remove-signatures")
+	flags.StringVar(&opts.signaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
+	hide("signature-policy")
+}
+
+// init registers the "add" and "copy" subcommands; the two share their
+// flag handling and implementation, differing only in help text.
+func init() {
+	addDescription := "\n Adds the contents of a file, URL, or directory to a container's working\n directory. If a local file appears to be an archive, its contents are\n extracted and added instead of the archive file itself."
+	copyDescription := "\n Copies the contents of a file, URL, or directory into a container's working\n directory."
+	shortAdd := "Add content to the container"
+	shortCopy := "Copy content into the container"
+	var addOpts, copyOpts addCopyResults
+
+	addCommand := createCommand("add", addDescription, shortAdd, &addOpts)
+	addCommand.SetUsageTemplate(UsageTemplate())
+	applyFlagVars(addCommand.Flags(), &addOpts)
+
+	copyCommand := createCommand("copy", copyDescription, shortCopy, &copyOpts)
+	copyCommand.SetUsageTemplate(UsageTemplate())
+	applyFlagVars(copyCommand.Flags(), &copyOpts)
+
+	rootCmd.AddCommand(addCommand)
+	rootCmd.AddCommand(copyCommand)
+}
+
+// addAndCopyCmd implements both "buildah add" and "buildah copy": it
+// resolves the source root (optionally another container, or an image
+// named by --from which is pulled if needed), adds or copies the named
+// sources into the working container, prints the content digest, and
+// records an optional history entry.  verb is "ADD" or "COPY"; only ADD
+// extracts local archives.
+func addAndCopyCmd(c *cobra.Command, args []string, verb string, iopts addCopyResults) error {
+ if len(args) == 0 {
+ return errors.New("container ID must be specified")
+ }
+ name := args[0]
+ args = Tail(args)
+ if len(args) == 0 {
+ return errors.New("src must be specified")
+ }
+
+ if err := cli.VerifyFlagsArgsOrder(args); err != nil {
+ return err
+ }
+
+ // If list is greater than one, the last item is the destination
+ dest := ""
+ size := len(args)
+ if size > 1 {
+ dest = args[size-1]
+ args = args[:size-1]
+ }
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ var from *buildah.Builder
+ unmountFrom := false
+ removeFrom := false
+ var idMappingOptions *buildah.IDMappingOptions
+ contextdir := iopts.contextdir
+ if iopts.ignoreFile != "" && contextdir == "" {
+ return errors.New("--ignorefile option requires that you specify a context dir using --contextdir")
+ }
+
+ if iopts.from != "" {
+ // Try an existing container as the source first; if there is no
+ // such container, pull iopts.from as an image and create a
+ // temporary working container from it.
+ if from, err = openBuilder(getContext(), store, iopts.from); err != nil && errors.Is(err, storage.ErrContainerUnknown) {
+ systemContext, err2 := parse.SystemContextFromOptions(c)
+ if err2 != nil {
+ return fmt.Errorf("building system context: %w", err2)
+ }
+
+ decryptConfig, err2 := cli.DecryptConfig(iopts.decryptionKeys)
+ if err2 != nil {
+ return fmt.Errorf("unable to obtain decrypt config: %w", err2)
+ }
+ var pullPushRetryDelay time.Duration
+ pullPushRetryDelay, err = time.ParseDuration(iopts.retryDelay)
+ if err != nil {
+ return fmt.Errorf("unable to parse value provided %q as --retry-delay: %w", iopts.retryDelay, err)
+ }
+ options := buildah.BuilderOptions{
+ FromImage: iopts.from,
+ BlobDirectory: iopts.blobCache,
+ SignaturePolicyPath: iopts.signaturePolicy,
+ SystemContext: systemContext,
+ MaxPullRetries: iopts.retry,
+ PullRetryDelay: pullPushRetryDelay,
+ OciDecryptConfig: decryptConfig,
+ }
+ if !iopts.quiet {
+ options.ReportWriter = os.Stderr
+ }
+ if from, err = buildah.NewBuilder(getContext(), store, options); err != nil {
+ return fmt.Errorf("no container named %q, error copying content from image %q: %w", iopts.from, iopts.from, err)
+ }
+ removeFrom = true
+ // Best-effort removal of the temporary container on error paths;
+ // skipped once the normal-path removal below resets removeFrom.
+ defer func() {
+ if !removeFrom {
+ return
+ }
+ if err := from.Delete(); err != nil {
+ logrus.Errorf("error deleting %q temporary working container %q", iopts.from, from.Container)
+ }
+ }()
+ }
+ // err still holds the openBuilder result here when the container
+ // lookup failed for any reason other than "no such container".
+ if err != nil {
+ return fmt.Errorf("reading build container %q: %w", iopts.from, err)
+ }
+ fromMountPoint, err := from.Mount(from.MountLabel)
+ if err != nil {
+ return fmt.Errorf("mounting %q container %q: %w", iopts.from, from.Container, err)
+ }
+ unmountFrom = true
+ // Best-effort unmount/save on error paths; skipped once the
+ // normal-path unmount below resets unmountFrom.
+ defer func() {
+ if !unmountFrom {
+ return
+ }
+ if err := from.Unmount(); err != nil {
+ logrus.Errorf("error unmounting %q container %q", iopts.from, from.Container)
+ }
+ if err := from.Save(); err != nil {
+ logrus.Errorf("error saving information about %q container %q", iopts.from, from.Container)
+ }
+ }()
+ idMappingOptions = &from.IDMappingOptions
+ // Rebase the context directory and every source path so they are
+ // resolved inside the mounted source root.
+ contextdir = filepath.Join(fromMountPoint, iopts.contextdir)
+ for i := range args {
+ args[i] = filepath.Join(fromMountPoint, args[i])
+ }
+ }
+
+ builder, err := openBuilder(getContext(), store, name)
+ if err != nil {
+ return fmt.Errorf("reading build container %q: %w", name, err)
+ }
+
+ // Restart the digester so the digest reported below covers only this
+ // operation's content.
+ builder.ContentDigester.Restart()
+
+ options := buildah.AddAndCopyOptions{
+ Chmod: iopts.chmod,
+ Chown: iopts.chown,
+ Checksum: iopts.checksum,
+ ContextDir: contextdir,
+ IDMappingOptions: idMappingOptions,
+ }
+ if iopts.contextdir != "" {
+ var excludes []string
+
+ excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDir, iopts.ignoreFile, []string{})
+ if err != nil {
+ return err
+ }
+ options.Excludes = excludes
+ }
+
+ // Only ADD extracts local archives into the destination.
+ extractLocalArchives := verb == "ADD"
+ err = builder.Add(dest, extractLocalArchives, options, args...)
+ if err != nil {
+ return fmt.Errorf("adding content to container %q: %w", builder.Container, err)
+ }
+ // Normal-path cleanup: unlike the deferred versions above, failures
+ // here are reported to the caller.
+ if unmountFrom {
+ if err := from.Unmount(); err != nil {
+ return fmt.Errorf("unmounting %q container %q: %w", iopts.from, from.Container, err)
+ }
+ if err := from.Save(); err != nil {
+ return fmt.Errorf("saving information about %q container %q: %w", iopts.from, from.Container, err)
+ }
+ unmountFrom = false
+ }
+ if removeFrom {
+ if err := from.Delete(); err != nil {
+ return fmt.Errorf("deleting %q temporary working container %q: %w", iopts.from, from.Container, err)
+ }
+ removeFrom = false
+ }
+
+ contentType, digest := builder.ContentDigester.Digest()
+ if !iopts.quiet {
+ fmt.Printf("%s\n", digest.Hex())
+ }
+ if contentType != "" {
+ contentType = contentType + ":"
+ }
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) %s %s%s", verb, contentType, digest.Hex())
+ return builder.Save()
+}
diff --git a/cmd/buildah/build.go b/cmd/buildah/build.go
new file mode 100644
index 0000000..fd06b98
--- /dev/null
+++ b/cmd/buildah/build.go
@@ -0,0 +1,104 @@
+package main
+
+import (
+ "os"
+
+ "github.com/containers/buildah/imagebuildah"
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/util"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// init registers the "build" subcommand (and its historical aliases
+// "build-using-dockerfile" and "bud") with the root command and wires
+// up its flag sets.
+func init() {
+ buildDescription := `
+ Builds an OCI image using instructions in one or more Containerfiles.
+
+ If no arguments are specified, Buildah will use the current working directory
+ as the build context and look for a Containerfile. The build fails if no
+ Containerfile nor Dockerfile is present.`
+
+ layerFlagsResults := buildahcli.LayerResults{}
+ buildFlagResults := buildahcli.BudResults{}
+ fromAndBudResults := buildahcli.FromAndBudResults{}
+ userNSResults := buildahcli.UserNSResults{}
+ namespaceResults := buildahcli.NameSpaceResults{}
+
+ buildCommand := &cobra.Command{
+ Use: "build [CONTEXT]",
+ Aliases: []string{"build-using-dockerfile", "bud"},
+ Short: "Build an image using instructions in a Containerfile",
+ Long: buildDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ // Bundle the individual flag-result structs for buildCmd.
+ br := buildahcli.BuildOptions{
+ LayerResults: &layerFlagsResults,
+ BudResults: &buildFlagResults,
+ UserNSResults: &userNSResults,
+ FromAndBudResults: &fromAndBudResults,
+ NameSpaceResults: &namespaceResults,
+ }
+ return buildCmd(cmd, args, br)
+ },
+ Args: cobra.MaximumNArgs(1),
+ Example: `buildah build
+ buildah bud -f Containerfile.simple .
+ buildah bud --volume /home/test:/myvol:ro,Z -t imageName .
+ buildah bud -f Containerfile.simple -f Containerfile.notsosimple .`,
+ }
+ buildCommand.SetUsageTemplate(UsageTemplate())
+
+ flags := buildCommand.Flags()
+ flags.SetInterspersed(false)
+
+ // GetBudFlags returns the flags common to the build-style commands.
+ buildFlags := buildahcli.GetBudFlags(&buildFlagResults)
+ buildFlags.StringVar(&buildFlagResults.Runtime, "runtime", util.Runtime(), "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.")
+
+ layerFlags := buildahcli.GetLayerFlags(&layerFlagsResults)
+ fromAndBudFlags, err := buildahcli.GetFromAndBudFlags(&fromAndBudResults, &userNSResults, &namespaceResults)
+ if err != nil {
+ logrus.Errorf("failed to setup From and Build flags: %v", err)
+ os.Exit(1)
+ }
+
+ flags.AddFlagSet(&buildFlags)
+ flags.AddFlagSet(&layerFlags)
+ flags.AddFlagSet(&fromAndBudFlags)
+ // Let buildahcli.AliasFlags rewrite alternate flag spellings as the
+ // flags are parsed.
+ flags.SetNormalizeFunc(buildahcli.AliasFlags)
+
+ rootCmd.AddCommand(buildCommand)
+}
+
+// buildCmd implements "buildah build": it translates the parsed flags
+// into imagebuildah build options and runs the build against the
+// configured storage, optionally redirecting output to --logfile.
+func buildCmd(c *cobra.Command, inputArgs []string, iopts buildahcli.BuildOptions) error {
+ // --logfile redirects build output to a file for the duration of the
+ // command; the file is truncated if it already exists.
+ if c.Flag("logfile").Changed {
+ logfile, err := os.OpenFile(iopts.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+ if err != nil {
+ return err
+ }
+ iopts.Logwriter = logfile
+ defer iopts.Logwriter.Close()
+ }
+
+ options, containerfiles, removeAll, err := buildahcli.GenBuildOptions(c, inputArgs, iopts)
+ if err != nil {
+ return err
+ }
+ // GenBuildOptions may hand back temporary paths in removeAll; clean
+ // them up when the build finishes.
+ defer func() {
+ for _, f := range removeAll {
+ os.RemoveAll(f)
+ }
+ }()
+
+ options.DefaultMountsFilePath = globalFlagResults.DefaultMountsFile
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ id, ref, err := imagebuildah.BuildDockerfiles(getContext(), store, options, containerfiles...)
+ if err == nil && options.Manifest != "" {
+ logrus.Debugf("manifest list id = %q, ref = %q", id, ref.String())
+ }
+ return err
+}
diff --git a/cmd/buildah/commit.go b/cmd/buildah/commit.go
new file mode 100644
index 0000000..983c569
--- /dev/null
+++ b/cmd/buildah/commit.go
@@ -0,0 +1,301 @@
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/common/pkg/completion"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/shortnames"
+ storageTransport "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// commitInputOptions holds the parsed values of every "buildah commit" flag.
+// Field names mirror the corresponding flag names registered in
+// commitListFlagSet.
+type commitInputOptions struct {
+	authfile           string
+	omitHistory        bool
+	blobCache          string
+	certDir            string
+	changes            []string
+	configFile         string
+	creds              string
+	cwOptions          string // confidential-workload options (--cw)
+	disableCompression bool
+	format             string
+	iidfile            string
+	manifest           string
+	omitTimestamp      bool // hidden; mutually exclusive with timestamp/referenceTime
+	timestamp          int64
+	quiet              bool
+	referenceTime      string // hidden; mutually exclusive with timestamp/omitTimestamp
+	rm                 bool
+	signaturePolicy    string // hidden; not usually used
+	signBy             string
+	squash             bool
+	tlsVerify          bool
+	identityLabel      bool
+	encryptionKeys     []string
+	encryptLayers      []int
+	unsetenvs          []string
+}
+
+// init registers the "commit" subcommand with the root buildah command.
+func init() {
+	var (
+		opts commitInputOptions
+		commitDescription = "\n Writes a new image using the container's read-write layer and, if it is based\n on an image, the layers of that image."
+	)
+	commitCommand := &cobra.Command{
+		Use:   "commit",
+		Short: "Create an image from a working container",
+		Long:  commitDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return commitCmd(cmd, args, opts)
+		},
+		Example: `buildah commit containerID
+  buildah commit containerID newImageName
+  buildah commit containerID docker://localhost:5000/imageId`,
+	}
+	commitCommand.SetUsageTemplate(UsageTemplate())
+	commitListFlagSet(commitCommand, &opts)
+	rootCmd.AddCommand(commitCommand)
+
+}
+
+// commitListFlagSet registers every "buildah commit" flag on cmd, binding the
+// parsed values to opts, and wires up shell-completion functions for the
+// flags that benefit from them. Internal-use flags (blob-cache,
+// omit-timestamp, reference-time, signature-policy) are hidden.
+func commitListFlagSet(cmd *cobra.Command, opts *commitInputOptions) {
+	flags := cmd.Flags()
+	flags.SetInterspersed(false)
+
+	flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	_ = cmd.RegisterFlagCompletionFunc("authfile", completion.AutocompleteDefault)
+	flags.StringVar(&opts.blobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
+	if err := flags.MarkHidden("blob-cache"); err != nil {
+		panic(fmt.Sprintf("error marking blob-cache as hidden: %v", err))
+	}
+	flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", nil, "key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
+	_ = cmd.RegisterFlagCompletionFunc("encryption-key", completion.AutocompleteDefault)
+	flags.IntSliceVar(&opts.encryptLayers, "encrypt-layer", nil, "layers to encrypt, 0-indexed layer indices with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer). If not defined, will encrypt all layers if encryption-key flag is specified")
+	// BUGFIX: this previously re-registered completion for "encryption-key",
+	// leaving "encrypt-layer" with no completion function at all.
+	_ = cmd.RegisterFlagCompletionFunc("encrypt-layer", completion.AutocompleteNone)
+
+	flags.StringArrayVarP(&opts.changes, "change", "c", nil, "apply containerfile `instruction`s to the committed image")
+	flags.StringVar(&opts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+	_ = cmd.RegisterFlagCompletionFunc("cert-dir", completion.AutocompleteDefault)
+	flags.StringVar(&opts.configFile, "config", "", "apply configuration JSON `file` to the committed image")
+	_ = cmd.RegisterFlagCompletionFunc("config", completion.AutocompleteDefault)
+	flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
+	_ = cmd.RegisterFlagCompletionFunc("creds", completion.AutocompleteNone)
+	flags.StringVar(&opts.cwOptions, "cw", "", "confidential workload `options`")
+	flags.BoolVarP(&opts.disableCompression, "disable-compression", "D", true, "don't compress layers")
+	flags.StringVarP(&opts.format, "format", "f", defaultFormat(), "`format` of the image manifest and metadata")
+	_ = cmd.RegisterFlagCompletionFunc("format", completion.AutocompleteNone)
+	flags.StringVar(&opts.manifest, "manifest", "", "adds created image to the specified manifest list. Creates manifest list if it does not exist")
+	_ = cmd.RegisterFlagCompletionFunc("manifest", completion.AutocompleteNone)
+	flags.StringVar(&opts.iidfile, "iidfile", "", "write the image ID to the file")
+	_ = cmd.RegisterFlagCompletionFunc("iidfile", completion.AutocompleteDefault)
+	flags.BoolVar(&opts.omitTimestamp, "omit-timestamp", false, "set created timestamp to epoch 0 to allow for deterministic builds")
+	flags.Int64Var(&opts.timestamp, "timestamp", 0, "set created timestamp to epoch seconds to allow for deterministic builds, defaults to current time")
+	_ = cmd.RegisterFlagCompletionFunc("timestamp", completion.AutocompleteNone)
+	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output progress information when writing images")
+	flags.StringVar(&opts.referenceTime, "reference-time", "", "set the timestamp on the image to match the named `file`")
+	_ = cmd.RegisterFlagCompletionFunc("reference-time", completion.AutocompleteNone)
+	flags.StringVar(&opts.signBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
+	_ = cmd.RegisterFlagCompletionFunc("sign-by", completion.AutocompleteNone)
+	if err := flags.MarkHidden("omit-timestamp"); err != nil {
+		panic(fmt.Sprintf("error marking omit-timestamp as hidden: %v", err))
+	}
+	if err := flags.MarkHidden("reference-time"); err != nil {
+		panic(fmt.Sprintf("error marking reference-time as hidden: %v", err))
+	}
+
+	flags.BoolVar(&opts.omitHistory, "omit-history", false, "omit build history information from the built image (default false)")
+	flags.BoolVar(&opts.identityLabel, "identity-label", true, "add default builder label (default true)")
+	flags.BoolVar(&opts.rm, "rm", false, "remove the container and its content after committing it to an image. Default leaves the container and its content in place.")
+	flags.StringVar(&opts.signaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
+	_ = cmd.RegisterFlagCompletionFunc("signature-policy", completion.AutocompleteDefault)
+
+	if err := flags.MarkHidden("signature-policy"); err != nil {
+		panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
+	}
+
+	flags.BoolVar(&opts.squash, "squash", false, "produce an image with only one layer")
+	flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+
+	flags.StringSliceVar(&opts.unsetenvs, "unsetenv", nil, "unset env from final image")
+	_ = cmd.RegisterFlagCompletionFunc("unsetenv", completion.AutocompleteNone)
+}
+
+// commitCmd implements "buildah commit": it commits the named working
+// container's read-write layer (plus any base-image layers) as a new image,
+// optionally tagging or pushing it to the destination named by the second
+// argument. Exactly one of --timestamp, --omit-timestamp, and
+// --reference-time may be used.
+func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error {
+	var dest types.ImageReference
+	if len(args) == 0 {
+		return errors.New("container ID must be specified")
+	}
+	if err := cli.VerifyFlagsArgsOrder(args); err != nil {
+		return err
+	}
+	if err := auth.CheckAuthFile(iopts.authfile); err != nil {
+		return err
+	}
+
+	// args[0] is the container; an optional args[1] is the target image.
+	name := args[0]
+	args = Tail(args)
+	if len(args) > 1 {
+		return errors.New("too many arguments specified")
+	}
+	image := ""
+	if len(args) > 0 {
+		image = args[0]
+	}
+	compress := define.Gzip
+	if iopts.disableCompression {
+		compress = define.Uncompressed
+	}
+
+	format, err := cli.GetFormat(iopts.format)
+	if err != nil {
+		return err
+	}
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	ctx := getContext()
+
+	builder, err := openBuilder(ctx, store, name)
+	if err != nil {
+		return fmt.Errorf("reading build container %q: %w", name, err)
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+
+	// If the user specified an image, we may need to massage it a bit if
+	// no transport is specified: resolve it as a local short name instead.
+	if image != "" {
+		if dest, err = alltransports.ParseImageName(image); err != nil {
+			candidates, err2 := shortnames.ResolveLocally(systemContext, image)
+			if err2 != nil {
+				return err2
+			}
+			if len(candidates) == 0 {
+				return fmt.Errorf("parsing target image name %q", image)
+			}
+			dest2, err2 := storageTransport.Transport.ParseStoreReference(store, candidates[0].String())
+			if err2 != nil {
+				// NOTE(review): this wraps err (from ParseImageName) rather
+				// than err2 — confirm that surfacing the original parse
+				// failure is intentional here.
+				return fmt.Errorf("parsing target image name %q: %w", image, err)
+			}
+			dest = dest2
+		}
+	}
+
+	// Add builder identity information.
+	if iopts.identityLabel {
+		builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
+	}
+
+	encConfig, encLayers, err := cli.EncryptConfig(iopts.encryptionKeys, iopts.encryptLayers)
+	if err != nil {
+		return fmt.Errorf("unable to obtain encryption config: %w", err)
+	}
+
+	// --config supplies a Schema2Config JSON blob that overrides image config.
+	var overrideConfig *manifest.Schema2Config
+	if c.Flag("config").Changed {
+		configBytes, err := os.ReadFile(iopts.configFile)
+		if err != nil {
+			return fmt.Errorf("reading configuration blob from file: %w", err)
+		}
+		overrideConfig = &manifest.Schema2Config{}
+		if err := json.Unmarshal(configBytes, &overrideConfig); err != nil {
+			return fmt.Errorf("parsing configuration blob from %q: %w", iopts.configFile, err)
+		}
+	}
+
+	options := buildah.CommitOptions{
+		PreferredManifestType: format,
+		Manifest:              iopts.manifest,
+		Compression:           compress,
+		SignaturePolicyPath:   iopts.signaturePolicy,
+		SystemContext:         systemContext,
+		IIDFile:               iopts.iidfile,
+		Squash:                iopts.squash,
+		BlobDirectory:         iopts.blobCache,
+		OmitHistory:           iopts.omitHistory,
+		SignBy:                iopts.signBy,
+		OciEncryptConfig:      encConfig,
+		OciEncryptLayers:      encLayers,
+		UnsetEnvs:             iopts.unsetenvs,
+		OverrideChanges:       iopts.changes,
+		OverrideConfig:        overrideConfig,
+	}
+	// The three timestamp flags are mutually exclusive; count how many were
+	// used and reject the commit if more than one was.
+	exclusiveFlags := 0
+	if c.Flag("reference-time").Changed {
+		exclusiveFlags++
+		referenceFile := iopts.referenceTime
+		finfo, err := os.Stat(referenceFile)
+		if err != nil {
+			return fmt.Errorf("reading timestamp of file %q: %w", referenceFile, err)
+		}
+		timestamp := finfo.ModTime().UTC()
+		options.HistoryTimestamp = &timestamp
+	}
+	if c.Flag("timestamp").Changed {
+		exclusiveFlags++
+		timestamp := time.Unix(iopts.timestamp, 0).UTC()
+		options.HistoryTimestamp = &timestamp
+	}
+	if iopts.omitTimestamp {
+		exclusiveFlags++
+		timestamp := time.Unix(0, 0).UTC()
+		options.HistoryTimestamp = &timestamp
+	}
+
+	if iopts.cwOptions != "" {
+		confidentialWorkloadOptions, err := parse.GetConfidentialWorkloadOptions(iopts.cwOptions)
+		if err != nil {
+			return fmt.Errorf("parsing --cw arguments: %w", err)
+		}
+		options.ConfidentialWorkloadOptions = confidentialWorkloadOptions
+	}
+
+	if exclusiveFlags > 1 {
+		// BUGFIX: message previously read "can not use more then one
+		// timestamp option at at time".
+		return errors.New("cannot use more than one timestamp option at a time")
+	}
+
+	if !iopts.quiet {
+		options.ReportWriter = os.Stderr
+	}
+	id, ref, _, err := builder.Commit(ctx, dest, options)
+	if err != nil {
+		return util.GetFailureCause(err, fmt.Errorf("committing container %q to %q: %w", builder.Container, image, err))
+	}
+	// Log whatever identifying information the commit produced.
+	if ref != nil && id != "" {
+		logrus.Debugf("wrote image %s with ID %s", ref, id)
+	} else if ref != nil {
+		logrus.Debugf("wrote image %s", ref)
+	} else if id != "" {
+		logrus.Debugf("wrote image with ID %s", id)
+	} else {
+		logrus.Debugf("wrote image")
+	}
+	// Print the image ID unless it was already written to --iidfile.
+	if options.IIDFile == "" && id != "" {
+		fmt.Printf("%s\n", id)
+	}
+
+	if iopts.rm {
+		return builder.Delete()
+	}
+	return nil
+}
diff --git a/cmd/buildah/common.go b/cmd/buildah/common.go
new file mode 100644
index 0000000..c684ca3
--- /dev/null
+++ b/cmd/buildah/common.go
@@ -0,0 +1,248 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/common/pkg/umask"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+var (
+	// needToShutdownStore records whether getStore was called, meaning a
+	// storage.Store may be open and should be shut down before exit.
+	// NOTE(review): getStore sets this even when storage.GetStore returned
+	// an error — confirm that is intentional.
+	needToShutdownStore = false
+)
+
+// getStore resolves storage options from defaults, global flags, and any
+// per-command flags, then opens the storage.Store that buildah commands
+// operate on. On success the store is also registered with the
+// containers-storage image transport, and needToShutdownStore is set so the
+// caller can shut it down at exit.
+func getStore(c *cobra.Command) (storage.Store, error) {
+	if err := setXDGRuntimeDir(); err != nil {
+		return nil, err
+	}
+	options, err := storage.DefaultStoreOptions(unshare.GetRootlessUID() > 0, unshare.GetRootlessUID())
+	if err != nil {
+		return nil, err
+	}
+	// Command-line overrides take precedence over config-file defaults.
+	if c.Flag("root").Changed || c.Flag("runroot").Changed {
+		options.GraphRoot = globalFlagResults.Root
+		options.RunRoot = globalFlagResults.RunRoot
+	}
+	if c.Flag("storage-driver").Changed {
+		options.GraphDriverName = globalFlagResults.StorageDriver
+		// If any options setup in config, these should be dropped if user overrode the driver
+		options.GraphDriverOptions = []string{}
+	}
+	if c.Flag("storage-opt").Changed {
+		if len(globalFlagResults.StorageOpts) > 0 {
+			options.GraphDriverOptions = globalFlagResults.StorageOpts
+		}
+	}
+
+	// Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part
+	// of the mount command.
+	// Differently, allow the mount if we are already in a userns, as the mount point will still
+	// be accessible once "buildah mount" exits.
+	if os.Geteuid() != 0 && options.GraphDriverName != "vfs" {
+		return nil, fmt.Errorf("cannot mount using driver %s in rootless mode. You need to run it in a `buildah unshare` session", options.GraphDriverName)
+	}
+
+	// Global ID-mapping flags: a missing GID map defaults to the UID map,
+	// but a GID map without a UID map is rejected.
+	if len(globalFlagResults.UserNSUID) > 0 {
+		uopts := globalFlagResults.UserNSUID
+		gopts := globalFlagResults.UserNSGID
+
+		if len(gopts) == 0 {
+			gopts = uopts
+		}
+
+		uidmap, gidmap, err := unshare.ParseIDMappings(uopts, gopts)
+		if err != nil {
+			return nil, err
+		}
+		options.UIDMap = uidmap
+		options.GIDMap = gidmap
+	} else {
+		if len(globalFlagResults.UserNSGID) > 0 {
+			return nil, errors.New("option --userns-gid-map can not be used without --userns-uid-map")
+		}
+	}
+
+	// If a subcommand has the flags, check if they are set; if so, override the global values
+	if c.Flags().Lookup("userns-uid-map").Changed {
+		uopts, _ := c.Flags().GetStringSlice("userns-uid-map")
+		gopts, _ := c.Flags().GetStringSlice("userns-gid-map")
+		if len(gopts) == 0 {
+			gopts = uopts
+		}
+		uidmap, gidmap, err := unshare.ParseIDMappings(uopts, gopts)
+		if err != nil {
+			return nil, err
+		}
+		options.UIDMap = uidmap
+		options.GIDMap = gidmap
+	} else {
+		if c.Flags().Lookup("userns-gid-map").Changed {
+			return nil, errors.New("option --userns-gid-map can not be used without --userns-uid-map")
+		}
+	}
+	umask.Check()
+
+	store, err := storage.GetStore(options)
+	if store != nil {
+		is.Transport.SetStore(store)
+	}
+	// NOTE(review): set unconditionally, even if GetStore failed — confirm
+	// intended.
+	needToShutdownStore = true
+	return store, err
+}
+
+// setXDGRuntimeDir sets XDG_RUNTIME_DIR when it is unset and we are running
+// rootless, so that libraries consulting it find the per-user runtime
+// directory.
+func setXDGRuntimeDir() error {
+	if unshare.IsRootless() && os.Getenv("XDG_RUNTIME_DIR") == "" {
+		runtimeDir, err := storage.GetRootlessRuntimeDir(unshare.GetRootlessUID())
+		if err != nil {
+			return err
+		}
+		if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
+			// BUGFIX: preserve the underlying cause instead of discarding it.
+			return fmt.Errorf("could not set XDG_RUNTIME_DIR: %w", err)
+		}
+	}
+	return nil
+}
+
+// openBuilder looks up the working container named by name, importing it as
+// a builder if no saved builder state exists yet. An empty name, or a lookup
+// that produces neither a builder nor an error, yields a "finding build
+// container" error.
+func openBuilder(ctx context.Context, store storage.Store, name string) (builder *buildah.Builder, err error) {
+	if name == "" {
+		return nil, errors.New("finding build container")
+	}
+	builder, err = buildah.OpenBuilder(store, name)
+	if errors.Is(err, os.ErrNotExist) {
+		// No saved builder state: import the container instead.
+		builder, err = buildah.ImportBuilder(ctx, store, buildah.ImportOptions{
+			Container: name,
+		})
+	}
+	switch {
+	case err != nil:
+		return nil, err
+	case builder == nil:
+		return nil, errors.New("finding build container")
+	}
+	return builder, nil
+}
+
+// openBuilders returns builders for every working container in the store;
+// it is a thin wrapper around buildah.OpenAllBuilders.
+func openBuilders(store storage.Store) (builders []*buildah.Builder, err error) {
+	return buildah.OpenAllBuilders(store)
+}
+
+// openImage builds a pseudo-builder from the named image so that its
+// configuration can be inspected without creating a working container.
+func openImage(ctx context.Context, sc *types.SystemContext, store storage.Store, name string) (builder *buildah.Builder, err error) {
+	builder, err = buildah.ImportBuilderFromImage(ctx, store, buildah.ImportFromImageOptions{
+		Image:         name,
+		SystemContext: sc,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if builder == nil {
+		return nil, errors.New("mocking up build configuration")
+	}
+	return builder, nil
+}
+
+// getDateAndDigestAndSize returns the creation time, manifest digest, and
+// on-disk size of storeImage. Each value is computed best-effort: size
+// defaults to -1, digest to "", and created to the zero time when their
+// lookups fail. The returned error reports the size failure first, then the
+// manifest failure; inspection failures are silently ignored.
+func getDateAndDigestAndSize(ctx context.Context, sys *types.SystemContext, store storage.Store, storeImage storage.Image) (time.Time, string, int64, error) {
+	created := time.Time{}
+	is.Transport.SetStore(store)
+	storeRef, err := is.Transport.ParseStoreReference(store, storeImage.ID)
+	if err != nil {
+		return created, "", -1, err
+	}
+	img, err := storeRef.NewImageSource(ctx, nil)
+	if err != nil {
+		return created, "", -1, err
+	}
+	defer img.Close()
+	imgSize, sizeErr := store.ImageSize(storeImage.ID)
+	if sizeErr != nil {
+		imgSize = -1
+	}
+	manifestBytes, _, manifestErr := img.GetManifest(ctx, nil)
+	manifestDigest := ""
+	if manifestErr == nil && len(manifestBytes) > 0 {
+		mDigest, err := manifest.Digest(manifestBytes)
+		manifestErr = err
+		if manifestErr == nil {
+			manifestDigest = mDigest.String()
+		}
+	}
+	// Inspect only to recover the creation timestamp; failures here are
+	// deliberately not reported.
+	inspectable, inspectableErr := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(img, nil))
+	if inspectableErr == nil {
+		inspectInfo, inspectErr := inspectable.Inspect(ctx)
+		if inspectErr == nil && inspectInfo != nil && inspectInfo.Created != nil {
+			created = *inspectInfo.Created
+		}
+	}
+	if sizeErr != nil {
+		err = sizeErr
+	} else if manifestErr != nil {
+		err = manifestErr
+	}
+	return created, manifestDigest, imgSize, err
+}
+
+// getContext returns the context used by buildah CLI operations; currently
+// always context.TODO().
+func getContext() context.Context {
+	return context.TODO()
+}
+
+func getUserFlags() pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.String("user", "", "`user[:group]` to run the command as")
+ return fs
+}
+
+// defaultFormat returns the default image manifest format, honoring the
+// BUILDAH_FORMAT environment variable when it is set and otherwise falling
+// back to OCI.
+func defaultFormat() string {
+	if format := os.Getenv("BUILDAH_FORMAT"); format != "" {
+		return format
+	}
+	return buildah.OCI
+}
+
+// Tail returns everything after the first element of a, or an empty
+// (non-nil) slice when a has fewer than two elements. It replaces the
+// urfavecli Tail method for args.
+func Tail(a []string) []string {
+	if len(a) < 2 {
+		return []string{}
+	}
+	return a[1:]
+}
+
+// UsageTemplate returns the usage template for buildah commands.
+// This blocks the displaying of the global options. The main buildah
+// command should not use this. The template body must remain byte-exact:
+// cobra executes it with text/template.
+func UsageTemplate() string {
+	return `Usage:{{if .Runnable}}
+  {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+  {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+  {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+  {{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+  {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+{{end}}
+`
+}
diff --git a/cmd/buildah/common_test.go b/cmd/buildah/common_test.go
new file mode 100644
index 0000000..a390f40
--- /dev/null
+++ b/cmd/buildah/common_test.go
@@ -0,0 +1,136 @@
+package main
+
+import (
+ "flag"
+ "os"
+ "os/user"
+ "testing"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+var (
+	// signaturePolicyPath is set by the -signature-policy test flag.
+	signaturePolicyPath = ""
+	// storeOptions may be overridden by -root/-runroot/-storage-driver in TestMain.
+	storeOptions, _ = storage.DefaultStoreOptions(false, 0)
+	// testSystemContext may get a registries.conf path from -registries-conf.
+	testSystemContext = types.SystemContext{}
+)
+
+// TestMain parses test-only storage/registry flags, short-circuits when this
+// process is a buildah reexec child, configures logging, and runs the suite.
+func TestMain(m *testing.M) {
+	flag.StringVar(&signaturePolicyPath, "signature-policy", "", "pathname of signature policy file (not usually used)")
+	options := storage.StoreOptions{}
+	debug := false
+	flag.StringVar(&options.GraphRoot, "root", "", "storage root dir")
+	flag.StringVar(&options.RunRoot, "runroot", "", "storage state dir")
+	flag.StringVar(&options.GraphDriverName, "storage-driver", "", "storage driver")
+	flag.StringVar(&testSystemContext.SystemRegistriesConfPath, "registries-conf", "", "registries list")
+	flag.BoolVar(&debug, "debug", false, "turn on debug logging")
+	flag.Parse()
+	// Only replace the default store options when the caller overrode them.
+	if options.GraphRoot != "" || options.RunRoot != "" || options.GraphDriverName != "" {
+		storeOptions = options
+	}
+	// True when this process was re-executed for namespace setup; the child
+	// must not run the tests.
+	if buildah.InitReexec() {
+		return
+	}
+	logrus.SetLevel(logrus.ErrorLevel)
+	if debug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	os.Exit(m.Run())
+}
+
+// TestGetStore exercises getStore through a minimal cobra command that
+// declares every flag getStore looks up.
+func TestGetStore(t *testing.T) {
+	// Make sure the tests are running as root
+	failTestIfNotRoot(t)
+	testCmd := &cobra.Command{
+		Use: "test",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			_, err := getStore(cmd)
+			return err
+		},
+	}
+	flags := testCmd.PersistentFlags()
+	flags.String("root", storeOptions.GraphRoot, "")
+	flags.String("runroot", storeOptions.RunRoot, "")
+	flags.String("storage-driver", storeOptions.GraphDriverName, "")
+	flags.String("signature-policy", "", "")
+	if err := flags.MarkHidden("signature-policy"); err != nil {
+		t.Error(err)
+	}
+	// The following flags had to be added or we get panics in common.go when
+	// the lookups occur
+	flags.StringSlice("storage-opt", []string{}, "")
+	flags.String("registries-conf", "", "")
+	flags.String("userns-uid-map", "", "")
+	flags.String("userns-gid-map", "", "")
+	err := testCmd.Execute()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+// TestGetSize pulls a test image and verifies that
+// getDateAndDigestAndSize succeeds against it.
+func TestGetSize(t *testing.T) {
+	// Make sure the tests are running as root
+	failTestIfNotRoot(t)
+
+	store, err := storage.GetStore(storeOptions)
+	if err != nil {
+		t.Fatal(err)
+	} else if store != nil {
+		is.Transport.SetStore(store)
+	}
+
+	// Pull an image so that we know we have at least one
+	pullTestImage(t)
+
+	images, err := store.Images()
+	if err != nil {
+		t.Fatalf("Error reading images: %v", err)
+	}
+	// ROBUSTNESS: fail cleanly instead of panicking on images[0] if the
+	// store somehow contains no images after the pull.
+	if len(images) == 0 {
+		t.Fatal("expected at least one image in the store")
+	}
+
+	_, _, _, err = getDateAndDigestAndSize(getContext(), &testSystemContext, store, images[0])
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+// failTestIfNotRoot aborts the calling test unless it is running as root;
+// if the current user cannot be determined it only logs a warning.
+func failTestIfNotRoot(t *testing.T) {
+	u, err := user.Current()
+	if err != nil {
+		t.Log("Could not determine user. Running without root may cause tests to fail")
+	} else if u.Uid != "0" {
+		t.Fatal("tests will fail unless run as root")
+	}
+}
+
+// pullTestImage pulls busybox:latest into the test store by creating and
+// immediately deleting a builder based on it, and returns the pulled
+// image's ID.
+func pullTestImage(t *testing.T) string {
+	store, err := storage.GetStore(storeOptions)
+	if err != nil {
+		t.Fatal(err)
+	}
+	commonOpts := &define.CommonBuildOptions{
+		LabelOpts: nil,
+	}
+	options := buildah.BuilderOptions{
+		FromImage:           "busybox:latest",
+		SignaturePolicyPath: signaturePolicyPath,
+		CommonBuildOpts:     commonOpts,
+		SystemContext:       &testSystemContext,
+	}
+
+	// NewBuilder pulls the image as a side effect of creating the container.
+	b, err := buildah.NewBuilder(getContext(), store, options)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id := b.FromImageID
+	// Delete the container; the pulled image remains in the store.
+	err = b.Delete()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return id
+}
diff --git a/cmd/buildah/config.go b/cmd/buildah/config.go
new file mode 100644
index 0000000..64d65ef
--- /dev/null
+++ b/cmd/buildah/config.go
@@ -0,0 +1,443 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/docker"
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/mattn/go-shellwords"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// configResults holds the parsed values of every "buildah config" flag; the
+// field names mirror the flag names registered in init.
+type configResults struct {
+	addHistory             bool
+	annotation             []string
+	arch                   string
+	author                 string
+	cmd                    string
+	comment                string
+	createdBy              string
+	domainName             string
+	entrypoint             string
+	env                    []string
+	healthcheck            string
+	healthcheckInterval    string
+	healthcheckRetries     int
+	healthcheckStartPeriod string
+	healthcheckTimeout     string
+	historyComment         string
+	hostname               string
+	label                  []string
+	onbuild                []string
+	os                     string
+	osfeature              []string
+	osversion              string
+	ports                  []string
+	shell                  string
+	stopSignal             string
+	user                   string
+	variant                string
+	volume                 []string
+	workingDir             string
+	unsetLabels            []string
+}
+
+// init registers the "config" subcommand and all of its flags with the root
+// buildah command.
+func init() {
+	var (
+		configDescription = "\n Modifies the configuration values which will be saved to the image."
+		opts configResults
+	)
+	configCommand := &cobra.Command{
+		Use:   "config",
+		Short: "Update image configuration settings",
+		Long:  configDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return configCmd(cmd, args, opts)
+		},
+		Example: `buildah config --author='Jane Austen' --workingdir='/etc/mycontainers' containerID
+  buildah config --entrypoint '[ "/entrypoint.sh", "dev" ]' containerID
+  buildah config --env foo=bar --env PATH=$PATH containerID`,
+	}
+	configCommand.SetUsageTemplate(UsageTemplate())
+
+	flags := configCommand.Flags()
+	flags.SetInterspersed(false)
+	flags.BoolVar(&opts.addHistory, "add-history", false, "add an entry for this operation to the image's history. Use BUILDAH_HISTORY environment variable to override. (default false)")
+	flags.StringArrayVarP(&opts.annotation, "annotation", "a", []string{}, "add `annotation` e.g. annotation=value, for the target image (default [])")
+	flags.StringVar(&opts.arch, "arch", "", "set `architecture` of the target image")
+	flags.StringVar(&opts.author, "author", "", "set image author contact `information`")
+	flags.StringVar(&opts.cmd, "cmd", "", "set the default `command` to run for containers based on the image")
+	flags.StringVar(&opts.comment, "comment", "", "set a `comment` in the target image")
+	flags.StringVar(&opts.createdBy, "created-by", "", "set `description` of how the image was created")
+	flags.StringVar(&opts.domainName, "domainname", "", "set a domain `name` for containers based on image")
+	flags.StringVar(&opts.entrypoint, "entrypoint", "", "set `entry point` for containers based on image")
+	flags.StringArrayVarP(&opts.env, "env", "e", []string{}, "add `environment variable` to be set when running containers based on image (default [])")
+	flags.StringVar(&opts.healthcheck, "healthcheck", "", "set a `healthcheck` command for the target image")
+	flags.StringVar(&opts.healthcheckInterval, "healthcheck-interval", "", "set the `interval` between runs of the `healthcheck` command for the target image")
+	flags.IntVar(&opts.healthcheckRetries, "healthcheck-retries", 0, "set the `number` of times the `healthcheck` command has to fail")
+	flags.StringVar(&opts.healthcheckStartPeriod, "healthcheck-start-period", "", "set the amount of `time` to wait after starting a container before a failed `healthcheck` command will count as a failure")
+	flags.StringVar(&opts.healthcheckTimeout, "healthcheck-timeout", "", "set the maximum amount of `time` to wait for a `healthcheck` command for the target image")
+	flags.StringVar(&opts.historyComment, "history-comment", "", "set a `comment` for the history of the target image")
+	flags.StringVar(&opts.hostname, "hostname", "", "set a host`name` for containers based on image")
+	flags.StringArrayVarP(&opts.label, "label", "l", []string{}, "add image configuration `label` e.g. label=value")
+	flags.StringSliceVar(&opts.onbuild, "onbuild", []string{}, "add onbuild command to be run on images based on this image. Only supported on 'docker' formatted images")
+	flags.StringVar(&opts.os, "os", "", "set `operating system` of the target image")
+	flags.StringArrayVar(&opts.osfeature, "os-feature", []string{}, "set required OS `feature` for the target image")
+	flags.StringVar(&opts.osversion, "os-version", "", "set required OS `version` for the target image")
+	flags.StringSliceVarP(&opts.ports, "port", "p", []string{}, "add `port` to expose when running containers based on image (default [])")
+	flags.StringVar(&opts.shell, "shell", "", "add `shell` to run in containers")
+	flags.StringVar(&opts.stopSignal, "stop-signal", "", "set `stop signal` for containers based on image")
+	flags.StringVarP(&opts.user, "user", "u", "", "set default `user` to run inside containers based on image")
+	flags.StringVar(&opts.variant, "variant", "", "set architecture `variant` of the target image")
+	flags.StringSliceVarP(&opts.volume, "volume", "v", []string{}, "add default `volume` path to be created for containers based on image (default [])")
+	flags.StringVar(&opts.workingDir, "workingdir", "", "set working `directory` for containers based on image")
+	flags.StringSliceVar(&opts.unsetLabels, "unsetlabel", nil, "remove image configuration label")
+
+	rootCmd.AddCommand(configCommand)
+
+}
+
+// updateCmd applies the --cmd value to the builder: an empty/blank value
+// clears CMD, a JSON array is used verbatim, and anything else is tokenized
+// with shell-style word splitting.
+func updateCmd(builder *buildah.Builder, cmd string) error {
+	if strings.TrimSpace(cmd) == "" {
+		builder.SetCmd(nil)
+		return nil
+	}
+	var asJSON []string
+	if json.Unmarshal([]byte(cmd), &asJSON) == nil {
+		builder.SetCmd(asJSON)
+		return nil
+	}
+	// Not a JSON array: fall back to shell word splitting.
+	words, err := shellwords.Parse(cmd)
+	if err != nil {
+		return fmt.Errorf("parsing --cmd %q: %w", cmd, err)
+	}
+	builder.SetCmd(words)
+	return nil
+}
+
+// updateEntrypoint applies the --entrypoint value to the builder: an
+// empty/blank value clears the entrypoint, a JSON array is used verbatim,
+// and any other string is wrapped as ["/bin/sh", "-c", value]. In either
+// non-empty case a warning is logged when a CMD is already configured.
+func updateEntrypoint(builder *buildah.Builder, entrypoint string) {
+	if strings.TrimSpace(entrypoint) == "" {
+		builder.SetEntrypoint(nil)
+		return
+	}
+	var asJSON []string
+	if json.Unmarshal([]byte(entrypoint), &asJSON) == nil {
+		builder.SetEntrypoint(asJSON)
+		if len(builder.Cmd()) > 0 {
+			logrus.Warnf("cmd %q exists and will be passed to entrypoint as a parameter", strings.Join(builder.Cmd(), " "))
+		}
+		return
+	}
+
+	// it wasn't a valid json array, fall back to string
+	if len(builder.Cmd()) > 0 {
+		logrus.Warnf("cmd %q exists but will be ignored because of entrypoint settings", strings.Join(builder.Cmd(), " "))
+	}
+	builder.SetEntrypoint([]string{"/bin/sh", "-c", entrypoint})
+}
+
+// conditionallyAddHistory prepends an empty-layer history entry (formatted
+// from createdByFmt/args) when history recording is enabled, either by the
+// --add-history flag or by the CLI default (BUILDAH_HISTORY).
+func conditionallyAddHistory(builder *buildah.Builder, c *cobra.Command, createdByFmt string, args ...interface{}) {
+	history := buildahcli.DefaultHistory()
+	// An explicit --add-history overrides the environment-derived default.
+	if c.Flag("add-history").Changed {
+		history, _ = c.Flags().GetBool("add-history")
+	}
+	if history {
+		now := time.Now().UTC()
+		created := &now
+		createdBy := fmt.Sprintf(createdByFmt, args...)
+		builder.AddPrependedEmptyLayer(created, createdBy, "", "")
+	}
+}
+
+func updateConfig(builder *buildah.Builder, c *cobra.Command, iopts configResults) error {
+ if c.Flag("author").Changed {
+ builder.SetMaintainer(iopts.author)
+ }
+ if c.Flag("created-by").Changed {
+ builder.SetCreatedBy(iopts.createdBy)
+ }
+ if c.Flag("arch").Changed {
+ builder.SetArchitecture(iopts.arch)
+ }
+ if c.Flag("variant").Changed {
+ builder.SetVariant(iopts.variant)
+ }
+ if c.Flag("os").Changed {
+ builder.SetOS(iopts.os)
+ }
+ if c.Flag("os-feature").Changed {
+ for _, osFeatureSpec := range iopts.osfeature {
+ switch {
+ case osFeatureSpec == "-":
+ builder.ClearOSFeatures()
+ case strings.HasSuffix(osFeatureSpec, "-"):
+ builder.UnsetOSFeature(strings.TrimSuffix(osFeatureSpec, "-"))
+ default:
+ builder.SetOSFeature(osFeatureSpec)
+ }
+ }
+ }
+ if c.Flag("os-version").Changed {
+ builder.SetOSVersion(iopts.osversion)
+ }
+ if c.Flag("user").Changed {
+ builder.SetUser(iopts.user)
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) USER %s", iopts.user)
+ }
+ if c.Flag("shell").Changed {
+ shell := iopts.shell
+ shellSpec, err := shellwords.Parse(shell)
+ if err != nil {
+ return fmt.Errorf("parsing --shell %q: %w", shell, err)
+ }
+ builder.SetShell(shellSpec)
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) SHELL %s", shell)
+ }
+ if c.Flag("stop-signal").Changed {
+ builder.SetStopSignal(iopts.stopSignal)
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) STOPSIGNAL %s", iopts.stopSignal)
+ }
+ if c.Flag("port").Changed {
+ for _, portSpec := range iopts.ports {
+ switch {
+ case string(portSpec[0]) == "-":
+ builder.ClearPorts()
+ case strings.HasSuffix(portSpec, "-"):
+ builder.UnsetPort(strings.TrimSuffix(portSpec, "-"))
+ default:
+ builder.SetPort(portSpec)
+ }
+ }
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) EXPOSE %s", strings.Join(iopts.ports, " "))
+ }
+
+ for _, envSpec := range iopts.env {
+ env := strings.SplitN(envSpec, "=", 2)
+ switch {
+ case len(env) > 1:
+ var unexpanded []string
+ getenv := func(name string) string {
+ for _, envvar := range builder.Env() {
+ val := strings.SplitN(envvar, "=", 2)
+ if len(val) == 2 && val[0] == name {
+ return val[1]
+ }
+ }
+ logrus.Errorf("error expanding variable %q: no value set in configuration", name)
+ unexpanded = append(unexpanded, name)
+ return name
+ }
+ env[1] = os.Expand(env[1], getenv)
+ builder.SetEnv(env[0], env[1])
+ case env[0] == "-":
+ builder.ClearEnv()
+ case strings.HasSuffix(env[0], "-"):
+ builder.UnsetEnv(strings.TrimSuffix(env[0], "-"))
+ default:
+ value := os.Getenv(env[0])
+ if value == "" {
+ return fmt.Errorf("setting env %q: no value given", env[0])
+ }
+ builder.SetEnv(env[0], value)
+ }
+ }
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) ENV %s", strings.Join(iopts.env, " "))
+ if c.Flag("entrypoint").Changed {
+ updateEntrypoint(builder, iopts.entrypoint)
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) ENTRYPOINT %s", iopts.entrypoint)
+ }
+ // cmd should always run after entrypoint; setting entrypoint clears cmd
+ if c.Flag("cmd").Changed {
+ if err := updateCmd(builder, iopts.cmd); err != nil {
+ return err
+ }
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) CMD %s", iopts.cmd)
+ }
+ if c.Flag("volume").Changed {
+ if volSpec := iopts.volume; len(volSpec) > 0 {
+ for _, volVal := range volSpec {
+ switch {
+ case volVal == "-":
+ builder.ClearVolumes()
+ case strings.HasSuffix(volVal, "-"):
+ builder.RemoveVolume(strings.TrimSuffix(volVal, "-"))
+ default:
+ builder.AddVolume(volVal)
+ }
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) VOLUME %s", volVal)
+ }
+ }
+ }
+ if err := updateHealthcheck(builder, c, iopts); err != nil {
+ return err
+ }
+ if c.Flag("label").Changed {
+ for _, labelSpec := range iopts.label {
+ label := strings.SplitN(labelSpec, "=", 2)
+ switch {
+ case len(label) > 1:
+ builder.SetLabel(label[0], label[1])
+ case label[0] == "-":
+ builder.ClearLabels()
+ case strings.HasSuffix(label[0], "-"):
+ builder.UnsetLabel(strings.TrimSuffix(label[0], "-"))
+ default:
+ builder.SetLabel(label[0], "")
+ }
+ }
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) LABEL %s", strings.Join(iopts.label, " "))
+ }
+ // unset labels if any
+ for _, key := range iopts.unsetLabels {
+ builder.UnsetLabel(key)
+ }
+ if c.Flag("workingdir").Changed {
+ builder.SetWorkDir(iopts.workingDir)
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) WORKDIR %s", iopts.workingDir)
+ }
+ if c.Flag("comment").Changed {
+ builder.SetComment(iopts.comment)
+ }
+ if c.Flag("history-comment").Changed {
+ builder.SetHistoryComment(iopts.historyComment)
+ }
+ if c.Flag("domainname").Changed {
+ builder.SetDomainname(iopts.domainName)
+ }
+ if c.Flag("hostname").Changed {
+ name := iopts.hostname
+ if name != "" && builder.Format == define.OCIv1ImageManifest {
+ logrus.Warnf("HOSTNAME is not supported for OCI V1 image format, hostname %s will be ignored. Must use `docker` format", name)
+ }
+ builder.SetHostname(name)
+ }
+ if c.Flag("onbuild").Changed {
+ for _, onbuild := range iopts.onbuild {
+ builder.SetOnBuild(onbuild)
+ conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) ONBUILD %s", onbuild)
+ }
+ }
+
+ if c.Flag("annotation").Changed {
+ for _, annotationSpec := range iopts.annotation {
+ annotation := strings.SplitN(annotationSpec, "=", 2)
+ switch {
+ case len(annotation) > 1:
+ builder.SetAnnotation(annotation[0], annotation[1])
+ case annotation[0] == "-":
+ builder.ClearAnnotations()
+ case strings.HasSuffix(annotation[0], "-"):
+ builder.UnsetAnnotation(strings.TrimSuffix(annotation[0], "-"))
+ default:
+ builder.SetAnnotation(annotation[0], "")
+ }
+ }
+ }
+ return nil
+}
+
// updateHealthcheck updates the container's healthcheck configuration when
// any healthcheck-related flag changed. It starts from the existing
// healthcheck (or Docker-style defaults when none is set), overlays the
// changed flags, and records a HEALTHCHECK history entry. An empty resulting
// Test list means "no healthcheck" and clears the configuration instead.
func updateHealthcheck(builder *buildah.Builder, c *cobra.Command, iopts configResults) error {
	if c.Flag("healthcheck").Changed || c.Flag("healthcheck-interval").Changed || c.Flag("healthcheck-retries").Changed || c.Flag("healthcheck-start-period").Changed || c.Flag("healthcheck-timeout").Changed {
		healthcheck := builder.Healthcheck()
		// args accumulates the option text reproduced in the history entry.
		args := ""
		if healthcheck == nil {
			// No existing healthcheck: start from defaults.
			healthcheck = &docker.HealthConfig{
				Test:        []string{"NONE"},
				Interval:    30 * time.Second,
				StartPeriod: 0,
				Timeout:     30 * time.Second,
				Retries:     3,
			}
		}
		if c.Flag("healthcheck").Changed {
			// The test command is given as a single shell-quoted string.
			test, err := shellwords.Parse(iopts.healthcheck)
			if err != nil {
				return fmt.Errorf("parsing --healthcheck %q: %w", iopts.healthcheck, err)
			}
			healthcheck.Test = test
		}
		if c.Flag("healthcheck-interval").Changed {
			duration, err := time.ParseDuration(iopts.healthcheckInterval)
			if err != nil {
				return fmt.Errorf("parsing --healthcheck-interval %q: %w", iopts.healthcheckInterval, err)
			}
			healthcheck.Interval = duration
			args = args + "--interval=" + iopts.healthcheckInterval + " "
		}
		if c.Flag("healthcheck-retries").Changed {
			healthcheck.Retries = iopts.healthcheckRetries
			args = args + "--retries=" + strconv.Itoa(iopts.healthcheckRetries) + " "
		}
		if c.Flag("healthcheck-start-period").Changed {
			duration, err := time.ParseDuration(iopts.healthcheckStartPeriod)
			if err != nil {
				return fmt.Errorf("parsing --healthcheck-start-period %q: %w", iopts.healthcheckStartPeriod, err)
			}
			healthcheck.StartPeriod = duration
			args = args + "--start-period=" + iopts.healthcheckStartPeriod + " "
		}
		if c.Flag("healthcheck-timeout").Changed {
			duration, err := time.ParseDuration(iopts.healthcheckTimeout)
			if err != nil {
				return fmt.Errorf("parsing --healthcheck-timeout %q: %w", iopts.healthcheckTimeout, err)
			}
			healthcheck.Timeout = duration
			args = args + "--timeout=" + iopts.healthcheckTimeout + " "
		}
		if len(healthcheck.Test) == 0 {
			builder.SetHealthcheck(nil)
			conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) HEALTHCHECK NONE")
		} else {
			builder.SetHealthcheck(healthcheck)
			conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) HEALTHCHECK %s%s", args, iopts.healthcheck)
		}
	}
	return nil
}
+
+func configCmd(c *cobra.Command, args []string, iopts configResults) error {
+ if len(args) == 0 {
+ return fmt.Errorf("container ID must be specified")
+ }
+ if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
+ return err
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments specified")
+ }
+ name := args[0]
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ builder, err := openBuilder(getContext(), store, name)
+ if err != nil {
+ return fmt.Errorf("reading build container %q: %w", name, err)
+ }
+
+ if err := updateConfig(builder, c, iopts); err != nil {
+ return err
+ }
+ return builder.Save()
+}
diff --git a/cmd/buildah/containers.go b/cmd/buildah/containers.go
new file mode 100644
index 0000000..40216e0
--- /dev/null
+++ b/cmd/buildah/containers.go
@@ -0,0 +1,344 @@
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+ "text/template"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/formats"
+ "github.com/containers/buildah/util"
+ "github.com/containers/storage"
+ "github.com/spf13/cobra"
+)
+
// containersHeader maps containerOutputParams field names to the column
// headings used when rendering --format templates.
var containersHeader = map[string]string{
	"ContainerName": "CONTAINER NAME",
	"ContainerID":   "CONTAINER ID",
	"Builder":       "BUILDER",
	"ImageID":       "IMAGE ID",
	"ImageName":     "IMAGE NAME",
}

// jsonContainer is the per-container record emitted by
// "buildah containers --json".
type jsonContainer struct {
	ID            string `json:"id"`
	Builder       bool   `json:"builder"`
	ImageID       string `json:"imageid"`
	ImageName     string `json:"imagename"`
	ContainerName string `json:"containername"`
}

// containerOutputParams is one row of table/template output. Builder is a
// display string (" *" for buildah working containers, "" otherwise).
type containerOutputParams struct {
	ContainerID   string
	Builder       string
	ImageID       string
	ImageName     string
	ContainerName string
}

// containerOptions is the normalized form of the command-line flags, as
// consumed by outputContainers.
type containerOptions struct {
	all        bool
	format     string
	json       bool
	noHeading  bool
	noTruncate bool
	quiet      bool
}

// containerFilterParams holds the parsed --filter terms; empty fields are
// "no constraint".
type containerFilterParams struct {
	id       string
	name     string
	ancestor string
}

// containersResults receives the raw flag values from cobra.
type containersResults struct {
	all        bool
	filter     string
	format     string
	json       bool
	noheading  bool
	notruncate bool
	quiet      bool
}
+
// init wires up the "buildah containers" command and its flags.
func init() {
	var (
		containersDescription = "\n  Lists containers which appear to be " + define.Package + " working containers, their\n  names and IDs, and the names and IDs of the images from which they were\n  initialized."
		opts                  containersResults
	)
	containersCommand := &cobra.Command{
		Use:     "containers",
		Aliases: []string{"list", "ls", "ps"},
		Short:   "List working containers and their base images",
		Long:    containersDescription,
		//Flags: sortFlags(containersFlags),
		RunE: func(cmd *cobra.Command, args []string) error {
			return containersCmd(cmd, args, opts)
		},
		Example: `buildah containers
  buildah containers --format "{{.ContainerID}} {{.ContainerName}}"
  buildah containers -q --noheading --notruncate`,
	}
	containersCommand.SetUsageTemplate(UsageTemplate())

	flags := containersCommand.Flags()
	flags.BoolVarP(&opts.all, "all", "a", false, "also list non-buildah containers")
	flags.StringVarP(&opts.filter, "filter", "f", "", "filter output based on conditions provided")
	flags.StringVar(&opts.format, "format", "", "pretty-print containers using a Go template")
	flags.BoolVar(&opts.json, "json", false, "output in JSON format")
	flags.BoolVarP(&opts.noheading, "noheading", "n", false, "do not print column headings")
	flags.BoolVar(&opts.notruncate, "notruncate", false, "do not truncate output")
	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "display only container IDs")

	rootCmd.AddCommand(containersCommand)
}
+
+func containersCmd(c *cobra.Command, args []string, iopts containersResults) error {
+ if len(args) > 0 {
+ return errors.New("'buildah containers' does not accept arguments")
+ }
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ if c.Flag("quiet").Changed && c.Flag("format").Changed {
+ return errors.New("quiet and format are mutually exclusive")
+ }
+
+ opts := containerOptions{
+ all: iopts.all,
+ format: iopts.format,
+ json: iopts.json,
+ noHeading: iopts.noheading,
+ noTruncate: iopts.notruncate,
+ quiet: iopts.quiet,
+ }
+
+ var params *containerFilterParams
+ if c.Flag("filter").Changed {
+ params, err = parseCtrFilter(iopts.filter)
+ if err != nil {
+ return fmt.Errorf("parsing filter: %w", err)
+ }
+ }
+
+ if !opts.noHeading && !opts.quiet && opts.format == "" && !opts.json {
+ containerOutputHeader(!opts.noTruncate)
+ }
+
+ return outputContainers(store, opts, params)
+}
+
// outputContainers lists containers from the store in the requested format
// (JSON, Go template, quiet IDs, or a fixed-width table). When opts.all is
// false only buildah working containers are listed; otherwise every container
// in storage is listed, with buildah's own marked in the BUILDER column.
// params, when non-nil, restricts the output to matching entries.
func outputContainers(store storage.Store, opts containerOptions, params *containerFilterParams) error {
	// Cache image-ID -> first-name lookups, since many containers often
	// share a base image. NOTE(review): when the image lookup fails this
	// returns "" (a map miss) rather than an error — confirm intended.
	seenImages := make(map[string]string)
	imageNameForID := func(id string) string {
		if id == "" {
			// Containers built "from scratch" have no image ID.
			return buildah.BaseImageFakeName
		}
		imageName, ok := seenImages[id]
		if ok {
			return imageName
		}
		img, err2 := store.Image(id)
		if err2 == nil && len(img.Names) > 0 {
			seenImages[id] = img.Names[0]
		}
		return seenImages[id]
	}

	builders, err := openBuilders(store)
	if err != nil {
		return fmt.Errorf("reading build containers: %w", err)
	}
	var (
		containerOutput []containerOutputParams
		JSONContainers  []jsonContainer
	)
	if !opts.all {
		// only output containers created by buildah
		for _, builder := range builders {
			image := imageNameForID(builder.FromImageID)
			if !matchesCtrFilter(builder.ContainerID, builder.Container, builder.FromImageID, image, params) {
				continue
			}
			if opts.json {
				JSONContainers = append(JSONContainers, jsonContainer{ID: builder.ContainerID,
					Builder:       true,
					ImageID:       builder.FromImageID,
					ImageName:     image,
					ContainerName: builder.Container})
				continue
			}
			output := containerOutputParams{
				ContainerID:   builder.ContainerID,
				Builder:       " *",
				ImageID:       builder.FromImageID,
				ImageName:     image,
				ContainerName: builder.Container,
			}
			containerOutput = append(containerOutput, output)
		}
	} else {
		// output all containers currently in storage
		// builderMap lets us flag which of them are buildah's.
		builderMap := make(map[string]struct{})
		for _, builder := range builders {
			builderMap[builder.ContainerID] = struct{}{}
		}
		containers, err2 := store.Containers()
		if err2 != nil {
			return fmt.Errorf("reading list of all containers: %w", err2)
		}
		for _, container := range containers {
			name := ""
			if len(container.Names) > 0 {
				name = container.Names[0]
			}
			_, ours := builderMap[container.ID]
			builder := ""
			if ours {
				builder = " *"
			}
			if !matchesCtrFilter(container.ID, name, container.ImageID, imageNameForID(container.ImageID), params) {
				continue
			}
			if opts.json {
				JSONContainers = append(JSONContainers, jsonContainer{ID: container.ID,
					Builder:       ours,
					ImageID:       container.ImageID,
					ImageName:     imageNameForID(container.ImageID),
					ContainerName: name})
				continue
			}
			output := containerOutputParams{
				ContainerID:   container.ID,
				Builder:       builder,
				ImageID:       container.ImageID,
				ImageName:     imageNameForID(container.ImageID),
				ContainerName: name,
			}
			containerOutput = append(containerOutput, output)
		}
	}
	// JSON output short-circuits the table/template renderers.
	if opts.json {
		data, err := json.MarshalIndent(JSONContainers, "", "    ")
		if err != nil {
			return err
		}
		fmt.Printf("%s\n", data)
		return nil
	}

	if opts.format != "" {
		out := formats.StdoutTemplateArray{Output: containersToGeneric(containerOutput), Template: opts.format, Fields: containersHeader}
		return formats.Writer(out).Out()
	}

	for _, ctr := range containerOutput {
		if opts.quiet {
			fmt.Printf("%-64s\n", ctr.ContainerID)
			continue
		}
		containerOutputUsingFormatString(!opts.noTruncate, ctr)
	}
	return nil
}
+
+func containersToGeneric(templParams []containerOutputParams) (genericParams []interface{}) {
+ if len(templParams) > 0 {
+ for _, v := range templParams {
+ genericParams = append(genericParams, interface{}(v))
+ }
+ }
+ return genericParams
+}
+
+func containerOutputUsingTemplate(format string, params containerOutputParams) error {
+ if matched, err := regexp.MatchString("{{.*}}", format); err != nil {
+ return fmt.Errorf("validating format provided: %s: %w", format, err)
+ } else if !matched {
+ return fmt.Errorf("invalid format provided: %s", format)
+ }
+
+ tmpl, err := template.New("container").Parse(format)
+ if err != nil {
+ return fmt.Errorf("Template parsing error: %w", err)
+ }
+
+ err = tmpl.Execute(os.Stdout, params)
+ if err != nil {
+ return err
+ }
+ fmt.Println()
+ return nil
+}
+
+func containerOutputUsingFormatString(truncate bool, params containerOutputParams) {
+ if truncate {
+ fmt.Printf("%-12.12s %-8s %-12.12s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, util.TruncateString(params.ImageName, 32), params.ContainerName)
+ } else {
+ fmt.Printf("%-64s %-8s %-64s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, params.ImageName, params.ContainerName)
+ }
+}
+
// containerOutputHeader prints the column headings for the table output,
// using the same column widths as containerOutputUsingFormatString.
func containerOutputHeader(truncate bool) {
	format := "%-64s %-8s %-64s %-32s %s\n"
	if truncate {
		format = "%-12s %-8s %-12s %-32s %s\n"
	}
	fmt.Printf(format, "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
}
+
+func parseCtrFilter(filter string) (*containerFilterParams, error) {
+ params := new(containerFilterParams)
+ filters := strings.Split(filter, ",")
+ for _, param := range filters {
+ pair := strings.SplitN(param, "=", 2)
+ if len(pair) != 2 {
+ return nil, fmt.Errorf("incorrect filter value %q, should be of form filter=value", param)
+ }
+ switch strings.TrimSpace(pair[0]) {
+ case "id":
+ params.id = pair[1]
+ case "name":
+ params.name = pair[1]
+ case "ancestor":
+ params.ancestor = pair[1]
+ default:
+ return nil, fmt.Errorf("invalid filter %q", pair[0])
+ }
+ }
+ return params, nil
+}
+
// matchesCtrName reports whether argName occurs as a substring of the
// container name.
func matchesCtrName(ctrName, argName string) bool {
	return strings.Index(ctrName, argName) >= 0
}
+
+func matchesAncestor(imgName, imgID, argName string) bool {
+ if matchesID(imgID, argName) {
+ return true
+ }
+ return matchesReference(imgName, argName)
+}
+
+func matchesCtrFilter(ctrID, ctrName, imgID, imgName string, params *containerFilterParams) bool {
+ if params == nil {
+ return true
+ }
+ if params.id != "" && !matchesID(ctrID, params.id) {
+ return false
+ }
+ if params.name != "" && !matchesCtrName(ctrName, params.name) {
+ return false
+ }
+ if params.ancestor != "" && !matchesAncestor(imgName, imgID, params.ancestor) {
+ return false
+ }
+ return true
+}
diff --git a/cmd/buildah/containers_test.go b/cmd/buildah/containers_test.go
new file mode 100644
index 0000000..d07b700
--- /dev/null
+++ b/cmd/buildah/containers_test.go
@@ -0,0 +1,145 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestContainerTemplateOutputValidFormat(t *testing.T) {
+ params := containerOutputParams{
+ ContainerID: "e477836657bb",
+ Builder: " ",
+ ImageID: "f975c5035748",
+ ImageName: "test/image:latest",
+ ContainerName: "test-container",
+ }
+
+ formatString := "Container ID: {{.ContainerID}}"
+ expectedString := "Container ID: " + params.ContainerID
+
+ output, err := captureOutputWithError(func() error {
+ return containerOutputUsingTemplate(formatString, params)
+ })
+ if err != nil {
+ t.Error(err)
+ } else if strings.TrimSpace(output) != expectedString {
+ t.Errorf("Errorf with template output:\nExpected: %s\nReceived: %s\n", expectedString, output)
+ }
+}
+
+func TestContainerTemplateOutputInvalidFormat(t *testing.T) {
+ params := containerOutputParams{
+ ContainerID: "e477836657bb",
+ Builder: " ",
+ ImageID: "f975c5035748",
+ ImageName: "test/image:latest",
+ ContainerName: "test-container",
+ }
+
+ formatString := "ContainerID"
+
+ err := containerOutputUsingTemplate(formatString, params)
+ if err == nil || err.Error() != "invalid format provided: ContainerID" {
+ t.Fatalf("expected error invalid format")
+ }
+}
+
+func TestContainerTemplateOutputNonexistentField(t *testing.T) {
+ params := containerOutputParams{
+ ContainerID: "e477836657bb",
+ Builder: " ",
+ ImageID: "f975c5035748",
+ ImageName: "test/image:latest",
+ ContainerName: "test-container",
+ }
+
+ formatString := "{{.ID}}"
+
+ err := containerOutputUsingTemplate(formatString, params)
+ if err == nil || !strings.Contains(err.Error(), "can't evaluate field ID") {
+ t.Fatalf("expected error nonexistent field")
+ }
+}
+
+func TestContainerFormatStringOutput(t *testing.T) {
+ params := containerOutputParams{
+ ContainerID: "e477836657bb",
+ Builder: " ",
+ ImageID: "f975c5035748",
+ ImageName: "test/with/this/very/long/image:latest",
+ ContainerName: "test-container",
+ }
+ const trimmedImageName = "test/with/this/very/long/imag..."
+
+ output := captureOutput(func() {
+ containerOutputUsingFormatString(true, params)
+ })
+ expectedOutput := fmt.Sprintf("%-12.12s %-8s %-12.12s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, trimmedImageName, params.ContainerName)
+ if output != expectedOutput {
+ t.Errorf("Error outputting using format string:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
+ }
+
+ output = captureOutput(func() {
+ containerOutputUsingFormatString(false, params)
+ })
+ expectedOutput = fmt.Sprintf("%-64s %-8s %-64s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, params.ImageName, params.ContainerName)
+ if output != expectedOutput {
+ t.Errorf("Error outputting using format string:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
+ }
+}
+
+func TestContainerHeaderOutput(t *testing.T) {
+ output := captureOutput(func() {
+ containerOutputHeader(true)
+ })
+ expectedOutput := fmt.Sprintf("%-12s %-8s %-12s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
+ if output != expectedOutput {
+ t.Errorf("Error outputting using format string:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
+ }
+
+ output = captureOutput(func() {
+ containerOutputHeader(false)
+ })
+ expectedOutput = fmt.Sprintf("%-64s %-8s %-64s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
+ if output != expectedOutput {
+ t.Errorf("Error outputting using format string:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
+ }
+}
+
// captureOutputWithError redirects os.Stdout while f runs and returns
// whatever f wrote to it, along with f's error.
//
// BUGFIX: the original returned early when f failed, leaving os.Stdout
// permanently redirected to the (never-closed) pipe for the rest of the
// test process. Stdout is now restored on every path.
func captureOutputWithError(f func() error) (string, error) {
	old := os.Stdout
	r, w, err := os.Pipe()
	if err != nil {
		return "", err
	}
	os.Stdout = w

	ferr := f()

	// Restore stdout and close the write end before reading, regardless of
	// whether f succeeded — otherwise the redirection would leak.
	w.Close()
	os.Stdout = old
	if ferr != nil {
		return "", ferr
	}

	var buf bytes.Buffer
	io.Copy(&buf, r) //nolint
	return buf.String(), nil
}
+
// captureOutput runs f with os.Stdout redirected to a pipe and returns
// everything f printed, so tests can compare it against expected values.
func captureOutput(f func()) string {
	saved := os.Stdout
	r, w, _ := os.Pipe()
	os.Stdout = w
	defer func() { os.Stdout = saved }()

	f()
	w.Close()

	var out bytes.Buffer
	io.Copy(&out, r) //nolint
	return out.String()
}
diff --git a/cmd/buildah/dumpbolt.go b/cmd/buildah/dumpbolt.go
new file mode 100644
index 0000000..b208b3b
--- /dev/null
+++ b/cmd/buildah/dumpbolt.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+ bolt "go.etcd.io/bbolt"
+)
+
var (
	dumpBoltDescription = `Dumps a bolt database. The output format should not be depended upon.`
	// dumpBoltCommand is hidden: it is a debugging aid rather than part of
	// the supported CLI surface, and takes exactly one argument, the
	// database path.
	dumpBoltCommand = &cobra.Command{
		Use:     "dumpbolt",
		Short:   "Dump a bolt database",
		Long:    dumpBoltDescription,
		RunE:    dumpBoltCmd,
		Example: "DATABASE",
		Args:    cobra.ExactArgs(1),
		Hidden:  true,
	}
)
+
// dumpBoltCmd opens the bolt database named by the single argument read-only
// and prints every bucket recursively, one "key: value" pair per line,
// indented two spaces per nesting level.
func dumpBoltCmd(c *cobra.Command, args []string) error {
	db, err := bolt.Open(args[0], 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		return fmt.Errorf("opening database %q: %w", args[0], err)
	}
	defer db.Close()

	// encode renders any byte outside 33..126 (i.e. control characters,
	// space, DEL, and all high bytes) as a \NNN octal escape, so binary
	// keys/values stay one-per-line and unambiguous.
	encode := func(value []byte) string {
		var b strings.Builder
		for i := range value {
			if value[i] <= 32 || value[i] >= 127 {
				b.WriteString(fmt.Sprintf("\\%03o", value[i]))
			} else {
				b.WriteByte(value[i])
			}
		}
		return b.String()
	}

	return db.View(func(tx *bolt.Tx) error {
		// dumpBucket prints a bucket's leaf entries first, then recurses
		// into its sub-buckets (entries with a nil value are sub-buckets).
		var dumpBucket func(string, []byte, *bolt.Bucket) error
		dumpBucket = func(indent string, name []byte, b *bolt.Bucket) error {
			var subs [][]byte
			indentMore := "    "
			fmt.Printf("%s%s:\n", indent, encode(name))
			err := b.ForEach(func(k, v []byte) (err error) {
				if v == nil {
					subs = append(subs, k)
				} else {
					_, err = fmt.Printf("%s%s: %s\n", indent+indentMore, encode(k), encode(v))
				}
				return err
			})
			if err != nil {
				return err
			}
			for _, sub := range subs {
				subbucket := b.Bucket(sub)
				if err = dumpBucket(indent+indentMore, sub, subbucket); err != nil {
					return err
				}
			}
			return err
		}
		return tx.ForEach(func(name []byte, b *bolt.Bucket) error { return dumpBucket("", name, b) })
	})
}
+
// init registers the hidden dumpbolt command with the root command.
func init() {
	rootCmd.AddCommand(dumpBoltCommand)
}
diff --git a/cmd/buildah/from.go b/cmd/buildah/from.go
new file mode 100644
index 0000000..3ee6f8a
--- /dev/null
+++ b/cmd/buildah/from.go
@@ -0,0 +1,355 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/common/pkg/config"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
// fromReply collects the flag values for "buildah from". The embedded
// pointers carry the flag groups shared with "buildah bud"/"build".
type fromReply struct {
	authfile        string // path of the registry authentication file
	certDir         string // directory holding registry TLS certificates
	cidfile         string // file to write the new container's ID into
	creds           string // registry credentials as [username[:password]]
	format          string // manifest/metadata format (oci or docker)
	name            string // explicit name for the working container
	pull            string // pull policy: true/false/always/never
	pullAlways      bool   // hidden legacy alias for --pull=always
	pullNever       bool   // hidden legacy alias for --pull=never
	quiet           bool   // suppress pull progress output
	signaturePolicy string // hidden: path of a signature policy file
	tlsVerify       bool   // require and verify HTTPS to the registry
	*cli.FromAndBudResults
	*cli.UserNSResults
	*cli.NameSpaceResults
}
+
// suffix holds the value of the hidden --suffix flag, a suffix added to the
// names of intermediate containers; the flag parser writes into it directly.
var suffix string
+
// init wires up the "buildah from" command: its own flags, several hidden
// legacy flags, and the flag groups shared with "buildah bud".
func init() {
	var (
		fromDescription = "\n  Creates a new working container, either from scratch or using a specified\n  image as a starting point."
		opts            fromReply
	)
	fromAndBudResults := cli.FromAndBudResults{}
	userNSResults := cli.UserNSResults{}
	namespaceResults := cli.NameSpaceResults{}
	fromCommand := &cobra.Command{
		Use:   "from",
		Short: "Create a working container based on an image",
		Long:  fromDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Add in the results from the common cli commands
			opts.FromAndBudResults = &fromAndBudResults
			opts.UserNSResults = &userNSResults
			opts.NameSpaceResults = &namespaceResults
			return fromCmd(cmd, args, opts)
		},
		Example: `buildah from --pull imagename
  buildah from docker-daemon:imagename:imagetag
  buildah from --name "myimagename" myregistry/myrepository/imagename:imagetag`,
	}
	fromCommand.SetUsageTemplate(UsageTemplate())

	flags := fromCommand.Flags()
	flags.SetInterspersed(false)
	flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
	flags.StringVar(&opts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
	flags.StringVar(&opts.cidfile, "cidfile", "", "write the container ID to the file")
	flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
	flags.StringVarP(&opts.format, "format", "f", defaultFormat(), "`format` of the image manifest and metadata")
	flags.StringVar(&opts.name, "name", "", "`name` for the working container")
	flags.StringVar(&opts.pull, "pull", "true", "pull the image from the registry if newer or not present in store, if false, only pull the image if not present, if always, pull the image even if the named image is present in store, if never, only use the image present in store if available")
	flags.Lookup("pull").NoOptDefVal = "true" //allow `--pull ` to be set to `true` as expected.

	// Hidden legacy aliases kept for backward compatibility with the
	// documented --pull=always/--pull=never spellings.
	flags.BoolVar(&opts.pullAlways, "pull-always", false, "pull the image even if the named image is present in store")
	if err := flags.MarkHidden("pull-always"); err != nil {
		panic(fmt.Sprintf("error marking the pull-always flag as hidden: %v", err))
	}
	flags.BoolVar(&opts.pullNever, "pull-never", false, "do not pull the image, use the image present in store if available")
	if err := flags.MarkHidden("pull-never"); err != nil {
		panic(fmt.Sprintf("error marking the pull-never flag as hidden: %v", err))
	}
	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output progress information when pulling images")
	flags.StringVar(&opts.signaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
	flags.StringVar(&suffix, "suffix", "", "suffix to add to intermediate containers")
	if err := flags.MarkHidden("suffix"); err != nil {
		panic(fmt.Sprintf("error marking the suffix flag as hidden: %v", err))
	}

	if err := flags.MarkHidden("signature-policy"); err != nil {
		panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
	}
	flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")

	// Add in the common flags
	fromAndBudFlags, err := cli.GetFromAndBudFlags(&fromAndBudResults, &userNSResults, &namespaceResults)
	if err != nil {
		logrus.Errorf("failed to setup From and Bud flags: %v", err)
		os.Exit(1)
	}
	flags.AddFlagSet(&fromAndBudFlags)
	flags.SetNormalizeFunc(cli.AliasFlags)

	rootCmd.AddCommand(fromCommand)
}
+
+func onBuild(builder *buildah.Builder, quiet bool) error {
+ ctr := 0
+ for _, onBuildSpec := range builder.OnBuild() {
+ ctr = ctr + 1
+ commands := strings.Split(onBuildSpec, " ")
+ command := strings.ToUpper(commands[0])
+ args := commands[1:]
+ if !quiet {
+ fmt.Fprintf(os.Stderr, "STEP %d: %s\n", ctr, onBuildSpec)
+ }
+ switch command {
+ case "ADD":
+ case "COPY":
+ dest := ""
+ size := len(args)
+ if size > 1 {
+ dest = args[size-1]
+ args = args[:size-1]
+ }
+ if err := builder.Add(dest, command == "ADD", buildah.AddAndCopyOptions{}, args...); err != nil {
+ return err
+ }
+ case "ANNOTATION":
+ annotation := strings.SplitN(args[0], "=", 2)
+ if len(annotation) > 1 {
+ builder.SetAnnotation(annotation[0], annotation[1])
+ } else {
+ builder.UnsetAnnotation(annotation[0])
+ }
+ case "CMD":
+ builder.SetCmd(args)
+ case "ENV":
+ env := strings.SplitN(args[0], "=", 2)
+ if len(env) > 1 {
+ builder.SetEnv(env[0], env[1])
+ } else {
+ builder.UnsetEnv(env[0])
+ }
+ case "ENTRYPOINT":
+ builder.SetEntrypoint(args)
+ case "EXPOSE":
+ builder.SetPort(strings.Join(args, " "))
+ case "HOSTNAME":
+ builder.SetHostname(strings.Join(args, " "))
+ case "LABEL":
+ label := strings.SplitN(args[0], "=", 2)
+ if len(label) > 1 {
+ builder.SetLabel(label[0], label[1])
+ } else {
+ builder.UnsetLabel(label[0])
+ }
+ case "MAINTAINER":
+ builder.SetMaintainer(strings.Join(args, " "))
+ case "ONBUILD":
+ builder.SetOnBuild(strings.Join(args, " "))
+ case "RUN":
+ var stdout io.Writer
+ if quiet {
+ stdout = io.Discard
+ }
+ if err := builder.Run(args, buildah.RunOptions{Stdout: stdout}); err != nil {
+ return err
+ }
+ case "SHELL":
+ builder.SetShell(args)
+ case "STOPSIGNAL":
+ builder.SetStopSignal(strings.Join(args, " "))
+ case "USER":
+ builder.SetUser(strings.Join(args, " "))
+ case "VOLUME":
+ builder.AddVolume(strings.Join(args, " "))
+ case "WORKINGDIR":
+ builder.SetWorkDir(strings.Join(args, " "))
+ default:
+ logrus.Errorf("unknown OnBuild command %q; ignored", onBuildSpec)
+ }
+ }
+ builder.ClearOnBuild()
+ return nil
+}
+
// fromCmd implements "buildah from": it validates the single image-name
// argument (or "scratch"), resolves pull policy and all namespace/mapping/
// device options, creates a new working container, replays any ONBUILD
// triggers from the base image, and prints the new container's name.
func fromCmd(c *cobra.Command, args []string, iopts fromReply) error {
	defaultContainerConfig, err := config.Default()
	if err != nil {
		return fmt.Errorf("failed to get container config: %w", err)
	}

	if len(args) == 0 {
		return errors.New("an image name (or \"scratch\") must be specified")
	}
	if err := cli.VerifyFlagsArgsOrder(args); err != nil {
		return err
	}
	if len(args) > 1 {
		return errors.New("too many arguments specified")
	}

	if err := auth.CheckAuthFile(iopts.authfile); err != nil {
		return err
	}
	systemContext, err := parse.SystemContextFromOptions(c)
	if err != nil {
		return fmt.Errorf("building system context: %w", err)
	}
	// Only a single platform makes sense for one working container.
	platforms, err := parse.PlatformsFromOptions(c)
	if err != nil {
		return err
	}
	if len(platforms) > 1 {
		logrus.Warnf("ignoring platforms other than %+v: %+v", platforms[0], platforms[1:])
	}

	// --pull, --pull-always and --pull-never are mutually exclusive.
	pullFlagsCount := 0
	if c.Flag("pull").Changed {
		pullFlagsCount++
	}
	if c.Flag("pull-always").Changed {
		pullFlagsCount++
	}
	if c.Flag("pull-never").Changed {
		pullFlagsCount++
	}

	if pullFlagsCount > 1 {
		return errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'")
	}

	// Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always
	// --pull-always and --pull-never. The --pull-never and --pull-always options
	// will not be documented.
	pullPolicy := define.PullIfMissing
	if strings.EqualFold(strings.TrimSpace(iopts.pull), "true") {
		pullPolicy = define.PullIfNewer
	}
	if iopts.pullAlways || strings.EqualFold(strings.TrimSpace(iopts.pull), "always") {
		pullPolicy = define.PullAlways
	}
	if iopts.pullNever || strings.EqualFold(strings.TrimSpace(iopts.pull), "never") {
		pullPolicy = define.PullNever
	}
	logrus.Debugf("Pull Policy for pull [%v]", pullPolicy)

	signaturePolicy := iopts.signaturePolicy

	store, err := getStore(c)
	if err != nil {
		return err
	}

	commonOpts, err := parse.CommonBuildOptions(c)
	if err != nil {
		return err
	}

	isolation, err := parse.IsolationOption(iopts.Isolation)
	if err != nil {
		return err
	}

	namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c)
	if err != nil {
		return fmt.Errorf("parsing namespace-related options: %w", err)
	}
	usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation)
	if err != nil {
		return fmt.Errorf("parsing ID mapping options: %w", err)
	}
	namespaceOptions.AddOrReplace(usernsOption...)

	format, err := cli.GetFormat(iopts.format)
	if err != nil {
		return err
	}
	// Device list = configured defaults plus any --device flags.
	devices := define.ContainerDevices{}
	for _, device := range append(defaultContainerConfig.Containers.Devices.Get(), iopts.Devices...) {
		dev, err := parse.DeviceFromPath(device)
		if err != nil {
			return err
		}
		devices = append(devices, dev...)
	}

	capabilities, err := defaultContainerConfig.Capabilities("", iopts.CapAdd, iopts.CapDrop)
	if err != nil {
		return err
	}

	commonOpts.Ulimit = append(defaultContainerConfig.Containers.DefaultUlimits.Get(), commonOpts.Ulimit...)

	decConfig, err := cli.DecryptConfig(iopts.DecryptionKeys)
	if err != nil {
		return fmt.Errorf("unable to obtain decrypt config: %w", err)
	}

	var pullPushRetryDelay time.Duration
	pullPushRetryDelay, err = time.ParseDuration(iopts.RetryDelay)
	if err != nil {
		return fmt.Errorf("unable to parse value provided %q as --retry-delay: %w", iopts.RetryDelay, err)
	}

	options := buildah.BuilderOptions{
		FromImage:             args[0],
		Container:             iopts.name,
		ContainerSuffix:       suffix,
		GroupAdd:              iopts.GroupAdd,
		PullPolicy:            pullPolicy,
		SignaturePolicyPath:   signaturePolicy,
		SystemContext:         systemContext,
		DefaultMountsFilePath: globalFlagResults.DefaultMountsFile,
		Isolation:             isolation,
		NamespaceOptions:      namespaceOptions,
		ConfigureNetwork:      networkPolicy,
		CNIPluginPath:         iopts.CNIPlugInPath,
		CNIConfigDir:          iopts.CNIConfigDir,
		IDMappingOptions:      idmappingOptions,
		Capabilities:          capabilities,
		CommonBuildOpts:       commonOpts,
		Format:                format,
		BlobDirectory:         iopts.BlobCache,
		Devices:               devices,
		MaxPullRetries:        iopts.Retry,
		PullRetryDelay:        pullPushRetryDelay,
		OciDecryptConfig:      decConfig,
	}

	if !iopts.quiet {
		options.ReportWriter = os.Stderr
	}

	builder, err := buildah.NewBuilder(getContext(), store, options)
	if err != nil {
		return err
	}

	// Replay ONBUILD triggers inherited from the base image.
	if err := onBuild(builder, iopts.quiet); err != nil {
		return err
	}

	if iopts.cidfile != "" {
		filePath := iopts.cidfile
		if err := os.WriteFile(filePath, []byte(builder.ContainerID), 0644); err != nil {
			return fmt.Errorf("failed to write container ID file %q: %w", filePath, err)
		}
	}
	// The container name on stdout is the command's primary output.
	fmt.Printf("%s\n", builder.Container)
	return builder.Save()
}
diff --git a/cmd/buildah/images.go b/cmd/buildah/images.go
new file mode 100644
index 0000000..27eb069
--- /dev/null
+++ b/cmd/buildah/images.go
@@ -0,0 +1,348 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/formats"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/libimage"
+ "github.com/docker/go-units"
+ "github.com/spf13/cobra"
+)
+
+const none = "<none>"
+
// jsonImage is the per-image record emitted by `buildah images --json`.
// CreatedAtRaw is the machine-readable timestamp; CreatedAt is the
// human-readable "N units ago" rendering of the same instant.
type jsonImage struct {
	ID           string    `json:"id"`
	Names        []string  `json:"names"`
	Digest       string    `json:"digest"`
	CreatedAt    string    `json:"createdat"`
	Size         string    `json:"size"`
	Created      int64     `json:"created"`
	CreatedAtRaw time.Time `json:"createdatraw"`
	ReadOnly     bool      `json:"readonly"`
	History      []string  `json:"history"`
}
+
// imageOutputParams is one row of the non-JSON (table/template) output.
// An image with several name:tag pairs produces several rows, differing
// only in Tag/Name/History.
type imageOutputParams struct {
	Tag          string
	ID           string
	Name         string
	Digest       string
	Created      int64
	CreatedAt    string
	Size         string
	CreatedAtRaw time.Time
	ReadOnly     bool
	History      string
}
+
// imageOptions is the normalized form of the command-line flags that the
// formatting helpers consume.  Note that truncate here means "do
// truncate" — it is the negation of the --no-trunc flag.
type imageOptions struct {
	all       bool
	digests   bool
	format    string
	json      bool
	noHeading bool
	truncate  bool
	quiet     bool
	readOnly  bool
	history   bool
}
+
// imageResults collects the raw flag values for `buildah images`:
// the shared display options plus any --filter expressions.
type imageResults struct {
	imageOptions
	filter []string
}
+
// imagesHeader maps template field names to the column headings printed
// by the table writer.
var imagesHeader = map[string]string{
	"Name":      "REPOSITORY",
	"Tag":       "TAG",
	"ID":        "IMAGE ID",
	"CreatedAt": "CREATED",
	"Size":      "SIZE",
	"ReadOnly":  "R/O",
	"History":   "HISTORY",
}
+
// init registers the `buildah images` subcommand and its flags with the
// root command.
func init() {
	var (
		opts imageResults
		imagesDescription = "\n Lists locally stored images."
	)
	imagesCommand := &cobra.Command{
		Use:   "images",
		Short: "List images in local storage",
		Long:  imagesDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			return imagesCmd(cmd, args, &opts)
		},
		Example: `buildah images --all
  buildah images [imageName]
  buildah images --format '{{.ID}} {{.Name}} {{.Size}} {{.CreatedAtRaw}}'`,
	}
	imagesCommand.SetUsageTemplate(UsageTemplate())

	flags := imagesCommand.Flags()
	flags.SetInterspersed(false)
	flags.BoolVarP(&opts.all, "all", "a", false, "show all images, including intermediate images from a build")
	flags.BoolVar(&opts.digests, "digests", false, "show digests")
	flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "filter output based on conditions provided")
	flags.StringVar(&opts.format, "format", "", "pretty-print images using a Go template")
	flags.BoolVar(&opts.json, "json", false, "output in JSON format")
	flags.BoolVarP(&opts.noHeading, "noheading", "n", false, "do not print column headings")
	// TODO needs alias here -- to `notruncate`
	flags.BoolVar(&opts.truncate, "no-trunc", false, "do not truncate output")
	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "display only image IDs")
	flags.BoolVarP(&opts.history, "history", "", false, "display the image name history")

	rootCmd.AddCommand(imagesCommand)
}
+
+func imagesCmd(c *cobra.Command, args []string, iopts *imageResults) error {
+ if len(args) > 0 {
+ if iopts.all {
+ return errors.New("when using the --all switch, you may not pass any images names or IDs")
+ }
+
+ if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
+ return err
+ }
+ if len(args) > 1 {
+ return errors.New("'buildah images' requires at most 1 argument")
+ }
+ }
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return fmt.Errorf("building system context: %w", err)
+ }
+ runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+ if err != nil {
+ return err
+ }
+
+ ctx := context.Background()
+
+ options := &libimage.ListImagesOptions{}
+ if len(iopts.filter) > 0 {
+ options.Filters = iopts.filter
+ }
+ if !iopts.all {
+ options.Filters = append(options.Filters, "intermediate=false")
+ }
+
+ images, err := runtime.ListImages(ctx, args, options)
+ if err != nil {
+ return err
+ }
+
+ if iopts.quiet && iopts.format != "" {
+ return errors.New("quiet and format are mutually exclusive")
+ }
+
+ opts := imageOptions{
+ all: iopts.all,
+ digests: iopts.digests,
+ format: iopts.format,
+ json: iopts.json,
+ noHeading: iopts.noHeading,
+ truncate: !iopts.truncate,
+ quiet: iopts.quiet,
+ history: iopts.history,
+ }
+
+ if opts.json {
+ return formatImagesJSON(images, opts)
+ }
+
+ return formatImages(images, opts)
+}
+
+func outputHeader(opts imageOptions) string {
+ if opts.format != "" {
+ return strings.Replace(opts.format, `\t`, "\t", -1)
+ }
+ if opts.quiet {
+ return formats.IDString
+ }
+ format := "table {{.Name}}\t{{.Tag}}\t"
+ if opts.noHeading {
+ format = "{{.Name}}\t{{.Tag}}\t"
+ }
+
+ if opts.digests {
+ format += "{{.Digest}}\t"
+ }
+ format += "{{.ID}}\t{{.CreatedAt}}\t{{.Size}}"
+ if opts.readOnly {
+ format += "\t{{.ReadOnly}}"
+ }
+ if opts.history {
+ format += "\t{{.History}}"
+ }
+ return format
+}
+
+func formatImagesJSON(images []*libimage.Image, opts imageOptions) error {
+ jsonImages := []jsonImage{}
+ for _, image := range images {
+ // Copy the base data over to the output param.
+ size, err := image.Size()
+ if err != nil {
+ return err
+ }
+ created := image.Created()
+ jsonImages = append(jsonImages,
+ jsonImage{
+ CreatedAtRaw: created,
+ Created: created.Unix(),
+ CreatedAt: units.HumanDuration(time.Since(created)) + " ago",
+ Digest: image.Digest().String(),
+ ID: truncateID(image.ID(), opts.truncate),
+ Names: image.Names(),
+ ReadOnly: image.IsReadOnly(),
+ Size: formattedSize(size),
+ })
+ }
+
+ data, err := json.MarshalIndent(jsonImages, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
+type imagesSorted []imageOutputParams
+
+func (a imagesSorted) Less(i, j int) bool {
+ return a[i].CreatedAtRaw.After(a[j].CreatedAtRaw)
+}
+
+func (a imagesSorted) Len() int {
+ return len(a)
+}
+
+func (a imagesSorted) Swap(i, j int) {
+ a[i], a[j] = a[j], a[i]
+}
+
// formatImages renders the image list as a table (or via a --format
// template).  Each name:tag pair of an image becomes its own output row;
// with --quiet only the first row per image ID is emitted.  Rows are
// sorted newest-first before printing.
func formatImages(images []*libimage.Image, opts imageOptions) error {
	var outputData imagesSorted

	for _, image := range images {
		var outputParam imageOutputParams
		size, err := image.Size()
		if err != nil {
			return err
		}
		created := image.Created()
		outputParam.CreatedAtRaw = created
		outputParam.Created = created.Unix()
		outputParam.CreatedAt = units.HumanDuration(time.Since(created)) + " ago"
		outputParam.Digest = image.Digest().String()
		outputParam.ID = truncateID(image.ID(), opts.truncate)
		outputParam.Size = formattedSize(size)
		outputParam.ReadOnly = image.IsReadOnly()

		repoTags, err := image.NamedRepoTags()
		if err != nil {
			return err
		}

		nameTagPairs, err := libimage.ToNameTagPairs(repoTags)
		if err != nil {
			return err
		}

		// One row per name:tag pair, all sharing the per-image data above.
		for _, pair := range nameTagPairs {
			newParam := outputParam
			newParam.Name = pair.Name
			newParam.Tag = pair.Tag
			newParam.History = formatHistory(image.NamesHistory(), pair.Name, pair.Tag)
			outputData = append(outputData, newParam)
			// `images -q` should print a given ID only once.
			if opts.quiet {
				break
			}
		}
	}

	sort.Sort(outputData)
	out := formats.StdoutTemplateArray{Output: imagesToGeneric(outputData), Template: outputHeader(opts), Fields: imagesHeader}
	return formats.Writer(out).Out()
}
+
+func formatHistory(history []string, name, tag string) string {
+ if len(history) == 0 {
+ return none
+ }
+ // Skip the first history entry if already existing as name
+ if fmt.Sprintf("%s:%s", name, tag) == history[0] {
+ if len(history) == 1 {
+ return none
+ }
+ return strings.Join(history[1:], ", ")
+ }
+ return strings.Join(history, ", ")
+}
+
// truncateID returns the image ID for display: the full ID with a
// "sha256:" prefix, or — when truncate is set — just its first 12
// characters.
func truncateID(id string, truncate bool) string {
	if !truncate {
		return "sha256:" + id
	}
	const shortLen = 12
	if len(id) <= shortLen {
		return id
	}
	return id[:shortLen]
}
+
+func imagesToGeneric(templParams []imageOutputParams) (genericParams []interface{}) {
+ if len(templParams) > 0 {
+ for _, v := range templParams {
+ genericParams = append(genericParams, interface{}(v))
+ }
+ }
+ return genericParams
+}
+
// formattedSize renders a byte count using decimal (SI) units with three
// significant digits, e.g. "1.23 MB".  (Renamed the local away from
// "suffixes"/"units" so it cannot be confused with the go-units package.)
func formattedSize(size int64) string {
	labels := []string{"B", "KB", "MB", "GB", "TB"}
	value := float64(size)
	idx := 0
	for value >= 1000 && idx < len(labels)-1 {
		value /= 1000
		idx++
	}
	return fmt.Sprintf("%.3g %s", value, labels[idx])
}
+
// matchesID reports whether imageID begins with the (possibly shortened)
// ID the user supplied.
func matchesID(imageID, argID string) bool {
	return len(argID) <= len(imageID) && imageID[:len(argID)] == argID
}
+
// matchesReference reports whether an image name matches the argument the
// user supplied: an empty argument matches everything; an argument with a
// tag must match both the repository suffix and the tag; an untagged
// argument matches on the repository suffix alone.
func matchesReference(name, argName string) bool {
	if argName == "" {
		return true
	}
	splitName := strings.Split(name, ":")
	// If the arg contains a tag, we handle it differently than if it does not
	if strings.Contains(argName, ":") {
		splitArg := strings.Split(argName, ":")
		// Bug fix: a name without a tag previously caused an
		// index-out-of-range panic on splitName[1]; a tagged argument
		// simply cannot match an untagged name.
		if len(splitName) < 2 {
			return false
		}
		return strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1])
	}
	return strings.HasSuffix(splitName[0], argName)
}
diff --git a/cmd/buildah/images_test.go b/cmd/buildah/images_test.go
new file mode 100644
index 0000000..bfd3f84
--- /dev/null
+++ b/cmd/buildah/images_test.go
@@ -0,0 +1,70 @@
+package main
+
+import (
+ "testing"
+)
+
// TestSizeFormatting checks formattedSize at zero, the KB boundary, and
// the TB boundary.
func TestSizeFormatting(t *testing.T) {
	size := formattedSize(0)
	if size != "0 B" {
		t.Errorf("Error formatting size: expected '%s' got '%s'", "0 B", size)
	}

	size = formattedSize(1000)
	if size != "1 KB" {
		t.Errorf("Error formatting size: expected '%s' got '%s'", "1 KB", size)
	}

	size = formattedSize(1000 * 1000 * 1000 * 1000)
	if size != "1 TB" {
		t.Errorf("Error formatting size: expected '%s' got '%s'", "1 TB", size)
	}
}
+
// TestMatchWithTag verifies that a tagged argument matches on repository
// suffix plus tag, and that a different repository path does not match.
func TestMatchWithTag(t *testing.T) {
	isMatch := matchesReference("gcr.io/pause:latest", "pause:latest")
	if !isMatch {
		t.Error("expected match, got not match")
	}

	isMatch = matchesReference("gcr.io/pause:latest", "kubernetes/pause:latest")
	if isMatch {
		t.Error("expected not match, got match")
	}
}
+
// TestNoMatchesReferenceWithTag verifies that tagged arguments naming a
// different repository never match.
func TestNoMatchesReferenceWithTag(t *testing.T) {
	isMatch := matchesReference("gcr.io/pause:latest", "redis:latest")
	if isMatch {
		t.Error("expected no match, got match")
	}

	isMatch = matchesReference("gcr.io/pause:latest", "kubernetes/redis:latest")
	if isMatch {
		t.Error("expected no match, got match")
	}
}
+
// TestMatchesReferenceWithoutTag verifies suffix matching when the
// argument carries no tag.
func TestMatchesReferenceWithoutTag(t *testing.T) {
	isMatch := matchesReference("gcr.io/pause:latest", "pause")
	if !isMatch {
		t.Error("expected match, got not match")
	}

	isMatch = matchesReference("gcr.io/pause:latest", "kubernetes/pause")
	if isMatch {
		t.Error("expected not match, got match")
	}
}
+
// TestNoMatchesReferenceWithoutTag verifies that untagged arguments
// naming a different repository never match.
func TestNoMatchesReferenceWithoutTag(t *testing.T) {
	isMatch := matchesReference("gcr.io/pause:latest", "redis")
	if isMatch {
		t.Error("expected no match, got match")
	}

	isMatch = matchesReference("gcr.io/pause:latest", "kubernetes/redis")
	if isMatch {
		t.Error("expected no match, got match")
	}
}
diff --git a/cmd/buildah/info.go b/cmd/buildah/info.go
new file mode 100644
index 0000000..98b2178
--- /dev/null
+++ b/cmd/buildah/info.go
@@ -0,0 +1,102 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "regexp"
+ "runtime"
+ "text/template"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+)
+
// infoResults holds the flag values for `buildah info`.
type infoResults struct {
	debug  bool   // --debug: include toolchain/build metadata
	format string // --format: optional Go template for the output
}
+
// init registers the `buildah info` subcommand and its flags.
func init() {
	var (
		infoDescription = "\n Display information about the host and current storage statistics which are useful when reporting issues."
		opts infoResults
	)
	infoCommand := &cobra.Command{
		Use:   "info",
		Short: "Display Buildah system information",
		Long:  infoDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			return infoCmd(cmd, opts)
		},
		Args:    cobra.NoArgs,
		Example: `buildah info`,
	}
	infoCommand.SetUsageTemplate(UsageTemplate())

	flags := infoCommand.Flags()
	flags.BoolVarP(&opts.debug, "debug", "d", false, "display additional debug information")
	flags.StringVar(&opts.format, "format", "", "use `format` as a Go template to format the output")
	rootCmd.AddCommand(infoCommand)
}
+
+func infoCmd(c *cobra.Command, iopts infoResults) error {
+ info := map[string]interface{}{}
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ infoArr, err := buildah.Info(store)
+ if err != nil {
+ return fmt.Errorf("getting info: %w", err)
+ }
+
+ if iopts.debug {
+ debugInfo := debugInfo()
+ infoArr = append(infoArr, buildah.InfoData{Type: "debug", Data: debugInfo})
+ }
+
+ for _, currInfo := range infoArr {
+ info[currInfo.Type] = currInfo.Data
+ }
+
+ if iopts.format != "" {
+ format := iopts.format
+ if matched, err := regexp.MatchString("{{.*}}", format); err != nil {
+ return fmt.Errorf("validating format provided: %s: %w", format, err)
+ } else if !matched {
+ return fmt.Errorf("invalid format provided: %s", format)
+ }
+ t, err := template.New("format").Parse(format)
+ if err != nil {
+ return fmt.Errorf("Template parsing error: %w", err)
+ }
+ if err = t.Execute(os.Stdout, info); err != nil {
+ return err
+ }
+ if term.IsTerminal(int(os.Stdout.Fd())) {
+ fmt.Println()
+ }
+ return nil
+ }
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ if term.IsTerminal(int(os.Stdout.Fd())) {
+ enc.SetEscapeHTML(false)
+ }
+ return enc.Encode(info)
+}
+
+// top-level "debug" info
+func debugInfo() map[string]interface{} {
+ info := map[string]interface{}{}
+ info["compiler"] = runtime.Compiler
+ info["go version"] = runtime.Version()
+ info["buildah version"] = define.Version
+ info["git commit"] = GitCommit
+ return info
+}
diff --git a/cmd/buildah/inspect.go b/cmd/buildah/inspect.go
new file mode 100644
index 0000000..d78022b
--- /dev/null
+++ b/cmd/buildah/inspect.go
@@ -0,0 +1,135 @@
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "regexp"
+ "text/template"
+
+ "github.com/containers/buildah"
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+)
+
// Accepted values for the `buildah inspect --type` flag.
const (
	inspectTypeContainer = "container"
	inspectTypeImage     = "image"
	inspectTypeManifest  = "manifest"
)
+
// inspectResults holds the flag values for `buildah inspect`.
type inspectResults struct {
	format      string // --format: optional Go template for the output
	inspectType string // --type: container (default), image, or manifest
}
+
// init registers the `buildah inspect` subcommand and its flags.
func init() {
	var (
		opts inspectResults
		inspectDescription = "\n Inspects a build container's or built image's configuration."
	)

	inspectCommand := &cobra.Command{
		Use:   "inspect",
		Short: "Inspect the configuration of a container or image",
		Long:  inspectDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			return inspectCmd(cmd, args, opts)
		},
		Example: `buildah inspect containerID
  buildah inspect --type image imageID
  buildah inspect --format '{{.OCIv1.Config.Env}}' alpine`,
	}
	inspectCommand.SetUsageTemplate(UsageTemplate())

	flags := inspectCommand.Flags()
	flags.SetInterspersed(false)
	flags.StringVarP(&opts.format, "format", "f", "", "use `format` as a Go template to format the output")
	flags.StringVarP(&opts.inspectType, "type", "t", inspectTypeContainer, "look at the item of the specified `type` (container or image) and name")

	rootCmd.AddCommand(inspectCommand)
}
+
+func inspectCmd(c *cobra.Command, args []string, iopts inspectResults) error {
+ var builder *buildah.Builder
+
+ if len(args) == 0 {
+ return errors.New("container or image name must be specified")
+ }
+ if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
+ return err
+ }
+ if len(args) > 1 {
+ return errors.New("too many arguments specified")
+ }
+
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return fmt.Errorf("building system context: %w", err)
+ }
+
+ name := args[0]
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ ctx := getContext()
+
+ switch iopts.inspectType {
+ case inspectTypeContainer:
+ builder, err = openBuilder(ctx, store, name)
+ if err != nil {
+ if c.Flag("type").Changed {
+ return fmt.Errorf("reading build container: %w", err)
+ }
+ builder, err = openImage(ctx, systemContext, store, name)
+ if err != nil {
+ if manifestErr := manifestInspect(ctx, store, systemContext, name); manifestErr == nil {
+ return nil
+ }
+ return err
+ }
+ }
+ case inspectTypeImage:
+ builder, err = openImage(ctx, systemContext, store, name)
+ if err != nil {
+ return err
+ }
+ case inspectTypeManifest:
+ return manifestInspect(ctx, store, systemContext, name)
+ default:
+ return fmt.Errorf("the only recognized types are %q and %q", inspectTypeContainer, inspectTypeImage)
+ }
+ out := buildah.GetBuildInfo(builder)
+ if iopts.format != "" {
+ format := iopts.format
+ if matched, err := regexp.MatchString("{{.*}}", format); err != nil {
+ return fmt.Errorf("validating format provided: %s: %w", format, err)
+ } else if !matched {
+ return fmt.Errorf("invalid format provided: %s", format)
+ }
+ t, err := template.New("format").Parse(format)
+ if err != nil {
+ return fmt.Errorf("Template parsing error: %w", err)
+ }
+ if err = t.Execute(os.Stdout, out); err != nil {
+ return err
+ }
+ if term.IsTerminal(int(os.Stdout.Fd())) {
+ fmt.Println()
+ }
+ return nil
+ }
+
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ if term.IsTerminal(int(os.Stdout.Fd())) {
+ enc.SetEscapeHTML(false)
+ }
+ return enc.Encode(out)
+}
diff --git a/cmd/buildah/login.go b/cmd/buildah/login.go
new file mode 100644
index 0000000..e39883c
--- /dev/null
+++ b/cmd/buildah/login.go
@@ -0,0 +1,73 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/pkg/auth"
+ "github.com/spf13/cobra"
+)
+
// loginReply bundles the shared containers/common login options with the
// buildah-specific --get-login and --tls-verify flags.
type loginReply struct {
	loginOpts auth.LoginOptions
	getLogin  bool
	tlsVerify bool
}
+
+func init() {
+ var (
+ opts = loginReply{
+ loginOpts: auth.LoginOptions{
+ Stdin: os.Stdin,
+ Stdout: os.Stdout,
+ AcceptRepositories: true,
+ },
+ }
+ loginDescription = "Login to a container registry on a specified server."
+ )
+ loginCommand := &cobra.Command{
+ Use: "login",
+ Short: "Login to a container registry",
+ Long: loginDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return loginCmd(cmd, args, &opts)
+ },
+ Example: `buildah login quay.io`,
+ }
+ loginCommand.SetUsageTemplate(UsageTemplate())
+
+ flags := loginCommand.Flags()
+ flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+ flags.BoolVar(&opts.getLogin, "get-login", true, "return the current login user for the registry")
+ flags.AddFlagSet(auth.GetLoginFlags(&opts.loginOpts))
+ opts.loginOpts.Stdin = os.Stdin
+ opts.loginOpts.Stdout = os.Stdout
+ rootCmd.AddCommand(loginCommand)
+}
+
+func loginCmd(c *cobra.Command, args []string, iopts *loginReply) error {
+ if len(args) > 1 {
+ return errors.New("too many arguments, login takes only 1 argument")
+ }
+ if len(args) == 0 {
+ return errors.New("please specify a registry to login to")
+ }
+
+ if err := setXDGRuntimeDir(); err != nil {
+ return err
+ }
+
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return fmt.Errorf("building system context: %w", err)
+ }
+ // parse.SystemContextFromOptions may point this field to an auth.json or to a .docker/config.json;
+ // that’s fair enough for reads, but incorrect for writes (the two files have incompatible formats),
+ // and it interferes with the auth.Login’s own argument parsing.
+ systemContext.AuthFilePath = ""
+ ctx := getContext()
+ iopts.loginOpts.GetLoginSet = c.Flag("get-login").Changed
+ return auth.Login(ctx, systemContext, &iopts.loginOpts, args)
+}
diff --git a/cmd/buildah/logout.go b/cmd/buildah/logout.go
new file mode 100644
index 0000000..92117fa
--- /dev/null
+++ b/cmd/buildah/logout.go
@@ -0,0 +1,59 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/pkg/auth"
+ "github.com/spf13/cobra"
+)
+
// init registers the `buildah logout` subcommand; its flags come from the
// shared containers/common auth flag set.
func init() {
	var (
		opts = auth.LogoutOptions{
			Stdout:             os.Stdout,
			AcceptRepositories: true,
		}
		logoutDescription = "Remove the cached username and password for the registry."
	)
	logoutCommand := &cobra.Command{
		Use:   "logout",
		Short: "Logout of a container registry",
		Long:  logoutDescription,
		RunE: func(cmd *cobra.Command, args []string) error {
			return logoutCmd(cmd, args, &opts)
		},
		Example: `buildah logout quay.io`,
	}
	logoutCommand.SetUsageTemplate(UsageTemplate())

	flags := auth.GetLogoutFlags(&opts)
	flags.SetInterspersed(false)
	logoutCommand.Flags().AddFlagSet(flags)
	rootCmd.AddCommand(logoutCommand)
}
+
+func logoutCmd(c *cobra.Command, args []string, iopts *auth.LogoutOptions) error {
+ if len(args) > 1 {
+ return errors.New("too many arguments, logout takes at most 1 argument")
+ }
+ if len(args) == 0 && !iopts.All {
+ return errors.New("registry must be given")
+ }
+
+ if err := setXDGRuntimeDir(); err != nil {
+ return err
+ }
+
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return fmt.Errorf("building system context: %w", err)
+ }
+ // parse.SystemContextFromOptions may point this field to an auth.json or to a .docker/config.json;
+ // that’s fair enough for reads, but incorrect for writes (the two files have incompatible formats),
+ // and it interferes with the auth.Logout’s own argument parsing.
+ systemContext.AuthFilePath = ""
+ return auth.Logout(systemContext, iopts, args)
+}
diff --git a/cmd/buildah/main.go b/cmd/buildah/main.go
new file mode 100644
index 0000000..335878e
--- /dev/null
+++ b/cmd/buildah/main.go
@@ -0,0 +1,253 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "runtime"
+ "runtime/pprof"
+ "strings"
+ "syscall"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/unshare"
+ ispecs "github.com/opencontainers/image-spec/specs-go"
+ rspecs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
// globalFlags holds the values of buildah's persistent (root-level)
// flags, shared by every subcommand.
type globalFlags struct {
	Debug                      bool
	LogLevel                   string
	Root                       string
	RunRoot                    string
	StorageDriver              string
	RegistriesConf             string
	RegistriesConfDir          string
	DefaultMountsFile          string
	StorageOpts                []string
	UserNSUID                  []string
	UserNSGID                  []string
	CPUProfile                 string
	cpuProfileFile             *os.File // opened in before(), closed in after()
	MemoryProfile              string
	UserShortNameAliasConfPath string
	CgroupManager              string
}
+
// rootCmd is the top-level `buildah` command: invoked bare it prints
// help; before/after hooks run around every subcommand.
var rootCmd = &cobra.Command{
	Use:  "buildah",
	Long: "A tool that facilitates building OCI images",
	RunE: func(cmd *cobra.Command, args []string) error {
		return cmd.Help()
	},
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		return before(cmd)
	},
	PersistentPostRunE: func(cmd *cobra.Command, args []string) error {
		return after(cmd)
	},
	SilenceUsage:  true,
	SilenceErrors: true,
}
+
var (
	// globalFlagResults receives every persistent flag value.
	globalFlagResults globalFlags
	// exitCode is the process exit status set in main().
	exitCode int
)
+
+func init() {
+ var (
+ defaultStoreDriverOptions []string
+ )
+ storageOptions, err := storage.DefaultStoreOptions(false, 0)
+ if err != nil {
+ logrus.Errorf(err.Error())
+ os.Exit(1)
+
+ }
+
+ if len(storageOptions.GraphDriverOptions) > 0 {
+ optionSlice := storageOptions.GraphDriverOptions[:]
+ defaultStoreDriverOptions = optionSlice
+ }
+
+ containerConfig, err := config.Default()
+ if err != nil {
+ logrus.Errorf(err.Error())
+ os.Exit(1)
+ }
+ containerConfig.CheckCgroupsAndAdjustConfig()
+
+ cobra.OnInitialize(initConfig)
+ // Disable the implicit `completion` command in cobra.
+ rootCmd.CompletionOptions.DisableDefaultCmd = true
+ //rootCmd.TraverseChildren = true
+ rootCmd.Version = fmt.Sprintf("%s (image-spec %s, runtime-spec %s)", define.Version, ispecs.Version, rspecs.Version)
+ rootCmd.PersistentFlags().BoolVar(&globalFlagResults.Debug, "debug", false, "print debugging information")
+ // TODO Need to allow for environment variable
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.RegistriesConf, "registries-conf", "", "path to registries.conf file (not usually used)")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.RegistriesConfDir, "registries-conf-dir", "", "path to registries.conf.d directory (not usually used)")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.UserShortNameAliasConfPath, "short-name-alias-conf", "", "path to short name alias cache file (not usually used)")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.Root, "root", storageOptions.GraphRoot, "storage root dir")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.RunRoot, "runroot", storageOptions.RunRoot, "storage state dir")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.CgroupManager, "cgroup-manager", containerConfig.Engine.CgroupManager, "cgroup manager")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.StorageDriver, "storage-driver", storageOptions.GraphDriverName, "storage-driver")
+ rootCmd.PersistentFlags().StringSliceVar(&globalFlagResults.StorageOpts, "storage-opt", defaultStoreDriverOptions, "storage driver option")
+ rootCmd.PersistentFlags().StringSliceVar(&globalFlagResults.UserNSUID, "userns-uid-map", []string{}, "default `ctrID:hostID:length` UID mapping to use")
+ rootCmd.PersistentFlags().StringSliceVar(&globalFlagResults.UserNSGID, "userns-gid-map", []string{}, "default `ctrID:hostID:length` GID mapping to use")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.DefaultMountsFile, "default-mounts-file", "", "path to default mounts file")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.LogLevel, logLevel, "warn", `the log level to be used, one of "trace", "debug", "info", "warn", "error", "fatal", or "panic"`)
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.CPUProfile, "cpu-profile", "", "`file` to write CPU profile")
+ rootCmd.PersistentFlags().StringVar(&globalFlagResults.MemoryProfile, "memory-profile", "", "`file` to write memory profile")
+
+ if err := rootCmd.PersistentFlags().MarkHidden("cpu-profile"); err != nil {
+ logrus.Fatalf("unable to mark cpu-profile flag as hidden: %v", err)
+ }
+ if err := rootCmd.PersistentFlags().MarkHidden("debug"); err != nil {
+ logrus.Fatalf("unable to mark debug flag as hidden: %v", err)
+ }
+ if err := rootCmd.PersistentFlags().MarkHidden("default-mounts-file"); err != nil {
+ logrus.Fatalf("unable to mark default-mounts-file flag as hidden: %v", err)
+ }
+ if err := rootCmd.PersistentFlags().MarkHidden("memory-profile"); err != nil {
+ logrus.Fatalf("unable to mark memory-profile flag as hidden: %v", err)
+ }
+}
+
// initConfig is the cobra.OnInitialize hook; intentionally empty for now.
func initConfig() {
	// TODO Cobra allows us to do extra stuff here at init
	// time if we ever want to take advantage.
}
+
// logLevel is the name of the persistent --log-level flag.
const logLevel = "log-level"
+
// before is the persistent pre-run hook for every subcommand.  It
// configures logging, optionally re-execs into a user namespace, starts
// CPU profiling if requested, and exports environment variables declared
// in containers.conf.
func before(cmd *cobra.Command) error {
	strLvl, err := cmd.Flags().GetString(logLevel)
	if err != nil {
		return err
	}
	logrusLvl, err := logrus.ParseLevel(strLvl)
	if err != nil {
		return fmt.Errorf("unable to parse log level: %w", err)
	}
	logrus.SetLevel(logrusLvl)
	// --debug overrides --log-level.
	if globalFlagResults.Debug {
		logrus.SetLevel(logrus.DebugLevel)
	}

	// These commands skip everything below, including the user-namespace
	// re-exec (NOTE(review): presumably `mount` must stay in the host
	// namespaces to be useful — confirm).
	switch cmd.Use {
	case "", "help", "version", "mount":
		return nil
	}
	debugCapabilities()
	unshare.MaybeReexecUsingUserNamespace(false)
	if globalFlagResults.CPUProfile != "" {
		globalFlagResults.cpuProfileFile, err = os.Create(globalFlagResults.CPUProfile)
		if err != nil {
			logrus.Fatalf("could not create CPU profile %s: %v", globalFlagResults.CPUProfile, err)
		}
		if err = pprof.StartCPUProfile(globalFlagResults.cpuProfileFile); err != nil {
			logrus.Fatalf("error starting CPU profiling: %v", err)
		}
	}

	defaultContainerConfig, err := config.Default()
	if err != nil {
		return err
	}

	// Export KEY=value pairs from containers.conf without clobbering
	// variables already present in the process environment.
	for _, env := range defaultContainerConfig.Engine.Env.Get() {
		splitEnv := strings.SplitN(env, "=", 2)
		if len(splitEnv) != 2 {
			return fmt.Errorf("invalid environment variable %q from containers.conf, valid configuration is KEY=value pair", env)
		}
		// skip if the env is already defined
		if _, ok := os.LookupEnv(splitEnv[0]); ok {
			logrus.Debugf("environment variable %q is already defined, skip the settings from containers.conf", splitEnv[0])
			continue
		}
		if err := os.Setenv(splitEnv[0], splitEnv[1]); err != nil {
			return err
		}
	}

	return nil
}
+
+func shutdownStore(cmd *cobra.Command) error {
+ if needToShutdownStore {
+ store, err := getStore(cmd)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("shutting down the store")
+ needToShutdownStore = false
+ if _, err = store.Shutdown(false); err != nil {
+ if errors.Is(err, storage.ErrLayerUsedByContainer) {
+ logrus.Infof("failed to shutdown storage: %q", err)
+ } else {
+ logrus.Warnf("failed to shutdown storage: %q", err)
+ }
+ }
+ }
+ return nil
+}
+
+func after(cmd *cobra.Command) error {
+ if err := shutdownStore(cmd); err != nil {
+ return err
+ }
+
+ if globalFlagResults.CPUProfile != "" {
+ pprof.StopCPUProfile()
+ globalFlagResults.cpuProfileFile.Close()
+ }
+ if globalFlagResults.MemoryProfile != "" {
+ memoryProfileFile, err := os.Create(globalFlagResults.MemoryProfile)
+ if err != nil {
+ logrus.Fatalf("could not create memory profile %s: %v", globalFlagResults.MemoryProfile, err)
+ }
+ defer memoryProfileFile.Close()
+ runtime.GC()
+ if err := pprof.Lookup("heap").WriteTo(memoryProfileFile, 1); err != nil {
+ logrus.Fatalf("could not write memory profile %s: %v", globalFlagResults.MemoryProfile, err)
+ }
+ }
+ return nil
+}
+
// main is the CLI entry point.  When this process is a re-exec'd helper,
// InitReexec handles it and we return immediately; otherwise the cobra
// command tree runs and the process exits with the subcommand's status.
func main() {
	if buildah.InitReexec() {
		return
	}

	// Hard code TMPDIR functions to use $TMPDIR or /var/tmp
	os.Setenv("TMPDIR", parse.GetTempDir())

	if err := rootCmd.Execute(); err != nil {
		if logrus.IsLevelEnabled(logrus.TraceLevel) {
			// At trace level, %+v exposes the full wrapped-error detail.
			fmt.Fprintf(os.Stderr, "Error: %+v\n", err)
		} else {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		}
		exitCode = cli.ExecErrorCodeGeneric
		// If a child process failed, propagate its exit status instead of
		// the generic code.
		var ee *exec.ExitError
		if errors.As(err, &ee) {
			if w, ok := ee.Sys().(syscall.WaitStatus); ok {
				exitCode = w.ExitStatus()
			}
		}
		// Best-effort store shutdown on the error path; os.Exit below
		// would otherwise skip it.
		if err := shutdownStore(rootCmd); err != nil {
			logrus.Warnf("failed to shutdown storage: %q", err)
		}
	}
	os.Exit(exitCode)
}
diff --git a/cmd/buildah/manifest.go b/cmd/buildah/manifest.go
new file mode 100644
index 0000000..9568d4b
--- /dev/null
+++ b/cmd/buildah/manifest.go
@@ -0,0 +1,956 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libimage"
+ "github.com/containers/common/libimage/manifests"
+ "github.com/containers/common/pkg/auth"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/hashicorp/go-multierror"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// Option holders for the manifest subcommands.  Note these are declared as
+// type aliases ("= struct{...}") rather than defined types; the alias form
+// is equivalent here since no methods are attached.
+// manifestCreateOpts: flags for "manifest create".
+type manifestCreateOpts = struct {
+	os, arch                        string
+	all, tlsVerify, insecure, amend bool
+}
+// manifestAddOpts: flags for "manifest add", including per-entry platform
+// overrides and registry-access settings.
+type manifestAddOpts = struct {
+	authfile, certDir, creds, os, arch, variant, osVersion string
+	features, osFeatures, annotations                      []string
+	tlsVerify, insecure, all                               bool
+}
+// manifestRemoveOpts: "manifest remove" currently takes no flags.
+type manifestRemoveOpts = struct{}
+// manifestAnnotateOpts: per-entry overrides for "manifest annotate".
+type manifestAnnotateOpts = struct {
+	os, arch, variant, osVersion      string
+	features, osFeatures, annotations []string
+}
+// manifestInspectOpts: flags for "manifest inspect".
+type manifestInspectOpts = struct {
+	authfile  string
+	tlsVerify bool
+}
+
+// init registers the "manifest" parent command and all of its subcommands
+// (create, add, remove, exists, annotate, inspect, push, rm) on rootCmd,
+// binding each subcommand's flags into the option structs declared above.
+func init() {
+	var (
+		manifestDescription         = "\n  Creates, modifies, and pushes manifest lists and image indexes."
+		manifestCreateDescription   = "\n  Creates manifest lists and image indexes."
+		manifestAddDescription      = "\n  Adds an image to a manifest list or image index."
+		manifestRemoveDescription   = "\n  Removes an image from a manifest list or image index."
+		manifestAnnotateDescription = "\n  Adds or updates information about an entry in a manifest list or image index."
+		manifestInspectDescription  = "\n  Display the contents of a manifest list or image index."
+		manifestPushDescription     = "\n  Pushes manifest lists and image indexes to registries."
+		manifestRmDescription       = "\n  Remove one or more manifest lists from local storage."
+		manifestExistsDescription   = "\n  Check if a manifest list exists in local storage."
+		manifestCreateOpts          manifestCreateOpts
+		manifestAddOpts             manifestAddOpts
+		manifestRemoveOpts          manifestRemoveOpts
+		manifestAnnotateOpts        manifestAnnotateOpts
+		manifestInspectOpts         manifestInspectOpts
+		manifestPushOpts            pushOptions
+	)
+	// Parent "manifest" command; the subcommands are attached below.
+	manifestCommand := &cobra.Command{
+		Use:   "manifest",
+		Short: "Manipulate manifest lists and image indexes",
+		Long:  manifestDescription,
+		Example: `buildah manifest create localhost/list
+  buildah manifest add localhost/list localhost/image
+  buildah manifest annotate --annotation A=B localhost/list localhost/image
+  buildah manifest annotate --annotation A=B localhost/list sha256:entryManifestDigest
+  buildah manifest inspect localhost/list
+  buildah manifest push localhost/list transport:destination
+  buildah manifest remove localhost/list sha256:entryManifestDigest
+  buildah manifest rm localhost/list`,
+	}
+	manifestCommand.SetUsageTemplate(UsageTemplate())
+	rootCmd.AddCommand(manifestCommand)
+
+	// manifest create
+	manifestCreateCommand := &cobra.Command{
+		Use:   "create",
+		Short: "Create manifest list or image index",
+		Long:  manifestCreateDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return manifestCreateCmd(cmd, args, manifestCreateOpts)
+		},
+		Example: `buildah manifest create mylist:v1.11
+  buildah manifest create mylist:v1.11 arch-specific-image-to-add
+  buildah manifest create --all mylist:v1.11 transport:tagged-image-to-add`,
+		Args: cobra.MinimumNArgs(1),
+	}
+	manifestCreateCommand.SetUsageTemplate(UsageTemplate())
+	flags := manifestCreateCommand.Flags()
+	flags.BoolVar(&manifestCreateOpts.all, "all", false, "add all of the lists' images if the images to add are lists")
+	flags.BoolVar(&manifestCreateOpts.amend, "amend", false, "modify an existing list if one with the desired name already exists")
+	// --os/--arch/--insecure are accepted but hidden from help output.
+	flags.StringVar(&manifestCreateOpts.os, "os", "", "if any of the specified images is a list, choose the one for `os`")
+	if err := flags.MarkHidden("os"); err != nil {
+		panic(fmt.Sprintf("error marking --os as hidden: %v", err))
+	}
+	flags.StringVar(&manifestCreateOpts.arch, "arch", "", "if any of the specified images is a list, choose the one for `arch`")
+	if err := flags.MarkHidden("arch"); err != nil {
+		panic(fmt.Sprintf("error marking --arch as hidden: %v", err))
+	}
+	flags.BoolVar(&manifestCreateOpts.insecure, "insecure", false, "neither require HTTPS nor verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	if err := flags.MarkHidden("insecure"); err != nil {
+		panic(fmt.Sprintf("error marking insecure as hidden: %v", err))
+	}
+	flags.BoolVar(&manifestCreateOpts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	flags.SetNormalizeFunc(cli.AliasFlags)
+	manifestCommand.AddCommand(manifestCreateCommand)
+
+	// manifest add
+	manifestAddCommand := &cobra.Command{
+		Use:   "add",
+		Short: "Add images to a manifest list or image index",
+		Long:  manifestAddDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return manifestAddCmd(cmd, args, manifestAddOpts)
+		},
+		Example: `buildah manifest add mylist:v1.11 image:v1.11-amd64
+  buildah manifest add mylist:v1.11 transport:imageName`,
+		Args: cobra.MinimumNArgs(2),
+	}
+	manifestAddCommand.SetUsageTemplate(UsageTemplate())
+	flags = manifestAddCommand.Flags()
+	flags.StringVar(&manifestAddOpts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	flags.StringVar(&manifestAddOpts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+	flags.StringVar(&manifestAddOpts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
+	flags.StringVar(&manifestAddOpts.os, "os", "", "override the `OS` of the specified image")
+	flags.StringVar(&manifestAddOpts.arch, "arch", "", "override the `architecture` of the specified image")
+	flags.StringVar(&manifestAddOpts.variant, "variant", "", "override the `variant` of the specified image")
+	flags.StringVar(&manifestAddOpts.osVersion, "os-version", "", "override the OS `version` of the specified image")
+	flags.StringSliceVar(&manifestAddOpts.features, "features", nil, "override the `features` of the specified image")
+	flags.StringSliceVar(&manifestAddOpts.osFeatures, "os-features", nil, "override the OS `features` of the specified image")
+	flags.StringSliceVar(&manifestAddOpts.annotations, "annotation", nil, "set an `annotation` for the specified image")
+	flags.BoolVar(&manifestAddOpts.insecure, "insecure", false, "neither require HTTPS nor verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	if err := flags.MarkHidden("insecure"); err != nil {
+		panic(fmt.Sprintf("error marking insecure as hidden: %v", err))
+	}
+	flags.BoolVar(&manifestAddOpts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	flags.BoolVar(&manifestAddOpts.all, "all", false, "add all of the list's images if the image is a list")
+	flags.SetNormalizeFunc(cli.AliasFlags)
+	manifestCommand.AddCommand(manifestAddCommand)
+
+	// manifest remove
+	manifestRemoveCommand := &cobra.Command{
+		Use:   "remove",
+		Short: "Remove an entry from a manifest list or image index",
+		Long:  manifestRemoveDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return manifestRemoveCmd(cmd, args, manifestRemoveOpts)
+		},
+		Example: `buildah manifest remove mylist:v1.11 sha256:15352d97781ffdf357bf3459c037be3efac4133dc9070c2dce7eca7c05c3e736`,
+		Args:    cobra.MinimumNArgs(2),
+	}
+	manifestRemoveCommand.SetUsageTemplate(UsageTemplate())
+	manifestCommand.AddCommand(manifestRemoveCommand)
+
+	// manifest exists
+	manifestExistsCommand := &cobra.Command{
+		Use:   "exists",
+		Short: "Check if a manifest list exists in local storage",
+		Long:  manifestExistsDescription,
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return manifestExistsCmd(cmd, args)
+		},
+		Example: "buildah manifest exists mylist",
+	}
+	manifestExistsCommand.SetUsageTemplate(UsageTemplate())
+	manifestCommand.AddCommand(manifestExistsCommand)
+
+	// manifest annotate
+	manifestAnnotateCommand := &cobra.Command{
+		Use:   "annotate",
+		Short: "Add or update information about an entry in a manifest list or image index",
+		Long:  manifestAnnotateDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return manifestAnnotateCmd(cmd, args, manifestAnnotateOpts)
+		},
+		Example: `buildah manifest annotate --annotation left=right mylist:v1.11 image:v1.11-amd64`,
+		Args:    cobra.MinimumNArgs(2),
+	}
+	flags = manifestAnnotateCommand.Flags()
+	flags.StringVar(&manifestAnnotateOpts.os, "os", "", "override the `OS` of the specified image")
+	flags.StringVar(&manifestAnnotateOpts.arch, "arch", "", "override the `Architecture` of the specified image")
+	flags.StringVar(&manifestAnnotateOpts.variant, "variant", "", "override the `Variant` of the specified image")
+	flags.StringVar(&manifestAnnotateOpts.osVersion, "os-version", "", "override the os `version` of the specified image")
+	flags.StringSliceVar(&manifestAnnotateOpts.features, "features", nil, "override the `features` of the specified image")
+	flags.StringSliceVar(&manifestAnnotateOpts.osFeatures, "os-features", nil, "override the os `features` of the specified image")
+	flags.StringSliceVar(&manifestAnnotateOpts.annotations, "annotation", nil, "set an `annotation` for the specified image")
+	manifestAnnotateCommand.SetUsageTemplate(UsageTemplate())
+	manifestCommand.AddCommand(manifestAnnotateCommand)
+
+	// manifest inspect
+	manifestInspectCommand := &cobra.Command{
+		Use:   "inspect",
+		Short: "Display the contents of a manifest list or image index",
+		Long:  manifestInspectDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return manifestInspectCmd(cmd, args, manifestInspectOpts)
+		},
+		Example: `buildah manifest inspect mylist:v1.11`,
+		Args:    cobra.MinimumNArgs(1),
+	}
+	flags = manifestInspectCommand.Flags()
+	flags.StringVar(&manifestInspectOpts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	flags.BoolVar(&manifestInspectOpts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	manifestInspectCommand.SetUsageTemplate(UsageTemplate())
+	manifestCommand.AddCommand(manifestInspectCommand)
+
+	// manifest push
+	manifestPushCommand := &cobra.Command{
+		Use:   "push",
+		Short: "Push a manifest list or image index to a registry",
+		Long:  manifestPushDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return manifestPushCmd(cmd, args, manifestPushOpts)
+		},
+		Example: `buildah manifest push mylist:v1.11 transport:imageName`,
+		Args:    cobra.MinimumNArgs(1),
+	}
+	manifestPushCommand.SetUsageTemplate(UsageTemplate())
+	flags = manifestPushCommand.Flags()
+	flags.BoolVar(&manifestPushOpts.rm, "rm", false, "remove the manifest list if push succeeds")
+	flags.BoolVar(&manifestPushOpts.all, "all", false, "also push the images in the list")
+	flags.StringVar(&manifestPushOpts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	flags.StringVar(&manifestPushOpts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+	flags.StringVar(&manifestPushOpts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
+	flags.StringVar(&manifestPushOpts.digestfile, "digestfile", "", "after copying the image, write the digest of the resulting digest to the file")
+	flags.BoolVarP(&manifestPushOpts.forceCompressionFormat, "force-compression", "", false, "use the specified compression algorithm if the destination contains a differently-compressed variant already")
+	flags.StringVar(&manifestPushOpts.compressionFormat, "compression-format", "", "compression format to use")
+	flags.IntVar(&manifestPushOpts.compressionLevel, "compression-level", 0, "compression level to use")
+	flags.StringVarP(&manifestPushOpts.format, "format", "f", "", "manifest type (oci or v2s2) to attempt to use when pushing the manifest list (default is manifest type of source)")
+	flags.StringSliceVar(&manifestPushOpts.addCompression, "add-compression", nil, "add instances with selected compression while pushing")
+	flags.BoolVarP(&manifestPushOpts.removeSignatures, "remove-signatures", "", false, "don't copy signatures when pushing images")
+	flags.StringVar(&manifestPushOpts.signBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
+	flags.StringVar(&manifestPushOpts.signaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
+	if err := flags.MarkHidden("signature-policy"); err != nil {
+		panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
+	}
+	flags.BoolVar(&manifestPushOpts.insecure, "insecure", false, "neither require HTTPS nor verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	if err := flags.MarkHidden("insecure"); err != nil {
+		panic(fmt.Sprintf("error marking insecure as hidden: %v", err))
+	}
+	flags.BoolVar(&manifestPushOpts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	flags.BoolVarP(&manifestPushOpts.quiet, "quiet", "q", false, "don't output progress information when pushing lists")
+	flags.SetNormalizeFunc(cli.AliasFlags)
+	manifestCommand.AddCommand(manifestPushCommand)
+
+	// manifest rm
+	manifestRmCommand := &cobra.Command{
+		Use:   "rm",
+		Short: "Remove manifest list or image index",
+		Long:  manifestRmDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return manifestRmCmd(cmd, args)
+		},
+		Example: `buildah manifest rm mylist:v1.11`,
+		Args:    cobra.MinimumNArgs(1),
+	}
+	manifestRmCommand.SetUsageTemplate(UsageTemplate())
+	manifestCommand.AddCommand(manifestRmCommand)
+}
+
+// manifestExistsCmd implements "buildah manifest exists".  It looks the
+// named list up in local storage; when the image is unknown it sets the
+// process exit code to 1 and returns nil (so no error text is printed),
+// while any other lookup failure is returned to the caller.
+func manifestExistsCmd(c *cobra.Command, args []string) error {
+	if len(args) == 0 {
+		return errors.New("At least a name must be specified for the list")
+	}
+	name := args[0]
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	_, err = runtime.LookupManifestList(name)
+	if err != nil {
+		if errors.Is(err, storage.ErrImageUnknown) {
+			// "Not found" is reported via the exit code only.
+			exitCode = 1
+		} else {
+			return err
+		}
+	}
+	return nil
+}
+
+// manifestCreateCmd implements "buildah manifest create".  It creates a new
+// manifest list (Docker schema2 list media type) named args[0], optionally
+// seeds it with the images named in args[1:] (preferring local images over
+// remote references), and prints the resulting image ID.  With --amend, an
+// existing list of the same name is loaded and extended instead of failing
+// with a duplicate-name error.
+func manifestCreateCmd(c *cobra.Command, args []string, opts manifestCreateOpts) error {
+	if len(args) == 0 {
+		return errors.New("At least a name must be specified for the list")
+	}
+	listImageSpec := args[0]
+	imageSpecs := args[1:]
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	list := manifests.Create()
+	var manifestListID string
+
+	names, err := util.ExpandNames([]string{listImageSpec}, systemContext, store)
+	if err != nil {
+		return fmt.Errorf("encountered while expanding image name %q: %w", listImageSpec, err)
+	}
+	if manifestListID, err = list.SaveToImage(store, "", names, manifest.DockerV2ListMediaType); err != nil {
+		if errors.Is(err, storage.ErrDuplicateName) && opts.amend {
+			// NOTE(review): this loop ranges over the expanded names but
+			// always looks up listImageSpec; presumably every successful
+			// lookup resolves to the same list — confirm.
+			for _, name := range names {
+				manifestList, err := runtime.LookupManifestList(listImageSpec)
+				if err != nil {
+					logrus.Debugf("no list named %q found: %v", listImageSpec, err)
+					continue
+				}
+				if _, list, err = manifests.LoadFromImage(store, manifestList.ID()); err != nil {
+					logrus.Debugf("no list found in %q", name)
+					continue
+				}
+				manifestListID = manifestList.ID()
+				break
+			}
+			// NOTE(review): list was initialized by manifests.Create()
+			// above and is only ever reassigned, so this nil check looks
+			// unreachable — confirm before relying on it.
+			if list == nil {
+				return fmt.Errorf("--amend specified but no matching manifest list found with name %q", listImageSpec)
+			}
+		} else {
+			return err
+		}
+	}
+
+	for _, imageSpec := range imageSpecs {
+		// Try the spec as a full transport reference, then with the
+		// default transport prefix, then as a local image name.
+		ref, err := alltransports.ParseImageName(imageSpec)
+		if err != nil {
+			if ref, err = alltransports.ParseImageName(util.DefaultTransport + imageSpec); err != nil {
+				// check if the local image exists
+				if ref, _, err = util.FindImage(store, "", systemContext, imageSpec); err != nil {
+					return err
+				}
+			}
+		}
+		refLocal, _, err := util.FindImage(store, "", systemContext, imageSpec)
+		if err == nil {
+			// Found local image so use that.
+			ref = refLocal
+		}
+		_, err = list.Add(getContext(), systemContext, ref, opts.all)
+		if err != nil {
+			return err
+		}
+	}
+
+	imageID, err := list.SaveToImage(store, manifestListID, names, manifest.DockerV2ListMediaType)
+	if err == nil {
+		fmt.Printf("%s\n", imageID)
+	}
+	return err
+}
+
+// manifestAddCmd implements "buildah manifest add".  It resolves args[1]
+// to an image reference (preferring a local image when one exists), adds
+// it to the manifest list named by args[0], applies any per-entry platform
+// and annotation overrides from opts, saves the updated list, and prints
+// "<listID>: <instanceDigest>".
+func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error {
+	if err := auth.CheckAuthFile(opts.authfile); err != nil {
+		return err
+	}
+
+	listImageSpec := ""
+	imageSpec := ""
+	switch len(args) {
+	case 0, 1:
+		return errors.New("At least a list image and an image to add must be specified")
+	case 2:
+		listImageSpec = args[0]
+		if listImageSpec == "" {
+			return fmt.Errorf(`Invalid image name "%s"`, args[0])
+		}
+		imageSpec = args[1]
+		if imageSpec == "" {
+			return fmt.Errorf(`Invalid image name "%s"`, args[1])
+		}
+	default:
+		return errors.New("At least two arguments are necessary: list and image to add to list")
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	manifestList, err := runtime.LookupManifestList(listImageSpec)
+	if err != nil {
+		return err
+	}
+	_, list, err := manifests.LoadFromImage(store, manifestList.ID())
+	if err != nil {
+		return err
+	}
+
+	// Try the spec as a full transport reference, then with the default
+	// transport prefix, then as a local image name.
+	ref, err := alltransports.ParseImageName(imageSpec)
+	if err != nil {
+		if ref, err = alltransports.ParseImageName(util.DefaultTransport + imageSpec); err != nil {
+			// check if the local image exists
+			if ref, _, err = util.FindImage(store, "", systemContext, imageSpec); err != nil {
+				return err
+			}
+		}
+	}
+
+	digest, err := list.Add(getContext(), systemContext, ref, opts.all)
+	if err != nil {
+		var storeErr error
+		// Retry without a custom system context. A user may want to add
+		// a custom platform (see #3511).
+		if ref, _, storeErr = util.FindImage(store, "", nil, imageSpec); storeErr != nil {
+			logrus.Errorf("Error while trying to find image on local storage: %v", storeErr)
+			return err
+		}
+		digest, storeErr = list.Add(getContext(), systemContext, ref, opts.all)
+		if storeErr != nil {
+			logrus.Errorf("Error while trying to add on manifest list: %v", storeErr)
+			// Report the original failure, not the retry's.
+			return err
+		}
+	}
+
+	// Apply any requested per-entry overrides to the new instance.
+	if opts.os != "" {
+		if err := list.SetOS(digest, opts.os); err != nil {
+			return err
+		}
+	}
+	if opts.osVersion != "" {
+		if err := list.SetOSVersion(digest, opts.osVersion); err != nil {
+			return err
+		}
+	}
+	if len(opts.osFeatures) != 0 {
+		if err := list.SetOSFeatures(digest, opts.osFeatures); err != nil {
+			return err
+		}
+	}
+	if opts.arch != "" {
+		if err := list.SetArchitecture(digest, opts.arch); err != nil {
+			return err
+		}
+	}
+	if opts.variant != "" {
+		if err := list.SetVariant(digest, opts.variant); err != nil {
+			return err
+		}
+	}
+	if len(opts.features) != 0 {
+		if err := list.SetFeatures(digest, opts.features); err != nil {
+			return err
+		}
+	}
+	if len(opts.annotations) != 0 {
+		// Each --annotation is a "key=value" pair.
+		annotations := make(map[string]string)
+		for _, annotationSpec := range opts.annotations {
+			spec := strings.SplitN(annotationSpec, "=", 2)
+			if len(spec) != 2 {
+				return fmt.Errorf("no value given for annotation %q", spec[0])
+			}
+			annotations[spec[0]] = spec[1]
+		}
+		if err := list.SetAnnotations(&digest, annotations); err != nil {
+			return err
+		}
+	}
+
+	updatedListID, err := list.SaveToImage(store, manifestList.ID(), nil, "")
+	if err == nil {
+		fmt.Printf("%s: %s\n", updatedListID, digest.String())
+	}
+
+	return err
+}
+
+// manifestRemoveCmd implements "buildah manifest remove".  It parses
+// args[1] as an instance digest, removes that instance from the manifest
+// list named by args[0], and prints "<listID>: <digest>" on success.
+func manifestRemoveCmd(c *cobra.Command, args []string, opts manifestRemoveOpts) error {
+	listImageSpec := ""
+	var instanceDigest digest.Digest
+	switch len(args) {
+	case 0, 1:
+		return errors.New("At least a list image and one or more instance digests must be specified")
+	case 2:
+		listImageSpec = args[0]
+		if listImageSpec == "" {
+			return fmt.Errorf(`Invalid image name "%s"`, args[0])
+		}
+		instanceSpec := args[1]
+		if instanceSpec == "" {
+			return fmt.Errorf(`Invalid instance "%s"`, args[1])
+		}
+		d, err := digest.Parse(instanceSpec)
+		if err != nil {
+			return fmt.Errorf(`Invalid instance "%s": %v`, args[1], err)
+		}
+		instanceDigest = d
+	default:
+		return errors.New("At least two arguments are necessary: list and digest of instance to remove from list")
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+	manifestList, err := runtime.LookupManifestList(listImageSpec)
+	if err != nil {
+		return err
+	}
+
+	if err := manifestList.RemoveInstance(instanceDigest); err != nil {
+		return err
+	}
+
+	fmt.Printf("%s: %s\n", manifestList.ID(), instanceDigest.String())
+
+	return nil
+}
+
+// manifestRmCmd implements "buildah manifest rm".  It removes the named
+// manifest lists from (writable) local storage, printing the untagged
+// names and the IDs of the lists actually removed, and returns all
+// removal errors combined into one.
+func manifestRmCmd(c *cobra.Command, args []string) error {
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	// Only remove from writable storage, and treat the names as
+	// manifest lists rather than plain images.
+	options := &libimage.RemoveImagesOptions{
+		Filters:        []string{"readonly=false"},
+		LookupManifest: true,
+	}
+	rmiReports, rmiErrors := runtime.RemoveImages(context.Background(), args, options)
+	for _, r := range rmiReports {
+		for _, u := range r.Untagged {
+			fmt.Printf("untagged: %s\n", u)
+		}
+	}
+	for _, r := range rmiReports {
+		if r.Removed {
+			fmt.Printf("%s\n", r.ID)
+		}
+	}
+
+	var multiE *multierror.Error
+	multiE = multierror.Append(multiE, rmiErrors...)
+	return multiE.ErrorOrNil()
+}
+
+// manifestAnnotateCmd implements "buildah manifest annotate".  It locates
+// the manifest list named by args[0], resolves args[1] to the digest of an
+// instance (either a literal digest or, failing that, the digest of a
+// local image's manifest), applies the requested OS/architecture/variant/
+// feature and annotation overrides to that instance, saves the updated
+// list, and prints "<listID>: <digest>".
+func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateOpts) error {
+	listImageSpec := ""
+	imageSpec := ""
+	switch len(args) {
+	case 0:
+		return errors.New("At least a list image must be specified")
+	case 1:
+		listImageSpec = args[0]
+		if listImageSpec == "" {
+			return fmt.Errorf(`Invalid image name "%s"`, args[0])
+		}
+	case 2:
+		listImageSpec = args[0]
+		if listImageSpec == "" {
+			return fmt.Errorf(`Invalid image name "%s"`, args[0])
+		}
+		imageSpec = args[1]
+		if imageSpec == "" {
+			return fmt.Errorf(`Invalid image name "%s"`, args[1])
+		}
+	default:
+		return errors.New("At least two arguments are necessary: list and image to add to list")
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	manifestList, err := runtime.LookupManifestList(listImageSpec)
+	if err != nil {
+		return err
+	}
+
+	_, list, err := manifests.LoadFromImage(store, manifestList.ID())
+	if err != nil {
+		return err
+	}
+
+	digest, err := digest.Parse(imageSpec)
+	if err != nil {
+		// Not a literal digest: treat the spec as a local image name and
+		// use the digest of that image's manifest instead.
+		ctx := getContext()
+		ref, _, err := util.FindImage(store, "", systemContext, imageSpec)
+		if err != nil {
+			return err
+		}
+		img, err := ref.NewImageSource(ctx, systemContext)
+		if err != nil {
+			return err
+		}
+		defer img.Close()
+		manifestBytes, _, err := img.GetManifest(ctx, nil)
+		if err != nil {
+			return err
+		}
+		digest, err = manifest.Digest(manifestBytes)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Apply the requested per-entry overrides.
+	if opts.os != "" {
+		if err := list.SetOS(digest, opts.os); err != nil {
+			return err
+		}
+	}
+	if opts.osVersion != "" {
+		if err := list.SetOSVersion(digest, opts.osVersion); err != nil {
+			return err
+		}
+	}
+	if len(opts.osFeatures) != 0 {
+		if err := list.SetOSFeatures(digest, opts.osFeatures); err != nil {
+			return err
+		}
+	}
+	if opts.arch != "" {
+		if err := list.SetArchitecture(digest, opts.arch); err != nil {
+			return err
+		}
+	}
+	if opts.variant != "" {
+		if err := list.SetVariant(digest, opts.variant); err != nil {
+			return err
+		}
+	}
+	if len(opts.features) != 0 {
+		if err := list.SetFeatures(digest, opts.features); err != nil {
+			return err
+		}
+	}
+	if len(opts.annotations) != 0 {
+		// Each --annotation is a "key=value" pair.
+		annotations := make(map[string]string)
+		for _, annotationSpec := range opts.annotations {
+			spec := strings.SplitN(annotationSpec, "=", 2)
+			if len(spec) != 2 {
+				return fmt.Errorf("no value given for annotation %q", spec[0])
+			}
+			annotations[spec[0]] = spec[1]
+		}
+		if err := list.SetAnnotations(&digest, annotations); err != nil {
+			return err
+		}
+	}
+
+	updatedListID, err := list.SaveToImage(store, manifestList.ID(), nil, "")
+	if err == nil {
+		fmt.Printf("%s: %s\n", updatedListID, digest.String())
+	}
+
+	// Bug fix: propagate the SaveToImage error instead of returning nil
+	// unconditionally, so a failed save is reported to the caller
+	// (matching manifestAddCmd's behavior).
+	return err
+}
+
+// manifestInspectCmd implements "buildah manifest inspect".  It validates
+// the single image-name argument (checking the auth file only when the
+// --authfile flag was explicitly set) and delegates the actual lookup and
+// printing to manifestInspect.
+func manifestInspectCmd(c *cobra.Command, args []string, opts manifestInspectOpts) error {
+	if c.Flag("authfile").Changed {
+		if err := auth.CheckAuthFile(opts.authfile); err != nil {
+			return err
+		}
+	}
+	imageSpec := ""
+	switch len(args) {
+	case 0:
+		return errors.New("At least a source list ID must be specified")
+	case 1:
+		imageSpec = args[0]
+		if imageSpec == "" {
+			return fmt.Errorf(`Invalid image name "%s"`, imageSpec)
+		}
+	default:
+		return errors.New("Only one argument is necessary for inspect: an image name")
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+
+	return manifestInspect(getContext(), store, systemContext, imageSpec)
+}
+
+// manifestInspect pretty-prints the contents of the manifest list or image
+// index named by imageSpec.  It first tries to resolve the name in local
+// storage; failing that, it collects candidate image references (expanded
+// names, a local image match, or a transport reference) and probes each
+// one until it finds a manifest of a multi-image media type, printing the
+// first match as indented JSON.
+func manifestInspect(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageSpec string) error {
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	printManifest := func(manifest []byte) error {
+		var b bytes.Buffer
+		err = json.Indent(&b, manifest, "", "    ")
+		if err != nil {
+			return fmt.Errorf("rendering manifest for display: %w", err)
+		}
+
+		fmt.Printf("%s\n", b.String())
+		return nil
+	}
+
+	// Before doing a remote lookup, attempt to resolve the manifest list
+	// locally.
+	manifestList, err := runtime.LookupManifestList(imageSpec)
+	if err == nil {
+		schema2List, err := manifestList.Inspect()
+		if err != nil {
+			return err
+		}
+
+		rawSchema2List, err := json.Marshal(schema2List)
+		if err != nil {
+			return err
+		}
+
+		return printManifest(rawSchema2List)
+	}
+	if !errors.Is(err, storage.ErrImageUnknown) && !errors.Is(err, libimage.ErrNotAManifestList) {
+		return err
+	}
+
+	// TODO: at some point `libimage` should support resolving manifests
+	// like that. Similar to `libimage.Runtime.LookupImage` we could
+	// implement a `*.LookupImageIndex`.
+	refs, err := util.ResolveNameToReferences(store, systemContext, imageSpec)
+	if err != nil {
+		logrus.Debugf("error parsing reference to image %q: %v", imageSpec, err)
+	}
+
+	if ref, _, err := util.FindImage(store, "", systemContext, imageSpec); err == nil {
+		refs = append(refs, ref)
+	} else if ref, err := alltransports.ParseImageName(imageSpec); err == nil {
+		refs = append(refs, ref)
+	}
+	if len(refs) == 0 {
+		return fmt.Errorf("locating images with names %v", imageSpec)
+	}
+
+	var (
+		latestErr error
+		result    []byte
+	)
+
+	// Chain per-reference failures so the final error reports every
+	// candidate we tried.
+	appendErr := func(e error) {
+		if latestErr == nil {
+			latestErr = e
+		} else {
+			latestErr = fmt.Errorf("tried %v: %w", e, latestErr)
+		}
+	}
+
+	for _, ref := range refs {
+		logrus.Debugf("Testing reference %q for possible manifest", transports.ImageName(ref))
+
+		src, err := ref.NewImageSource(ctx, systemContext)
+		if err != nil {
+			appendErr(fmt.Errorf("reading image %q: %w", transports.ImageName(ref), err))
+			continue
+		}
+
+		manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
+		// Bug fix: close the source here instead of with defer.  A defer
+		// inside the loop would keep every opened source alive until the
+		// function returns, leaking resources across iterations.
+		src.Close()
+		if err != nil {
+			appendErr(fmt.Errorf("loading manifest %q: %w", transports.ImageName(ref), err))
+			continue
+		}
+
+		if !manifest.MIMETypeIsMultiImage(manifestType) {
+			appendErr(fmt.Errorf("manifest is of type %s (not a list type)", manifestType))
+			continue
+		}
+		result = manifestBytes
+		break
+	}
+	if len(result) == 0 && latestErr != nil {
+		return latestErr
+	}
+
+	return printManifest(result)
+}
+
+// manifestPushCmd implements "buildah manifest push".  It validates the
+// source list name and destination (when only one argument is given, the
+// destination defaults to "docker://<list-name>"), applies compression
+// settings from the flags to the system context, and delegates the push
+// to manifestPush.
+func manifestPushCmd(c *cobra.Command, args []string, opts pushOptions) error {
+	if err := auth.CheckAuthFile(opts.authfile); err != nil {
+		return err
+	}
+
+	listImageSpec := ""
+	destSpec := ""
+	switch len(args) {
+	case 0:
+		return errors.New("At least a source list ID must be specified")
+	case 1:
+		listImageSpec = args[0]
+		destSpec = "docker://" + listImageSpec
+	case 2:
+		listImageSpec = args[0]
+		destSpec = args[1]
+	default:
+		return errors.New("Only two arguments are necessary to push: source and destination")
+	}
+	if listImageSpec == "" {
+		return fmt.Errorf(`invalid image name "%s"`, listImageSpec)
+	}
+	if destSpec == "" {
+		return fmt.Errorf(`invalid image name "%s"`, destSpec)
+	}
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+	if opts.compressionFormat != "" {
+		algo, err := compression.AlgorithmByName(opts.compressionFormat)
+		if err != nil {
+			return err
+		}
+		systemContext.CompressionFormat = &algo
+	}
+	if c.Flag("compression-level").Changed {
+		systemContext.CompressionLevel = &opts.compressionLevel
+	}
+	if c.Flag("compression-format").Changed {
+		if !c.Flag("force-compression").Changed {
+			// If `compression-format` is set and no value for `--force-compression`
+			// is selected then defaults to `true`.
+			opts.forceCompressionFormat = true
+		}
+	}
+
+	return manifestPush(systemContext, store, listImageSpec, destSpec, opts)
+}
+
+// manifestPush pushes the local manifest list named listImageSpec to the
+// image reference destSpec.  Depending on opts it pushes all instances or
+// only the list, converts the manifest type, signs, removes the local list
+// after a successful push, and records the resulting digest in a file.
+func manifestPush(systemContext *types.SystemContext, store storage.Store, listImageSpec, destSpec string, opts pushOptions) error {
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	manifestList, err := runtime.LookupManifestList(listImageSpec)
+	if err != nil {
+		return err
+	}
+
+	_, list, err := manifests.LoadFromImage(store, manifestList.ID())
+	if err != nil {
+		return err
+	}
+
+	dest, err := alltransports.ParseImageName(destSpec)
+	if err != nil {
+		return err
+	}
+
+	// Map the user-facing --format value to a concrete media type.
+	var manifestType string
+	if opts.format != "" {
+		switch opts.format {
+		case "oci":
+			manifestType = imgspecv1.MediaTypeImageManifest
+		case "v2s2", "docker":
+			manifestType = manifest.DockerV2Schema2MediaType
+		default:
+			return fmt.Errorf("unknown format %q. Choose on of the supported formats: 'oci' or 'v2s2'", opts.format)
+		}
+	}
+
+	options := manifests.PushOptions{
+		Store:                  store,
+		SystemContext:          systemContext,
+		ImageListSelection:     cp.CopySpecificImages,
+		Instances:              nil,
+		RemoveSignatures:       opts.removeSignatures,
+		SignBy:                 opts.signBy,
+		ManifestType:           manifestType,
+		AddCompression:         opts.addCompression,
+		ForceCompressionFormat: opts.forceCompressionFormat,
+	}
+	if opts.all {
+		options.ImageListSelection = cp.CopyAllImages
+	}
+	if !opts.quiet {
+		// Progress goes to stderr so stdout stays parseable.
+		options.ReportWriter = os.Stderr
+	}
+
+	_, digest, err := list.Push(getContext(), dest, options)
+
+	if err == nil && opts.rm {
+		// --rm: delete the local list only after a successful push.
+		_, err = store.DeleteImage(manifestList.ID(), true)
+	}
+
+	if opts.digestfile != "" {
+		// Bug fix: use a separate variable for the write error.  The
+		// previous `err = os.WriteFile(...)` overwrote any push/delete
+		// error with nil on a successful write, silently masking a
+		// failed push.
+		if werr := os.WriteFile(opts.digestfile, []byte(digest.String()), 0644); werr != nil {
+			return util.GetFailureCause(werr, fmt.Errorf("failed to write digest to file %q: %w", opts.digestfile, werr))
+		}
+	}
+
+	return err
+}
diff --git a/cmd/buildah/mkcw.go b/cmd/buildah/mkcw.go
new file mode 100644
index 0000000..41dd3cb
--- /dev/null
+++ b/cmd/buildah/mkcw.go
@@ -0,0 +1,76 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/spf13/cobra"
+)
+
+// mkcwCmd drives the "mkcw" subcommand: it converts the conventional
+// image named by args[0] into a confidential-workload image tagged as
+// args[1], printing the resulting image ID on success.
+func mkcwCmd(c *cobra.Command, args []string, options buildah.CWConvertImageOptions) error {
+	ctx := getContext()
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return err
+	}
+
+	// Refuse to produce an image that nobody would ever be able to decrypt.
+	if options.AttestationURL == "" && options.DiskEncryptionPassphrase == "" {
+		return fmt.Errorf("neither --attestation-url nor --passphrase flags provided, disk would not be decryptable")
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	options.InputImage = args[0]
+	options.Tag = args[1]
+	options.ReportWriter = os.Stderr
+	imageID, _, _, convertErr := buildah.CWConvertImage(ctx, systemContext, store, options)
+	if convertErr != nil {
+		return convertErr
+	}
+	fmt.Printf("%s\n", imageID)
+	return nil
+}
+
+// init registers the "mkcw" subcommand and wires its flags into a
+// CWConvertImageOptions value shared with the RunE closure.
+func init() {
+	var teeType string
+	var options buildah.CWConvertImageOptions
+	mkcwDescription := `Convert a conventional image to a confidential workload image.`
+	mkcwCommand := &cobra.Command{
+		Use:   "mkcw",
+		Short: "Convert a conventional image to a confidential workload image",
+		Long:  mkcwDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			// Parse the TEE type at run time, after flag parsing has
+			// populated teeType.
+			options.TeeType = parse.TeeType(teeType)
+			return mkcwCmd(cmd, args, options)
+		},
+		Example: `buildah mkcw localhost/repository:typical localhost/repository:cw`,
+		Args:    cobra.ExactArgs(2),
+	}
+	mkcwCommand.SetUsageTemplate(UsageTemplate())
+	rootCmd.AddCommand(mkcwCommand)
+	flags := mkcwCommand.Flags()
+	flags.SetInterspersed(false)
+
+	flags.StringVarP(&teeType, "type", "t", "", "TEE (trusted execution environment) type: SEV,SNP (default: SNP)")
+	flags.StringVarP(&options.AttestationURL, "attestation-url", "u", "", "attestation server URL")
+	flags.StringVarP(&options.BaseImage, "base-image", "b", "", "alternate base image (default: scratch)")
+	flags.StringVarP(&options.DiskEncryptionPassphrase, "passphrase", "p", "", "disk encryption passphrase")
+	flags.IntVarP(&options.CPUs, "cpus", "c", 0, "number of CPUs to expect")
+	flags.IntVarP(&options.Memory, "memory", "m", 0, "amount of memory to expect (MB)")
+	flags.StringVarP(&options.WorkloadID, "workload-id", "w", "", "workload ID")
+	flags.StringVarP(&options.Slop, "slop", "s", "25%", "extra space needed for converting a container rootfs to a disk image")
+	flags.StringVarP(&options.FirmwareLibrary, "firmware-library", "f", "", "location of libkrunfw-sev.so")
+	flags.BoolVarP(&options.IgnoreAttestationErrors, "ignore-attestation-errors", "", false, "ignore attestation errors")
+	// Hidden flags: accepted, but intentionally left out of --help output.
+	if err := flags.MarkHidden("ignore-attestation-errors"); err != nil {
+		panic(fmt.Sprintf("error marking ignore-attestation-errors as hidden: %v", err))
+	}
+	flags.String("signature-policy", "", "`pathname` of signature policy file (not usually used)")
+	if err := flags.MarkHidden("signature-policy"); err != nil {
+		panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
+	}
+}
diff --git a/cmd/buildah/mount.go b/cmd/buildah/mount.go
new file mode 100644
index 0000000..ab52131
--- /dev/null
+++ b/cmd/buildah/mount.go
@@ -0,0 +1,143 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// jsonMount is one record in the output of "buildah mount --json".
+type jsonMount struct {
+	Container  string `json:"container,omitempty"` // omitted when only one container was requested
+	MountPoint string `json:"mountPoint"`
+}
+
+// mountOptions holds the flag values for "buildah mount".
+type mountOptions struct {
+	json bool // emit JSON instead of plain "name mountpoint" lines
+}
+
+// init registers the "mount" subcommand.  The hidden "notruncate" flag
+// is accepted for backward compatibility but is not consulted here.
+func init() {
+	var (
+		mountDescription = `buildah mount
+ mounts a working container's root filesystem for manipulation.
+
+ Note: In rootless mode you need to first execute buildah unshare, to put you
+ into the usernamespace. Afterwards you can buildah mount the container and
+ view/modify the content in the containers root file system.
+`
+		opts       mountOptions
+		noTruncate bool
+	)
+	mountCommand := &cobra.Command{
+		Use:   "mount",
+		Short: "Mount a working container's root filesystem",
+		Long:  mountDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return mountCmd(cmd, args, opts)
+		},
+		Example: `buildah mount
+  buildah mount containerID
+  buildah mount containerID1 containerID2
+
+  In rootless mode you must use buildah unshare first.
+  buildah unshare
+  buildah mount containerID
+`,
+	}
+	mountCommand.SetUsageTemplate(UsageTemplate())
+
+	flags := mountCommand.Flags()
+	flags.SetInterspersed(false)
+	flags.BoolVar(&opts.json, "json", false, "output in JSON format")
+	flags.BoolVar(&noTruncate, "notruncate", false, "do not truncate output")
+	rootCmd.AddCommand(mountCommand)
+	if err := flags.MarkHidden("notruncate"); err != nil {
+		logrus.Fatalf("error marking notruncate as hidden: %v", err)
+	}
+}
+
+// mountCmd implements "buildah mount".  With container arguments it
+// mounts each named working container and prints its mountpoint; with
+// no arguments it lists the containers that are already mounted.
+// Per-container failures are echoed to stderr and the last one is
+// returned, so a bad name does not stop processing of the rest.
+func mountCmd(c *cobra.Command, args []string, opts mountOptions) error {
+	if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
+		return err
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+	var jsonMounts []jsonMount
+	var lastError error
+	if len(args) > 0 {
+		// Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part
+		// of the mount command.
+		// Differently, allow the mount if we are already in a userns, as the mount point will still
+		// be accessible once "buildah mount" exits.
+		if os.Geteuid() != 0 && store.GraphDriverName() != "vfs" {
+			return fmt.Errorf("cannot mount using driver %s in rootless mode. You need to run it in a `buildah unshare` session", store.GraphDriverName())
+		}
+
+		for _, name := range args {
+			builder, err := openBuilder(getContext(), store, name)
+			if err != nil {
+				// Report the previously accumulated error before replacing it.
+				if lastError != nil {
+					fmt.Fprintln(os.Stderr, lastError)
+				}
+				lastError = fmt.Errorf("reading build container %q: %w", name, err)
+				continue
+			}
+			mountPoint, err := builder.Mount(builder.MountLabel)
+			if err != nil {
+				if lastError != nil {
+					fmt.Fprintln(os.Stderr, lastError)
+				}
+				lastError = fmt.Errorf("mounting %q container %q: %w", name, builder.Container, err)
+				continue
+			}
+			// With multiple containers, prefix each mountpoint with its name.
+			if len(args) > 1 {
+				if opts.json {
+					jsonMounts = append(jsonMounts, jsonMount{Container: name, MountPoint: mountPoint})
+					continue
+				}
+				fmt.Printf("%s %s\n", name, mountPoint)
+			} else {
+				if opts.json {
+					jsonMounts = append(jsonMounts, jsonMount{MountPoint: mountPoint})
+					continue
+				}
+				fmt.Printf("%s\n", mountPoint)
+			}
+		}
+	} else {
+		builders, err := openBuilders(store)
+		if err != nil {
+			return fmt.Errorf("reading build containers: %w", err)
+		}
+
+		// No arguments: list every working container that is mounted.
+		for _, builder := range builders {
+			mounted, err := builder.Mounted()
+			if err != nil {
+				return err
+			}
+			if mounted {
+				if opts.json {
+					jsonMounts = append(jsonMounts, jsonMount{Container: builder.Container, MountPoint: builder.MountPoint})
+					continue
+				}
+				fmt.Printf("%s %s\n", builder.Container, builder.MountPoint)
+			}
+		}
+	}
+
+	if opts.json {
+		data, err := json.MarshalIndent(jsonMounts, "", "    ")
+		if err != nil {
+			return err
+		}
+		fmt.Printf("%s\n", data)
+	}
+
+	return lastError
+}
diff --git a/cmd/buildah/passwd.go b/cmd/buildah/passwd.go
new file mode 100644
index 0000000..0cda81a
--- /dev/null
+++ b/cmd/buildah/passwd.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "golang.org/x/crypto/bcrypt"
+)
+
+var (
+	// passwdDescription documents the hidden "passwd" helper subcommand.
+	passwdDescription = `Generate a password hash using golang.org/x/crypto/bcrypt.`
+	passwdCommand     = &cobra.Command{
+		Use:     "passwd",
+		Short:   "Generate a password hash",
+		Long:    passwdDescription,
+		RunE:    passwdCmd,
+		Example: `buildah passwd testpassword`,
+		Args:    cobra.ExactArgs(1),
+		Hidden:  true, // utility command, kept out of --help output
+	}
+)
+
+// passwdCmd hashes the single positional argument with bcrypt at the
+// default cost and prints the resulting hash to stdout.
+func passwdCmd(c *cobra.Command, args []string) error {
+	hash, err := bcrypt.GenerateFromPassword([]byte(args[0]), bcrypt.DefaultCost)
+	if err != nil {
+		return err
+	}
+	fmt.Println(string(hash))
+	return nil
+}
+
+// init attaches the hidden "passwd" subcommand to the root command.
+func init() {
+	rootCmd.AddCommand(passwdCommand)
+}
diff --git a/cmd/buildah/prune.go b/cmd/buildah/prune.go
new file mode 100644
index 0000000..636796b
--- /dev/null
+++ b/cmd/buildah/prune.go
@@ -0,0 +1,95 @@
+package main
+
+import (
+ "context"
+ "fmt"
+
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/pkg/volumes"
+ "github.com/containers/common/libimage"
+ "github.com/hashicorp/go-multierror"
+ "github.com/spf13/cobra"
+)
+
+// pruneOptions holds the flag values for "buildah prune".
+type pruneOptions struct {
+	force bool // remove images even if containers are using them
+	all   bool // remove all unused images, not just dangling/intermediate ones
+}
+
+// init registers the "prune" subcommand, which removes intermediate
+// images along with the build and mount caches.
+func init() {
+	var (
+		pruneDescription = `
+  Cleanup intermediate images as well as build and mount cache.`
+		opts pruneOptions
+	)
+	pruneCommand := &cobra.Command{
+		Use:   "prune",
+		Short: "Cleanup intermediate images as well as build and mount cache",
+		Long:  pruneDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return pruneCmd(cmd, args, opts)
+		},
+		// Show both the plain form and a flag-bearing form; the original
+		// listed "buildah prune" twice.
+		Example: `buildah prune
+  buildah prune --all`,
+	}
+	pruneCommand.SetUsageTemplate(UsageTemplate())
+
+	flags := pruneCommand.Flags()
+	flags.SetInterspersed(false)
+
+	flags.BoolVarP(&opts.all, "all", "a", false, "remove all unused images")
+	flags.BoolVarP(&opts.force, "force", "f", false, "force removal of the image and any containers using the image")
+
+	rootCmd.AddCommand(pruneCommand)
+}
+
+// pruneCmd implements "buildah prune".  It clears the build/mount
+// cache and then removes unused images (dangling and intermediate by
+// default; all unused images with --all), printing untagged names and
+// removed IDs, and aggregating any removal errors into one.
+func pruneCmd(c *cobra.Command, args []string, iopts pruneOptions) error {
+	if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
+		return err
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return err
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	// Drop the cache mounts before touching images.
+	err = volumes.CleanCacheMount()
+	if err != nil {
+		return err
+	}
+
+	options := &libimage.RemoveImagesOptions{
+		Filters: []string{"readonly=false"},
+	}
+	if !iopts.all {
+		// Default: only dangling and intermediate images are pruned.
+		options.Filters = append(options.Filters, "dangling=true")
+		options.Filters = append(options.Filters, "intermediate=true")
+	}
+	options.Force = iopts.force
+
+	rmiReports, rmiErrors := runtime.RemoveImages(context.Background(), args, options)
+	// Print untagged names first, then the IDs of removed images.
+	for _, r := range rmiReports {
+		for _, u := range r.Untagged {
+			fmt.Printf("untagged: %s\n", u)
+		}
+	}
+	for _, r := range rmiReports {
+		if r.Removed {
+			fmt.Printf("%s\n", r.ID)
+		}
+	}
+
+	var multiE *multierror.Error
+	multiE = multierror.Append(multiE, rmiErrors...)
+	return multiE.ErrorOrNil()
+}
diff --git a/cmd/buildah/pull.go b/cmd/buildah/pull.go
new file mode 100644
index 0000000..74fe950
--- /dev/null
+++ b/cmd/buildah/pull.go
@@ -0,0 +1,155 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/pkg/auth"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// pullOptions holds the flag values for "buildah pull".
+type pullOptions struct {
+	allTags          bool     // pull every tag in the repository
+	authfile         string
+	blobCache        string   // hidden: directory for blob copies
+	certDir          string
+	creds            string
+	signaturePolicy  string   // hidden: alternate policy file
+	quiet            bool
+	removeSignatures bool
+	tlsVerify        bool
+	decryptionKeys   []string
+	pullPolicy       string   // missing, always, ifnewer, or never
+	retry            int
+	retryDelay       string   // parsed as a time.Duration in pullCmd
+}
+
+// init registers the "pull" subcommand and its flags.  The
+// "signature-policy" and "blob-cache" flags exist but are hidden.
+func init() {
+	var (
+		opts pullOptions
+
+		pullDescription = `  Pulls an image from a registry and stores it locally.
+  An image can be pulled using its tag or digest. If a tag is not
+  specified, the image with the 'latest' tag (if it exists) is pulled.`
+	)
+
+	pullCommand := &cobra.Command{
+		Use:   "pull",
+		Short: "Pull an image from the specified location",
+		Long:  pullDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return pullCmd(cmd, args, opts)
+		},
+		Example: `buildah pull imagename
+  buildah pull docker-daemon:imagename:imagetag
+  buildah pull myregistry/myrepository/imagename:imagetag`,
+	}
+	pullCommand.SetUsageTemplate(UsageTemplate())
+
+	flags := pullCommand.Flags()
+	flags.SetInterspersed(false)
+	flags.BoolVarP(&opts.allTags, "all-tags", "a", false, "download all tagged images in the repository")
+	flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	flags.StringVar(&opts.blobCache, "blob-cache", "", "store copies of pulled image blobs in the specified directory")
+	flags.StringVar(&opts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+	flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
+	flags.StringVar(&opts.pullPolicy, "policy", "missing", "missing, always, ifnewer, or never.")
+	flags.BoolVarP(&opts.removeSignatures, "remove-signatures", "", false, "don't copy signatures when pulling image")
+	flags.StringVar(&opts.signaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
+	flags.StringSliceVar(&opts.decryptionKeys, "decryption-key", nil, "key needed to decrypt the image")
+	if err := flags.MarkHidden("signature-policy"); err != nil {
+		panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
+	}
+	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output progress information when pulling images")
+	// Platform selection flags are read back via parse.PlatformsFromOptions.
+	flags.String("os", runtime.GOOS, "prefer `OS` instead of the running OS for choosing images")
+	flags.String("arch", runtime.GOARCH, "prefer `ARCH` instead of the architecture of the machine for choosing images")
+	flags.StringSlice("platform", []string{parse.DefaultPlatform()}, "prefer OS/ARCH instead of the current operating system and architecture for choosing images")
+	flags.String("variant", "", "override the `variant` of the specified image")
+	flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	flags.IntVar(&opts.retry, "retry", cli.MaxPullPushRetries, "number of times to retry in case of failure when performing pull")
+	flags.StringVar(&opts.retryDelay, "retry-delay", cli.PullPushRetryDelay.String(), "delay between retries in case of pull failures")
+	if err := flags.MarkHidden("blob-cache"); err != nil {
+		panic(fmt.Sprintf("error marking blob-cache as hidden: %v", err))
+	}
+
+	rootCmd.AddCommand(pullCommand)
+}
+
+// pullCmd implements "buildah pull": it validates the arguments,
+// assembles buildah.PullOptions from the flag values, pulls the single
+// image named by args[0], and prints the resulting image ID.
+func pullCmd(c *cobra.Command, args []string, iopts pullOptions) error {
+	if len(args) == 0 {
+		return errors.New("an image name must be specified")
+	}
+	if err := cli.VerifyFlagsArgsOrder(args); err != nil {
+		return err
+	}
+	if len(args) > 1 {
+		return errors.New("too many arguments specified")
+	}
+	if err := auth.CheckAuthFile(iopts.authfile); err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+	platforms, err := parse.PlatformsFromOptions(c)
+	if err != nil {
+		return err
+	}
+	// Pull handles a single platform; warn when extras were requested.
+	if len(platforms) > 1 {
+		logrus.Warnf("ignoring platforms other than %+v: %+v", platforms[0], platforms[1:])
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	decConfig, err := cli.DecryptConfig(iopts.decryptionKeys)
+	if err != nil {
+		return fmt.Errorf("unable to obtain decryption config: %w", err)
+	}
+
+	// Translate the --policy string into a define.PullPolicy value.
+	policy, ok := define.PolicyMap[iopts.pullPolicy]
+	if !ok {
+		return fmt.Errorf("unsupported pull policy %q", iopts.pullPolicy)
+	}
+	var pullPushRetryDelay time.Duration
+	pullPushRetryDelay, err = time.ParseDuration(iopts.retryDelay)
+	if err != nil {
+		return fmt.Errorf("unable to parse value provided %q as --retry-delay: %w", iopts.retryDelay, err)
+	}
+	options := buildah.PullOptions{
+		SignaturePolicyPath: iopts.signaturePolicy,
+		Store:               store,
+		SystemContext:       systemContext,
+		BlobDirectory:       iopts.blobCache,
+		AllTags:             iopts.allTags,
+		ReportWriter:        os.Stderr,
+		RemoveSignatures:    iopts.removeSignatures,
+		MaxRetries:          iopts.retry,
+		RetryDelay:          pullPushRetryDelay,
+		OciDecryptConfig:    decConfig,
+		PullPolicy:          policy,
+	}
+
+	if iopts.quiet {
+		options.ReportWriter = nil // Turns off logging output
+	}
+
+	id, err := buildah.Pull(getContext(), args[0], options)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%s\n", id)
+	return nil
+}
diff --git a/cmd/buildah/push.go b/cmd/buildah/push.go
new file mode 100644
index 0000000..3086dae
--- /dev/null
+++ b/cmd/buildah/push.go
@@ -0,0 +1,271 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "errors"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ util "github.com/containers/buildah/util"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/storage"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// pushOptions holds the flag values for "buildah push"; it is also
+// consumed by manifestPush when the source turns out to be a manifest
+// list.
+type pushOptions struct {
+	all                    bool // push every instance referenced by a manifest list
+	authfile               string
+	blobCache              string // hidden: directory of pre-staged blobs
+	certDir                string
+	creds                  string
+	digestfile             string // write the resulting digest here
+	disableCompression     bool
+	format                 string // oci, v2s1, or v2s2
+	compressionFormat      string
+	compressionLevel       int
+	forceCompressionFormat bool
+	retry                  int
+	retryDelay             string // parsed as a time.Duration in pushCmd
+	rm                     bool   // remove the manifest list after a successful push
+	quiet                  bool
+	removeSignatures       bool
+	signaturePolicy        string // hidden: alternate policy file
+	signBy                 string
+	tlsVerify              bool
+	encryptionKeys         []string
+	encryptLayers          []int
+	insecure               bool
+	addCompression         []string
+}
+
+// init registers the "push" subcommand and its flags.  The
+// "signature-policy" and "blob-cache" flags exist but are hidden.
+func init() {
+	var (
+		opts            pushOptions
+		pushDescription = fmt.Sprintf(`
+  Pushes an image to a specified location.
+
+  The Image "DESTINATION" uses a "transport":"details" format. If not specified, will reuse source IMAGE as DESTINATION.
+
+  Supported transports:
+  %s
+
+  See buildah-push(1) section "DESTINATION" for the expected format
+`, getListOfTransports())
+	)
+
+	pushCommand := &cobra.Command{
+		Use:   "push",
+		Short: "Push an image to a specified destination",
+		Long:  pushDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return pushCmd(cmd, args, opts)
+		},
+		// Fixed the "tagi" typo in the docker-daemon example.
+		Example: `buildah push imageID docker://registry.example.com/repository:tag
+  buildah push imageID docker-daemon:image:tag
+  buildah push imageID oci:/path/to/layout:image:tag`,
+	}
+	pushCommand.SetUsageTemplate(UsageTemplate())
+
+	flags := pushCommand.Flags()
+	flags.SetInterspersed(false)
+	flags.BoolVar(&opts.all, "all", false, "push all of the images referenced by the manifest list")
+	flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+	flags.StringVar(&opts.blobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
+	flags.StringVar(&opts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+	flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
+	flags.StringVar(&opts.digestfile, "digestfile", "", "after copying the image, write the digest of the resulting image to the file")
+	flags.BoolVarP(&opts.disableCompression, "disable-compression", "D", false, "don't compress layers")
+	flags.BoolVarP(&opts.forceCompressionFormat, "force-compression", "", false, "use the specified compression algorithm if the destination contains a differently-compressed variant already")
+	flags.StringVarP(&opts.format, "format", "f", "", "manifest type (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)")
+	flags.StringVar(&opts.compressionFormat, "compression-format", "", "compression format to use")
+	flags.IntVar(&opts.compressionLevel, "compression-level", 0, "compression level to use")
+	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output progress information when pushing images")
+	flags.IntVar(&opts.retry, "retry", cli.MaxPullPushRetries, "number of times to retry in case of failure when performing push/pull")
+	flags.StringVar(&opts.retryDelay, "retry-delay", cli.PullPushRetryDelay.String(), "delay between retries in case of push/pull failures")
+	flags.BoolVar(&opts.rm, "rm", false, "remove the manifest list if push succeeds")
+	flags.BoolVarP(&opts.removeSignatures, "remove-signatures", "", false, "don't copy signatures when pushing image")
+	flags.StringVar(&opts.signBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
+	flags.StringVar(&opts.signaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
+	flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", nil, "key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
+	flags.IntSliceVar(&opts.encryptLayers, "encrypt-layer", nil, "layers to encrypt, 0-indexed layer indices with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer). If not defined, will encrypt all layers if encryption-key flag is specified")
+
+	if err := flags.MarkHidden("signature-policy"); err != nil {
+		panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
+	}
+	flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
+	if err := flags.MarkHidden("blob-cache"); err != nil {
+		panic(fmt.Sprintf("error marking blob-cache as hidden: %v", err))
+	}
+
+	rootCmd.AddCommand(pushCommand)
+}
+
+// pushCmd implements "buildah push": it copies the local image args[0]
+// to the destination args[1], defaulting the destination to the source
+// spec when only one argument is given.  If the source is actually a
+// manifest list, it falls back to pushing the list via manifestPush.
+func pushCmd(c *cobra.Command, args []string, iopts pushOptions) error {
+	var src, destSpec string
+
+	if err := cli.VerifyFlagsArgsOrder(args); err != nil {
+		return err
+	}
+	if err := auth.CheckAuthFile(iopts.authfile); err != nil {
+		return err
+	}
+
+	switch len(args) {
+	case 0:
+		return errors.New("at least a source image ID must be specified")
+	case 1:
+		src = args[0]
+		destSpec = src
+		logrus.Debugf("Destination argument not specified, assuming the same as the source: %s", destSpec)
+	case 2:
+		src = args[0]
+		destSpec = args[1]
+		if src == "" {
+			return fmt.Errorf(`invalid image name "%s"`, args[0])
+		}
+	default:
+		return errors.New("Only two arguments are necessary to push: source and destination")
+	}
+
+	compress := define.Gzip
+	if iopts.disableCompression {
+		compress = define.Uncompressed
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	dest, err := alltransports.ParseImageName(destSpec)
+	// add the docker:// transport to see if they neglected it.
+	if err != nil {
+		// If the destination names a transport that exists, the spec
+		// itself is malformed, so keep the original parse error.
+		destTransport := strings.Split(destSpec, ":")[0]
+		if t := transports.Get(destTransport); t != nil {
+			return err
+		}
+
+		if strings.Contains(destSpec, "://") {
+			return err
+		}
+
+		destSpec = "docker://" + destSpec
+		dest2, err2 := alltransports.ParseImageName(destSpec)
+		if err2 != nil {
+			return err
+		}
+		dest = dest2
+		logrus.Debugf("Assuming docker:// as the transport method for DESTINATION: %s", destSpec)
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return fmt.Errorf("building system context: %w", err)
+	}
+
+	// Map the user-visible --format value to a concrete media type.
+	var manifestType string
+	if iopts.format != "" {
+		switch iopts.format {
+		case "oci":
+			manifestType = imgspecv1.MediaTypeImageManifest
+		case "v2s1":
+			manifestType = manifest.DockerV2Schema1SignedMediaType
+		case "v2s2", "docker":
+			manifestType = manifest.DockerV2Schema2MediaType
+		default:
+			return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", iopts.format)
+		}
+	}
+
+	encConfig, encLayers, err := cli.EncryptConfig(iopts.encryptionKeys, iopts.encryptLayers)
+	if err != nil {
+		return fmt.Errorf("unable to obtain encryption config: %w", err)
+	}
+
+	var pullPushRetryDelay time.Duration
+	pullPushRetryDelay, err = time.ParseDuration(iopts.retryDelay)
+	if err != nil {
+		return fmt.Errorf("unable to parse value provided %q as --retry-delay: %w", iopts.retryDelay, err)
+	}
+	if c.Flag("compression-format").Changed {
+		if !c.Flag("force-compression").Changed {
+			// If `compression-format` is set and no value for `--force-compression`
+			// is selected then defaults to `true`.
+			iopts.forceCompressionFormat = true
+		}
+	}
+
+	options := buildah.PushOptions{
+		Compression:            compress,
+		ManifestType:           manifestType,
+		SignaturePolicyPath:    iopts.signaturePolicy,
+		Store:                  store,
+		SystemContext:          systemContext,
+		BlobDirectory:          iopts.blobCache,
+		RemoveSignatures:       iopts.removeSignatures,
+		SignBy:                 iopts.signBy,
+		MaxRetries:             iopts.retry,
+		RetryDelay:             pullPushRetryDelay,
+		OciEncryptConfig:       encConfig,
+		OciEncryptLayers:       encLayers,
+		ForceCompressionFormat: iopts.forceCompressionFormat,
+	}
+	if !iopts.quiet {
+		options.ReportWriter = os.Stderr
+	}
+	if iopts.compressionFormat != "" {
+		algo, err := compression.AlgorithmByName(iopts.compressionFormat)
+		if err != nil {
+			return err
+		}
+		options.CompressionFormat = &algo
+	}
+	if c.Flag("compression-level").Changed {
+		options.CompressionLevel = &iopts.compressionLevel
+	}
+
+	ref, digest, err := buildah.Push(getContext(), src, dest, options)
+	if err != nil {
+		if !errors.Is(err, storage.ErrImageUnknown) {
+			// Image might be a manifest so attempt a manifest push
+			if manifestsErr := manifestPush(systemContext, store, src, destSpec, iopts); manifestsErr == nil {
+				return nil
+			}
+		}
+		return util.GetFailureCause(err, fmt.Errorf("pushing image %q to %q: %w", src, destSpec, err))
+	}
+	if ref != nil {
+		logrus.Debugf("pushed image %q with digest %s", ref, digest.String())
+	} else {
+		logrus.Debugf("pushed image with digest %s", digest.String())
+	}
+
+	logrus.Debugf("Successfully pushed %s with digest %s", transports.ImageName(dest), digest.String())
+
+	if iopts.digestfile != "" {
+		if err = os.WriteFile(iopts.digestfile, []byte(digest.String()), 0644); err != nil {
+			return util.GetFailureCause(err, fmt.Errorf("failed to write digest to file %q: %w", iopts.digestfile, err))
+		}
+	}
+
+	return nil
+}
+
+// getListOfTransports gets the transports supported from the image library
+// and strips of the "tarball" transport from the string of transports returned
+// getListOfTransports returns the comma-separated transport names known
+// to the image library, with the internal "tarball" transport removed
+// from the displayed list.
+func getListOfTransports() string {
+	joined := strings.Join(transports.ListNames(), ",")
+	return strings.Replace(joined, ",tarball", "", 1)
+}
diff --git a/cmd/buildah/rename.go b/cmd/buildah/rename.go
new file mode 100644
index 0000000..17960ac
--- /dev/null
+++ b/cmd/buildah/rename.go
@@ -0,0 +1,59 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/containers/buildah"
+ "github.com/spf13/cobra"
+)
+
+var (
+	// renameDescription documents the "rename" subcommand.
+	renameDescription = "\n  Renames a local container."
+	renameCommand     = &cobra.Command{
+		Use:   "rename",
+		Short: "Rename a container",
+		Long:  renameDescription,
+		RunE:  renameCmd,
+		Example: `buildah rename containerName NewName
+  buildah rename containerID NewName`,
+		Args: cobra.ExactArgs(2),
+	}
+)
+
+// init attaches the "rename" subcommand to the root command.
+func init() {
+	renameCommand.SetUsageTemplate(UsageTemplate())
+	rootCmd.AddCommand(renameCommand)
+}
+
+func renameCmd(c *cobra.Command, args []string) error {
+ var builder *buildah.Builder
+
+ name := args[0]
+ newName := args[1]
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ builder, err = openBuilder(getContext(), store, name)
+ if err != nil {
+ return fmt.Errorf("reading build container %q: %w", name, err)
+ }
+
+ oldName := builder.Container
+ if oldName == newName {
+ return fmt.Errorf("renaming a container with the same name as its current name")
+ }
+
+ if build, err := openBuilder(getContext(), store, newName); err == nil {
+ return fmt.Errorf("The container name %q is already in use by container %q", newName, build.ContainerID)
+ }
+
+ err = store.SetNames(builder.ContainerID, []string{newName})
+ if err != nil {
+ return fmt.Errorf("renaming container %q to the name %q: %w", oldName, newName, err)
+ }
+ builder.Container = newName
+ return builder.Save()
+}
diff --git a/cmd/buildah/rm.go b/cmd/buildah/rm.go
new file mode 100644
index 0000000..3104e4c
--- /dev/null
+++ b/cmd/buildah/rm.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/util"
+ "github.com/spf13/cobra"
+)
+
+// rmResults holds the flag values for "buildah rm".
+type rmResults struct {
+	all bool // remove every working container
+}
+
+// init registers the "rm" subcommand (aliased as "delete").
+func init() {
+	var (
+		rmDescription = "\n  Removes one or more working containers, unmounting them if necessary."
+		opts          rmResults
+	)
+	rmCommand := &cobra.Command{
+		Use:     "rm",
+		Aliases: []string{"delete"},
+		Short:   "Remove one or more working containers",
+		Long:    rmDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return rmCmd(cmd, args, opts)
+		},
+		Example: `buildah rm containerID
+  buildah rm containerID1 containerID2 containerID3
+  buildah rm --all`,
+	}
+	rmCommand.SetUsageTemplate(UsageTemplate())
+
+	flags := rmCommand.Flags()
+	flags.SetInterspersed(false)
+	flags.BoolVarP(&opts.all, "all", "a", false, "remove all containers")
+	rootCmd.AddCommand(rmCommand)
+}
+
+// rmCmd implements "buildah rm".  It deletes either every working
+// container (--all) or the named ones, printing each removed ID.
+// Per-container failures are written to stderr as they occur and the
+// last one is returned, so a single failure does not stop the rest.
+func rmCmd(c *cobra.Command, args []string, iopts rmResults) error {
+	delContainerErrStr := "removing container"
+	if len(args) == 0 && !iopts.all {
+		return errors.New("container ID must be specified")
+	}
+	if len(args) > 0 && iopts.all {
+		return errors.New("when using the --all switch, you may not pass any containers names or IDs")
+	}
+
+	if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
+		return err
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	var lastError error
+	if iopts.all {
+		builders, err := openBuilders(store)
+		if err != nil {
+			return fmt.Errorf("reading build containers: %w", err)
+		}
+
+		for _, builder := range builders {
+			id := builder.ContainerID
+			if err = builder.Delete(); err != nil {
+				lastError = util.WriteError(os.Stderr, fmt.Errorf("%s %q: %w", delContainerErrStr, builder.Container, err), lastError)
+				continue
+			}
+			fmt.Printf("%s\n", id)
+		}
+	} else {
+		for _, name := range args {
+			builder, err := openBuilder(getContext(), store, name)
+			if err != nil {
+				lastError = util.WriteError(os.Stderr, fmt.Errorf("%s %q: %w", delContainerErrStr, name, err), lastError)
+				continue
+			}
+			id := builder.ContainerID
+			if err = builder.Delete(); err != nil {
+				lastError = util.WriteError(os.Stderr, fmt.Errorf("%s %q: %w", delContainerErrStr, name, err), lastError)
+				continue
+			}
+			fmt.Printf("%s\n", id)
+		}
+
+	}
+	return lastError
+}
diff --git a/cmd/buildah/rmi.go b/cmd/buildah/rmi.go
new file mode 100644
index 0000000..e214aa7
--- /dev/null
+++ b/cmd/buildah/rmi.go
@@ -0,0 +1,106 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/libimage"
+ "github.com/hashicorp/go-multierror"
+ "github.com/spf13/cobra"
+)
+
+// rmiOptions holds the flag values for "buildah rmi".
+type rmiOptions struct {
+	all   bool // remove all images
+	prune bool // remove only dangling images
+	force bool // remove even when containers use the image
+}
+
+// init registers the "rmi" subcommand and its flags.
+func init() {
+	var (
+		rmiDescription = "\n  Removes one or more locally stored images."
+		opts           rmiOptions
+	)
+	rmiCommand := &cobra.Command{
+		Use:   "rmi",
+		Short: "Remove one or more images from local storage",
+		Long:  rmiDescription,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return rmiCmd(cmd, args, opts)
+		},
+		Example: `buildah rmi imageID
+  buildah rmi --all --force
+  buildah rmi imageID1 imageID2 imageID3`,
+	}
+	rmiCommand.SetUsageTemplate(UsageTemplate())
+
+	flags := rmiCommand.Flags()
+	flags.SetInterspersed(false)
+
+	flags.BoolVarP(&opts.all, "all", "a", false, "remove all images")
+	flags.BoolVarP(&opts.prune, "prune", "p", false, "prune dangling images")
+	flags.BoolVarP(&opts.force, "force", "f", false, "force removal of the image and any containers using the image")
+
+	rootCmd.AddCommand(rmiCommand)
+}
+
+// rmiCmd implements "buildah rmi".  After validating the mutually
+// exclusive --all/--prune/argument combinations, it delegates removal
+// to libimage, printing untagged names and removed IDs, and aggregates
+// any removal errors into a single multierror.
+func rmiCmd(c *cobra.Command, args []string, iopts rmiOptions) error {
+	if len(args) == 0 && !iopts.all && !iopts.prune {
+		return errors.New("image name or ID must be specified")
+	}
+	if len(args) > 0 && iopts.all {
+		return errors.New("when using the --all switch, you may not pass any images names or IDs")
+	}
+	if iopts.all && iopts.prune {
+		return errors.New("when using the --all switch, you may not use --prune switch")
+	}
+	if len(args) > 0 && iopts.prune {
+		return errors.New("when using the --prune switch, you may not pass any images names or IDs")
+	}
+
+	if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
+		return err
+	}
+
+	store, err := getStore(c)
+	if err != nil {
+		return err
+	}
+
+	systemContext, err := parse.SystemContextFromOptions(c)
+	if err != nil {
+		return err
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return err
+	}
+
+	options := &libimage.RemoveImagesOptions{
+		Filters: []string{"readonly=false"},
+	}
+	if iopts.prune {
+		options.Filters = append(options.Filters, "dangling=true")
+	} else if !iopts.all {
+		options.Filters = append(options.Filters, "intermediate=false")
+	}
+	options.Force = iopts.force
+
+	rmiReports, rmiErrors := runtime.RemoveImages(context.Background(), args, options)
+	// Print untagged names first, then the IDs of removed images.
+	for _, r := range rmiReports {
+		for _, u := range r.Untagged {
+			fmt.Printf("untagged: %s\n", u)
+		}
+	}
+	for _, r := range rmiReports {
+		if r.Removed {
+			fmt.Printf("%s\n", r.ID)
+		}
+	}
+
+	var multiE *multierror.Error
+	multiE = multierror.Append(multiE, rmiErrors...)
+	return multiE.ErrorOrNil()
+}
diff --git a/cmd/buildah/run.go b/cmd/buildah/run.go
new file mode 100644
index 0000000..a7c1697
--- /dev/null
+++ b/cmd/buildah/run.go
@@ -0,0 +1,199 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/internal/volumes"
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/util"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+type runInputOptions struct {
+ addHistory bool
+ capAdd []string
+ capDrop []string
+ contextDir string
+ env []string
+ hostname string
+ isolation string
+ mounts []string
+ runtime string
+ runtimeFlag []string
+ noHostname bool
+ noHosts bool
+ noPivot bool
+ terminal bool
+ volumes []string
+ workingDir string
+ *buildahcli.NameSpaceResults
+}
+
+func init() {
+ var (
+ runDescription = "\n Runs a specified command using the container's root filesystem as a root\n filesystem, using configuration settings inherited from the container's\n image or as specified using previous calls to the config command."
+ opts runInputOptions
+ )
+
+ namespaceResults := buildahcli.NameSpaceResults{}
+
+ runCommand := &cobra.Command{
+ Use: "run",
+ Short: "Run a command inside of the container",
+ Long: runDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.NameSpaceResults = &namespaceResults
+ return runCmd(cmd, args, opts)
+
+ },
+ Example: `buildah run containerID -- ps -auxw
+ buildah run --terminal containerID /bin/bash
+ buildah run --volume /path/on/host:/path/in/container:ro,z containerID /bin/sh`,
+ }
+ runCommand.SetUsageTemplate(UsageTemplate())
+
+ flags := runCommand.Flags()
+ flags.SetInterspersed(false)
+ flags.BoolVar(&opts.addHistory, "add-history", false, "add an entry for this operation to the image's history. Use BUILDAH_HISTORY environment variable to override. (default false)")
+ flags.StringSliceVar(&opts.capAdd, "cap-add", []string{}, "add the specified capability (default [])")
+ flags.StringSliceVar(&opts.capDrop, "cap-drop", []string{}, "drop the specified capability (default [])")
+ flags.StringVar(&opts.contextDir, "contextdir", "", "context directory path")
+ flags.StringArrayVarP(&opts.env, "env", "e", []string{}, "add environment variable to be set temporarily when running command (default [])")
+ flags.StringVar(&opts.hostname, "hostname", "", "set the hostname inside of the container")
+ flags.StringVar(&opts.isolation, "isolation", "", "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
+ // Do not set a default runtime here, we'll do that later in the processing.
+ flags.StringVar(&opts.runtime, "runtime", util.Runtime(), "`path` to an alternate OCI runtime")
+ flags.StringSliceVar(&opts.runtimeFlag, "runtime-flag", []string{}, "add global flags for the container runtime")
+ flags.BoolVar(&opts.noHostname, "no-hostname", false, "do not override the /etc/hostname file within the container")
+ flags.BoolVar(&opts.noHosts, "no-hosts", false, "do not override the /etc/hosts file within the container")
+ flags.BoolVar(&opts.noPivot, "no-pivot", false, "do not use pivot root to jail process inside rootfs")
+ flags.BoolVarP(&opts.terminal, "terminal", "t", false, "allocate a pseudo-TTY in the container")
+ flags.StringArrayVarP(&opts.volumes, "volume", "v", []string{}, "bind mount a host location into the container while running the command")
+ flags.StringArrayVar(&opts.mounts, "mount", []string{}, "attach a filesystem mount to the container (default [])")
+ flags.StringVar(&opts.workingDir, "workingdir", "", "temporarily set working directory for command (default to container's workingdir)")
+
+ userFlags := getUserFlags()
+ namespaceFlags := buildahcli.GetNameSpaceFlags(&namespaceResults)
+
+ flags.AddFlagSet(&userFlags)
+ flags.AddFlagSet(&namespaceFlags)
+ flags.SetNormalizeFunc(buildahcli.AliasFlags)
+
+ rootCmd.AddCommand(runCommand)
+}
+
+func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
+ if len(args) == 0 {
+ return errors.New("container ID must be specified")
+ }
+ name := args[0]
+ args = Tail(args)
+ if len(args) > 0 && args[0] == "--" {
+ args = args[1:]
+ }
+
+ if len(args) == 0 {
+ return errors.New("command must be specified")
+ }
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ builder, err := openBuilder(getContext(), store, name)
+ if err != nil {
+ return fmt.Errorf("reading build container %q: %w", name, err)
+ }
+
+ isolation, err := parse.IsolationOption(c.Flag("isolation").Value.String())
+ if err != nil {
+ return err
+ }
+
+ runtimeFlags := []string{}
+ for _, arg := range iopts.runtimeFlag {
+ runtimeFlags = append(runtimeFlags, "--"+arg)
+ }
+
+ noPivot := iopts.noPivot || (os.Getenv("BUILDAH_NOPIVOT") != "")
+
+ namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c)
+ if err != nil {
+ return err
+ }
+ if c.Flag("network").Changed && c.Flag("isolation").Changed {
+ if isolation == buildah.IsolationChroot {
+ if ns := namespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
+ if !ns.Host {
+ return fmt.Errorf("cannot set --network other than host with --isolation %s", c.Flag("isolation").Value.String())
+ }
+ }
+ }
+ }
+
+ options := buildah.RunOptions{
+ Hostname: iopts.hostname,
+ Runtime: iopts.runtime,
+ Args: runtimeFlags,
+ NoHostname: iopts.noHostname,
+ NoHosts: iopts.noHosts,
+ NoPivot: noPivot,
+ User: c.Flag("user").Value.String(),
+ Isolation: isolation,
+ NamespaceOptions: namespaceOptions,
+ ConfigureNetwork: networkPolicy,
+ ContextDir: iopts.contextDir,
+ CNIPluginPath: iopts.CNIPlugInPath,
+ CNIConfigDir: iopts.CNIConfigDir,
+ AddCapabilities: iopts.capAdd,
+ DropCapabilities: iopts.capDrop,
+ WorkingDir: iopts.workingDir,
+ }
+
+ if c.Flag("terminal").Changed {
+ if iopts.terminal {
+ options.Terminal = buildah.WithTerminal
+ } else {
+ options.Terminal = buildah.WithoutTerminal
+ }
+ }
+
+ options.Env = buildahcli.LookupEnvVarReferences(iopts.env, os.Environ())
+
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return fmt.Errorf("building system context: %w", err)
+ }
+ mounts, mountedImages, targetLocks, err := volumes.GetVolumes(systemContext, store, iopts.volumes, iopts.mounts, iopts.contextDir, iopts.workingDir)
+ if err != nil {
+ return err
+ }
+ defer volumes.UnlockLockArray(targetLocks)
+ options.Mounts = mounts
+ // Run() will automatically clean them up.
+ options.ExternalImageMounts = mountedImages
+ options.CgroupManager = globalFlagResults.CgroupManager
+
+ runerr := builder.Run(args, options)
+
+ if runerr != nil {
+ logrus.Debugf("error running %v in container %q: %v", args, builder.Container, runerr)
+ }
+ if runerr == nil {
+ shell := "/bin/sh -c"
+ if len(builder.Shell()) > 0 {
+ shell = strings.Join(builder.Shell(), " ")
+ }
+ conditionallyAddHistory(builder, c, "%s %s", shell, strings.Join(args, " "))
+ return builder.Save()
+ }
+ return runerr
+}
diff --git a/cmd/buildah/source.go b/cmd/buildah/source.go
new file mode 100644
index 0000000..6f00a4f
--- /dev/null
+++ b/cmd/buildah/source.go
@@ -0,0 +1,127 @@
+package main
+
+import (
+ "context"
+
+ "github.com/containers/buildah/internal/source"
+ "github.com/spf13/cobra"
+)
+
+var (
+ // buildah source
+ sourceDescription = ` Create, push, pull and manage source images and associated source artifacts. A source image contains all source artifacts an ordinary OCI image has been built with. Those artifacts can be any kind of source artifact, such as source RPMs, an entire source tree or text files.
+
+ Note that the buildah-source command and all its subcommands are experimental and may be subject to future changes.
+`
+ sourceCommand = &cobra.Command{
+ Use: "source",
+ Short: "Manage source containers",
+ Long: sourceDescription,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return nil
+ },
+ }
+
+ // buildah source create
+ sourceCreateDescription = ` Create and initialize a source image. A source image is an OCI artifact; an OCI image with a custom config media type.
+
+ Note that the buildah-source command and all its subcommands are experimental and may be subject to future changes.
+`
+ sourceCreateOptions = source.CreateOptions{}
+ sourceCreateCommand = &cobra.Command{
+ Args: cobra.ExactArgs(1),
+ Use: "create",
+ Short: "Create a source image",
+ Long: sourceCreateDescription,
+ Example: "buildah source create /tmp/fedora:latest-source",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return source.Create(context.Background(), args[0], sourceCreateOptions)
+ },
+ }
+
+ // buildah source add
+ sourceAddOptions = source.AddOptions{}
+	sourceAddDescription = ` Add a source artifact to a source image. The artifact will be added as a gzip-compressed tar ball. Add attempts to auto-tar and auto-compress only if necessary.
+
+ Note that the buildah-source command and all its subcommands are experimental and may be subject to future changes.
+`
+ sourceAddCommand = &cobra.Command{
+ Args: cobra.ExactArgs(2),
+ Use: "add",
+ Short: "Add a source artifact to a source image",
+ Long: sourceAddDescription,
+ Example: "buildah source add /tmp/fedora sources.tar.gz",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return source.Add(context.Background(), args[0], args[1], sourceAddOptions)
+ },
+ }
+
+ // buildah source pull
+ sourcePullOptions = source.PullOptions{}
+ sourcePullDescription = ` Pull a source image from a registry to a specified path. The pull operation will fail if the image does not comply with a source-image OCI artifact.
+
+ Note that the buildah-source command and all its subcommands are experimental and may be subject to future changes.
+`
+ sourcePullCommand = &cobra.Command{
+ Args: cobra.ExactArgs(2),
+ Use: "pull",
+ Short: "Pull a source image from a registry to a specified path",
+ Long: sourcePullDescription,
+ Example: "buildah source pull quay.io/sourceimage/example:latest /tmp/sourceimage:latest",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return source.Pull(context.Background(), args[0], args[1], sourcePullOptions)
+ },
+ }
+
+ // buildah source push
+ sourcePushOptions = source.PushOptions{}
+ sourcePushDescription = ` Push a source image from a specified path to a registry.
+
+ Note that the buildah-source command and all its subcommands are experimental and may be subject to future changes.
+`
+ sourcePushCommand = &cobra.Command{
+ Args: cobra.ExactArgs(2),
+ Use: "push",
+ Short: "Push a source image from a specified path to a registry",
+ Long: sourcePushDescription,
+ Example: "buildah source push /tmp/sourceimage:latest quay.io/sourceimage/example:latest",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return source.Push(context.Background(), args[0], args[1], sourcePushOptions)
+ },
+ }
+)
+
+func init() {
+ // buildah source
+ sourceCommand.SetUsageTemplate(UsageTemplate())
+ rootCmd.AddCommand(sourceCommand)
+
+ // buildah source create
+ sourceCreateCommand.SetUsageTemplate(UsageTemplate())
+ sourceCommand.AddCommand(sourceCreateCommand)
+ sourceCreateFlags := sourceCreateCommand.Flags()
+ sourceCreateFlags.StringVar(&sourceCreateOptions.Author, "author", "", "set the author")
+ sourceCreateFlags.BoolVar(&sourceCreateOptions.TimeStamp, "time-stamp", true, "set the \"created\" time stamp")
+
+ // buildah source add
+ sourceAddCommand.SetUsageTemplate(UsageTemplate())
+ sourceCommand.AddCommand(sourceAddCommand)
+ sourceAddFlags := sourceAddCommand.Flags()
+ sourceAddFlags.StringArrayVar(&sourceAddOptions.Annotations, "annotation", []string{}, "add an annotation (format: key=value)")
+
+ // buildah source pull
+ sourcePullCommand.SetUsageTemplate(UsageTemplate())
+ sourceCommand.AddCommand(sourcePullCommand)
+ sourcePullFlags := sourcePullCommand.Flags()
+ sourcePullFlags.StringVar(&sourcePullOptions.Credentials, "creds", "", "use `[username[:password]]` for accessing the registry")
+ sourcePullFlags.BoolVar(&sourcePullOptions.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
+ sourcePullFlags.BoolVarP(&sourcePullOptions.Quiet, "quiet", "q", false, "don't output pull progress information")
+
+ // buildah source push
+ sourcePushCommand.SetUsageTemplate(UsageTemplate())
+ sourceCommand.AddCommand(sourcePushCommand)
+ sourcePushFlags := sourcePushCommand.Flags()
+ sourcePushFlags.StringVar(&sourcePushOptions.Credentials, "creds", "", "use `[username[:password]]` for accessing the registry")
+ sourcePushFlags.BoolVar(&sourcePushOptions.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
+ sourcePushFlags.BoolVarP(&sourcePushOptions.Quiet, "quiet", "q", false, "don't output push progress information")
+}
diff --git a/cmd/buildah/tag.go b/cmd/buildah/tag.go
new file mode 100644
index 0000000..13071e1
--- /dev/null
+++ b/cmd/buildah/tag.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/libimage"
+ "github.com/spf13/cobra"
+)
+
+var (
+	tagDescription = "\n  Adds one or more additional names to a locally-stored image."
+ tagCommand = &cobra.Command{
+ Use: "tag",
+ Short: "Add an additional name to a local image",
+ Long: tagDescription,
+ RunE: tagCmd,
+
+ Example: `buildah tag imageName firstNewName
+ buildah tag imageName firstNewName SecondNewName`,
+ Args: cobra.MinimumNArgs(2),
+ }
+)
+
+func tagCmd(c *cobra.Command, args []string) error {
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return fmt.Errorf("building system context: %w", err)
+ }
+ runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+ if err != nil {
+ return err
+ }
+
+ // Allow tagging manifest list instead of resolving instances from manifest
+ lookupOptions := &libimage.LookupImageOptions{ManifestList: true}
+ image, _, err := runtime.LookupImage(args[0], lookupOptions)
+ if err != nil {
+ return err
+ }
+
+ for _, tag := range args[1:] {
+ if err := image.Tag(tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func init() {
+ tagCommand.SetUsageTemplate(UsageTemplate())
+ rootCmd.AddCommand(tagCommand)
+}
diff --git a/cmd/buildah/umount.go b/cmd/buildah/umount.go
new file mode 100644
index 0000000..a40e52a
--- /dev/null
+++ b/cmd/buildah/umount.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ buildahcli "github.com/containers/buildah/pkg/cli"
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ umountCommand := &cobra.Command{
+ Use: "umount",
+ Aliases: []string{"unmount"},
+ Short: "Unmount the root file system of the specified working containers",
+ Long: "Unmounts the root file system of the specified working containers.",
+ RunE: umountCmd,
+ Example: `buildah umount containerID
+ buildah umount containerID1 containerID2 containerID3
+ buildah umount --all`,
+ }
+ umountCommand.SetUsageTemplate(UsageTemplate())
+
+ flags := umountCommand.Flags()
+ flags.SetInterspersed(false)
+ flags.BoolP("all", "a", false, "umount all of the currently mounted containers")
+
+ rootCmd.AddCommand(umountCommand)
+}
+
+func umountCmd(c *cobra.Command, args []string) error {
+ umountAll := false
+ if c.Flag("all").Changed {
+ umountAll = true
+ }
+ umountContainerErrStr := "error unmounting container"
+ if len(args) == 0 && !umountAll {
+ return errors.New("at least one container ID must be specified")
+ }
+ if len(args) > 0 && umountAll {
+ return errors.New("when using the --all switch, you may not pass any container IDs")
+ }
+ if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
+ return err
+ }
+
+ store, err := getStore(c)
+ if err != nil {
+ return err
+ }
+
+ var lastError error
+ if len(args) > 0 {
+ for _, name := range args {
+ builder, err := openBuilder(getContext(), store, name)
+ if err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = fmt.Errorf("%s %s: %w", umountContainerErrStr, name, err)
+ continue
+ }
+ if builder.MountPoint == "" {
+ continue
+ }
+
+ if err = builder.Unmount(); err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = fmt.Errorf("%s %q: %w", umountContainerErrStr, builder.Container, err)
+ continue
+ }
+ fmt.Printf("%s\n", builder.ContainerID)
+ }
+ } else {
+ builders, err := openBuilders(store)
+ if err != nil {
+ return fmt.Errorf("reading build Containers: %w", err)
+ }
+ for _, builder := range builders {
+ if builder.MountPoint == "" {
+ continue
+ }
+
+ if err = builder.Unmount(); err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = fmt.Errorf("%s %q: %w", umountContainerErrStr, builder.Container, err)
+ continue
+ }
+ fmt.Printf("%s\n", builder.ContainerID)
+ }
+ }
+ return lastError
+}
diff --git a/cmd/buildah/unshare.go b/cmd/buildah/unshare.go
new file mode 100644
index 0000000..d45498d
--- /dev/null
+++ b/cmd/buildah/unshare.go
@@ -0,0 +1,150 @@
+//go:build linux
+// +build linux
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "sort"
+ "strings"
+
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/syndtr/gocapability/capability"
+)
+
+var (
+ unshareDescription = "\n Runs a command in a modified user namespace."
+ unshareCommand = &cobra.Command{
+ Use: "unshare",
+ Short: "Run a command in a modified user namespace",
+ Long: unshareDescription,
+ RunE: unshareCmd,
+ Example: `buildah unshare id
+ buildah unshare cat /proc/self/uid_map
+ buildah unshare buildah-script.sh`,
+ }
+ unshareMounts []string
+)
+
+func init() {
+ unshareCommand.SetUsageTemplate(UsageTemplate())
+ flags := unshareCommand.Flags()
+ flags.SetInterspersed(false)
+ flags.StringSliceVarP(&unshareMounts, "mount", "m", []string{}, "mount the specified containers (default [])")
+ rootCmd.AddCommand(unshareCommand)
+}
+
+func unshareMount(c *cobra.Command, mounts []string) ([]string, func(), error) {
+ var store storage.Store
+ var mountedContainers, env []string
+ if len(mounts) == 0 {
+ return nil, nil, nil
+ }
+ unmount := func() {
+ for _, mounted := range mountedContainers {
+ builder, err := openBuilder(getContext(), store, mounted)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("loading information about build container %q: %w", mounted, err))
+ continue
+ }
+ err = builder.Unmount()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("unmounting build container %q: %w", mounted, err))
+ continue
+ }
+ }
+ }
+ store, err := getStore(c)
+ if err != nil {
+ return nil, nil, err
+ }
+ for _, mountSpec := range mounts {
+ mount := strings.SplitN(mountSpec, "=", 2)
+ container := mountSpec
+ envVar := container
+ if len(mount) == 2 {
+ envVar = mount[0]
+ container = mount[1]
+ }
+ builder, err := openBuilder(getContext(), store, container)
+ if err != nil {
+ unmount()
+ return nil, nil, fmt.Errorf("loading information about build container %q: %w", container, err)
+ }
+ mountPoint, err := builder.Mount(builder.MountLabel)
+ if err != nil {
+ unmount()
+ return nil, nil, fmt.Errorf("mounting build container %q: %w", container, err)
+ }
+ logrus.Debugf("mounted container %q at %q", container, mountPoint)
+ mountedContainers = append(mountedContainers, container)
+ if envVar != "" {
+ envSpec := fmt.Sprintf("%s=%s", envVar, mountPoint)
+ logrus.Debugf("adding %q to environment", envSpec)
+ env = append(env, envSpec)
+ }
+ }
+ return env, unmount, nil
+}
+
+// unshareCmd execs whatever using the ID mappings that we want to use for ourselves
+func unshareCmd(c *cobra.Command, args []string) error {
+ // Set the default isolation type to use the "rootless" method.
+ if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
+ if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
+ logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
+ os.Exit(1)
+ }
+ }
+
+ // force reexec using the configured ID mappings
+ unshare.MaybeReexecUsingUserNamespace(true)
+ // exec the specified command, if there is one
+ if len(args) < 1 {
+ // try to exec the shell, if one's set
+ shell, shellSet := os.LookupEnv("SHELL")
+ if !shellSet {
+ logrus.Errorf("no command specified")
+ os.Exit(1)
+ }
+ args = []string{shell}
+ }
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Env = unshare.RootlessEnv()
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ mountEnvs, unmountMounts, err := unshareMount(c, unshareMounts)
+ if err != nil {
+ return err
+ }
+ cmd.Env = append(cmd.Env, mountEnvs...)
+ unshare.ExecRunnable(cmd, unmountMounts)
+ os.Exit(1)
+ return nil
+}
+
+func debugCapabilities() {
+ pid, err := capability.NewPid2(0)
+ if err != nil {
+ logrus.Errorf("error checking our capabilities: %v", err)
+ return
+ }
+ if err := pid.Load(); err != nil {
+ logrus.Errorf("error loading our current capabilities: %v", err)
+ return
+ }
+ knownCaps := capability.List()
+ effective := make([]string, 0, len(knownCaps))
+ for i := range knownCaps {
+ have := pid.Get(capability.EFFECTIVE, knownCaps[i])
+ effective = append(effective, fmt.Sprintf("%s=%v", knownCaps[i].String(), have))
+ }
+ sort.Strings(effective)
+ logrus.Debugf("effective capabilities: %v", effective)
+}
diff --git a/cmd/buildah/unshare_unsupported.go b/cmd/buildah/unshare_unsupported.go
new file mode 100644
index 0000000..cfd666b
--- /dev/null
+++ b/cmd/buildah/unshare_unsupported.go
@@ -0,0 +1,22 @@
+//go:build !linux
+// +build !linux
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ unshareCommand := cobra.Command{
+ Use: "unshare",
+ Hidden: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return nil
+ },
+ }
+ rootCmd.AddCommand(&unshareCommand)
+}
+
+func debugCapabilities() {
+}
diff --git a/cmd/buildah/version.go b/cmd/buildah/version.go
new file mode 100644
index 0000000..0566b71
--- /dev/null
+++ b/cmd/buildah/version.go
@@ -0,0 +1,115 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "runtime"
+ "strconv"
+ "time"
+
+ "github.com/containerd/containerd/platforms"
+ cniversion "github.com/containernetworking/cni/pkg/version"
+ "github.com/containers/buildah/define"
+ iversion "github.com/containers/image/v5/version"
+ ispecs "github.com/opencontainers/image-spec/specs-go"
+ rspecs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/spf13/cobra"
+)
+
+// Overwritten at build time.
+var (
+ GitCommit string
+ buildInfo string
+ cniVersion string
+)
+
+type versionInfo struct {
+ Version string `json:"version"`
+ GoVersion string `json:"goVersion"`
+ ImageSpec string `json:"imageSpec"`
+ RuntimeSpec string `json:"runtimeSpec"`
+ CniSpec string `json:"cniSpec"`
+ LibcniVersion string `json:"libcniVersion"`
+ ImageVersion string `json:"imageVersion"`
+ GitCommit string `json:"gitCommit"`
+ Built string `json:"built"`
+ OsArch string `json:"osArch"`
+ BuildPlatform string `json:"buildPlatform"`
+}
+
+type versionOptions struct {
+ json bool
+}
+
+func init() {
+ var opts versionOptions
+
+	// CLI command to print out the version info of buildah.
+ versionCommand := &cobra.Command{
+ Use: "version",
+ Short: "Display the Buildah version information",
+ Long: "Displays Buildah version information.",
+ RunE: func(c *cobra.Command, args []string) error {
+ return versionCmd(opts)
+ },
+ Args: cobra.NoArgs,
+ Example: `buildah version`,
+ }
+ versionCommand.SetUsageTemplate(UsageTemplate())
+
+ flags := versionCommand.Flags()
+ flags.BoolVar(&opts.json, "json", false, "output in JSON format")
+
+ rootCmd.AddCommand(versionCommand)
+}
+
+func versionCmd(opts versionOptions) error {
+ var err error
+ buildTime := int64(0)
+ if buildInfo != "" {
+		// Convert the Unix time stamp from string to int64.
+ buildTime, err = strconv.ParseInt(buildInfo, 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ version := versionInfo{
+ Version: define.Version,
+ GoVersion: runtime.Version(),
+ ImageSpec: ispecs.Version,
+ RuntimeSpec: rspecs.Version,
+ CniSpec: cniversion.Current(),
+ LibcniVersion: cniVersion,
+ ImageVersion: iversion.Version,
+ GitCommit: GitCommit,
+ Built: time.Unix(buildTime, 0).Format(time.ANSIC),
+ OsArch: runtime.GOOS + "/" + runtime.GOARCH,
+ BuildPlatform: platforms.DefaultString(),
+ }
+
+ if opts.json {
+ data, err := json.MarshalIndent(version, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s\n", data)
+ return nil
+ }
+
+ fmt.Println("Version: ", version.Version)
+ fmt.Println("Go Version: ", version.GoVersion)
+ fmt.Println("Image Spec: ", version.ImageSpec)
+ fmt.Println("Runtime Spec: ", version.RuntimeSpec)
+ fmt.Println("CNI Spec: ", version.CniSpec)
+ fmt.Println("libcni Version: ", version.LibcniVersion)
+ fmt.Println("image Version: ", version.ImageVersion)
+ fmt.Println("Git Commit: ", version.GitCommit)
+
+	// Print the build time in a readable format.
+ fmt.Println("Built: ", version.Built)
+ fmt.Println("OS/Arch: ", version.OsArch)
+ fmt.Println("BuildPlatform: ", version.BuildPlatform)
+
+ return nil
+}
diff --git a/commit.go b/commit.go
new file mode 100644
index 0000000..ef55e54
--- /dev/null
+++ b/commit.go
@@ -0,0 +1,428 @@
+package buildah
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/pkg/blobcache"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libimage"
+ "github.com/containers/common/libimage/manifests"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/stringid"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+	// BuilderIdentityAnnotation is the name of the annotation, stored on an
+	// image at commit time, whose value identifies the name and version of
+	// the producer of the image.
+	BuilderIdentityAnnotation = "io.buildah.version"
+)
+
+// CommitOptions can be used to alter how an image is committed.
+type CommitOptions struct {
+	// PreferredManifestType is the preferred type of image manifest.  The
+	// image configuration format will be of a compatible type.
+	PreferredManifestType string
+	// Compression specifies the type of compression which is applied to
+	// layer blobs.  The default is to not use compression, but
+	// archive.Gzip is recommended.
+	Compression archive.Compression
+	// SignaturePolicyPath specifies an override location for the signature
+	// policy which should be used for verifying the new image as it is
+	// being written.  Except in specific circumstances, no value should be
+	// specified, indicating that the shared, system-wide default policy
+	// should be used.
+	SignaturePolicyPath string
+	// AdditionalTags is a list of additional names to add to the image, if
+	// the transport to which we're writing the image gives us a way to add
+	// them.
+	AdditionalTags []string
+	// ReportWriter is an io.Writer which will be used to log the writing
+	// of the new image.
+	ReportWriter io.Writer
+	// HistoryTimestamp is the timestamp used when creating new items in the
+	// image's history.  If unset, the current time will be used.
+	HistoryTimestamp *time.Time
+	// SystemContext (github.com/containers/image/types SystemContext) holds
+	// credentials and other authentication/authorization information.
+	SystemContext *types.SystemContext
+	// IIDFile tells the builder to write the image ID to the specified file.
+	IIDFile string
+	// Squash tells the builder to produce an image with a single layer
+	// instead of with possibly more than one layer.
+	Squash bool
+	// OmitHistory tells the builder to ignore the history of build layers and
+	// base while preparing the image-spec; setting this to true will ensure no
+	// history is added to the image-spec.  (default false)
+	OmitHistory bool
+	// BlobDirectory is the name of a directory in which we'll look for
+	// prebuilt copies of layer blobs that we might otherwise need to
+	// regenerate from on-disk layers.  If blobs are available, the
+	// manifest of the new image will reference the blobs rather than
+	// on-disk layers.
+	BlobDirectory string
+	// EmptyLayer tells the builder to omit the diff for the working
+	// container.
+	EmptyLayer bool
+	// OmitTimestamp forces epoch 0 as created timestamp to allow for
+	// deterministic, content-addressable builds.
+	//
+	// Deprecated: use HistoryTimestamp instead.
+	OmitTimestamp bool
+	// SignBy is the fingerprint of a GPG key to use for signing the image.
+	SignBy string
+	// Manifest names the manifest list to add the image to.
+	Manifest string
+	// MaxRetries is the maximum number of attempts we'll make to commit
+	// the image to an external registry if the first attempt fails.
+	MaxRetries int
+	// RetryDelay is how long to wait before retrying a commit attempt to a
+	// registry.
+	RetryDelay time.Duration
+	// OciEncryptConfig when non-nil indicates that an image should be encrypted.
+	// The encryption options are derived from the construction of the
+	// EncryptConfig object.
+	OciEncryptConfig *encconfig.EncryptConfig
+	// OciEncryptLayers represents the list of layers to encrypt.
+	// If nil, don't encrypt any layers.
+	// If non-nil and len==0, denotes encrypt all layers.
+	// Integers in the slice represent 0-indexed layer indices, with support
+	// for negative indexing, i.e. 0 is the first layer and -1 is the last
+	// (top-most) layer.
+	OciEncryptLayers *[]int
+	// ConfidentialWorkloadOptions is used to force the output image's rootfs to contain a
+	// LUKS-compatibly encrypted disk image (for use with krun) instead of the usual
+	// contents of a rootfs.
+	ConfidentialWorkloadOptions ConfidentialWorkloadOptions
+	// UnsetEnvs is a list of environments to not add to the final image.
+	//
+	// Deprecated: use UnsetEnv() before committing instead.
+	UnsetEnvs []string
+	// OverrideConfig is an optional Schema2Config which can override parts
+	// of the working container's configuration for the image that is being
+	// committed.
+	OverrideConfig *manifest.Schema2Config
+	// OverrideChanges is a slice of Dockerfile-style instructions to make
+	// to the configuration of the image that is being committed, after
+	// OverrideConfig is applied.
+	OverrideChanges []string
+}
+
+var (
+	// storageAllowedPolicyScopes overrides the policy for local storage
+	// to ensure that we can read images from it: the wildcard scope ""
+	// accepts anything, regardless of what the system policy says about
+	// the containers-storage transport.
+	storageAllowedPolicyScopes = signature.PolicyTransportScopes{
+		"": []signature.PolicyRequirement{
+			signature.NewPRInsecureAcceptAnything(),
+		},
+	}
+)
+
+// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
+// variable, if it's set. The contents are expected to be a JSON-encoded
+// github.com/openshift/api/config/v1.Image, set by an OpenShift build
+// controller that arranged for us to be run in a container.
+func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (insecure bool, err error) {
+	// The policy only applies to docker-transport references with a
+	// non-empty registry domain; anything else is always allowed.
+	transport := dest.Transport()
+	if transport == nil || transport.Name() != docker.Transport.Name() {
+		return false, nil
+	}
+	dref := dest.DockerReference()
+	if dref == nil || reference.Domain(dref) == "" {
+		return false, nil
+	}
+	registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES")
+	if !ok || len(registrySources) == 0 {
+		return false, nil
+	}
+
+	// Local mirror of the fields we need from the
+	// github.com/openshift/api/config/v1 RegistrySources type.
+	var sources struct {
+		InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+		BlockedRegistries  []string `json:"blockedRegistries,omitempty"`
+		AllowedRegistries  []string `json:"allowedRegistries,omitempty"`
+	}
+	if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
+		return false, fmt.Errorf("parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err)
+	}
+
+	domain := reference.Domain(dref)
+	// A domain on the block list is rejected outright.
+	for _, blockedDomain := range sources.BlockedRegistries {
+		if blockedDomain == domain {
+			return false, fmt.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, domain)
+		}
+	}
+	// A non-empty allow list must contain the domain.
+	if len(sources.AllowedRegistries) > 0 {
+		found := false
+		for _, allowedDomain := range sources.AllowedRegistries {
+			if allowedDomain == domain {
+				found = true
+			}
+		}
+		if !found {
+			return false, fmt.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, domain)
+		}
+	}
+	// Any insecure registries being listed at all marks the operation as
+	// insecure; the list contents are not matched against the domain here.
+	return len(sources.InsecureRegistries) > 0, nil
+}
+
+// addManifest adds the image named by imageSpec to the manifest list named
+// manifestName, creating the list if it does not already exist, and returns
+// the image ID of the saved manifest-list image.
+func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpec string) (string, error) {
+	var create bool
+	systemContext := &types.SystemContext{}
+	var list manifests.List
+	runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return "", err
+	}
+	manifestList, err := runtime.LookupManifestList(manifestName)
+	if err != nil {
+		// Lookup failed: assume the list doesn't exist yet and create a
+		// fresh one.
+		create = true
+		list = manifests.Create()
+	} else {
+		// The list exists: hold its image-level lock while we modify it.
+		locker, err := manifests.LockerForImage(b.store, manifestList.ID())
+		if err != nil {
+			return "", err
+		}
+		locker.Lock()
+		defer locker.Unlock()
+		_, list, err = manifests.LoadFromImage(b.store, manifestList.ID())
+		if err != nil {
+			return "", err
+		}
+	}
+
+	// Expand the list name into fully-qualified image names for saving.
+	names, err := util.ExpandNames([]string{manifestName}, systemContext, b.store)
+	if err != nil {
+		return "", fmt.Errorf("encountered while expanding manifest list name %q: %w", manifestName, err)
+	}
+
+	ref, err := util.VerifyTagName(imageSpec)
+	if err != nil {
+		// check if the local image exists
+		if ref, _, err = util.FindImage(b.store, "", systemContext, imageSpec); err != nil {
+			return "", err
+		}
+	}
+
+	if _, err = list.Add(ctx, systemContext, ref, true); err != nil {
+		return "", err
+	}
+	var imageID string
+	if create {
+		// New list: save it under the expanded names with the Docker
+		// manifest-list media type.
+		imageID, err = list.SaveToImage(b.store, "", names, manifest.DockerV2ListMediaType)
+	} else {
+		// Existing list: save back into the same image, keeping its
+		// current names and media type.
+		imageID, err = list.SaveToImage(b.store, manifestList.ID(), nil, "")
+	}
+	return imageID, err
+}
+
+// Commit writes the contents of the container, along with its updated
+// configuration, to a new image in the specified location, and if we know how,
+// add any additional tags that were specified. Returns the ID of the new image
+// if commit was successful and the image destination was local.
+func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) {
+	var (
+		imgID string
+		src   types.ImageReference
+	)
+
+	// Translate the deprecated OmitTimestamp flag into an epoch-0
+	// HistoryTimestamp; supplying both is a caller error.
+	if options.OmitTimestamp {
+		if options.HistoryTimestamp != nil {
+			return imgID, nil, "", fmt.Errorf("OmitTimestamp and HistoryTimestamp can not be used together")
+		}
+		timestamp := time.Unix(0, 0).UTC()
+		options.HistoryTimestamp = &timestamp
+	}
+
+	// If we weren't given a name, build a destination reference using a
+	// temporary name that we'll remove later.  The correct thing to do
+	// would be to read the manifest and configuration blob, and ask the
+	// manifest for the ID that we'd give the image, but that computation
+	// requires that we know the digests of the layer blobs, which we don't
+	// want to compute here because we'll have to do it again when
+	// cp.Image() instantiates a source image, and we don't want to do the
+	// work twice.
+	nameToRemove := ""
+	if dest == nil {
+		nameToRemove = stringid.GenerateRandomID() + "-tmp"
+		dest2, err := is.Transport.ParseStoreReference(b.store, nameToRemove)
+		if err != nil {
+			return imgID, nil, "", fmt.Errorf("creating temporary destination reference for image: %w", err)
+		}
+		dest = dest2
+	}
+
+	systemContext := getSystemContext(b.store, options.SystemContext, options.SignaturePolicyPath)
+
+	// Honor any registries.conf-level blocks on the destination.
+	blocked, err := isReferenceBlocked(dest, systemContext)
+	if err != nil {
+		return "", nil, "", fmt.Errorf("checking if committing to registry for %q is blocked: %w", transports.ImageName(dest), err)
+	}
+	if blocked {
+		return "", nil, "", fmt.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
+	}
+
+	// Load the system signing policy.
+	commitPolicy, err := signature.DefaultPolicy(systemContext)
+	if err != nil {
+		return "", nil, "", fmt.Errorf("obtaining default signature policy: %w", err)
+	}
+	// Override the settings for local storage to make sure that we can always read the source "image".
+	commitPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
+
+	policyContext, err := signature.NewPolicyContext(commitPolicy)
+	if err != nil {
+		return imgID, nil, "", fmt.Errorf("creating new signature policy context: %w", err)
+	}
+	defer func() {
+		if err2 := policyContext.Destroy(); err2 != nil {
+			logrus.Debugf("error destroying signature policy context: %v", err2)
+		}
+	}()
+
+	// Check if the commit is blocked by $BUILD_REGISTRY_SOURCES.
+	insecure, err := checkRegistrySourcesAllows("commit to", dest)
+	if err != nil {
+		return imgID, nil, "", err
+	}
+	if insecure {
+		if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+			return imgID, nil, "", fmt.Errorf("can't require tls verification on an insecured registry")
+		}
+		systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+		systemContext.OCIInsecureSkipTLSVerify = true
+		systemContext.DockerDaemonInsecureSkipTLSVerify = true
+	}
+	logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest))
+
+	// Build an image reference from which we can copy the finished image.
+	src, err = b.makeContainerImageRef(options)
+	if err != nil {
+		return imgID, nil, "", fmt.Errorf("computing layer digests and building metadata for container %q: %w", b.ContainerID, err)
+	}
+	// In case we're using caching, decide how to handle compression for a cache.
+	// If we're using blob caching, set it up for the source.
+	maybeCachedSrc := src
+	maybeCachedDest := dest
+	if options.BlobDirectory != "" {
+		compress := types.PreserveOriginal
+		if options.Compression != archive.Uncompressed {
+			compress = types.Compress
+		}
+		cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
+		if err != nil {
+			return imgID, nil, "", fmt.Errorf("wrapping image reference %q in blob cache at %q: %w", transports.ImageName(src), options.BlobDirectory, err)
+		}
+		maybeCachedSrc = cache
+		cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress)
+		if err != nil {
+			return imgID, nil, "", fmt.Errorf("wrapping image reference %q in blob cache at %q: %w", transports.ImageName(dest), options.BlobDirectory, err)
+		}
+		maybeCachedDest = cache
+	}
+	// "Copy" our image to where it needs to be.
+	switch options.Compression {
+	case archive.Uncompressed:
+		systemContext.OCIAcceptUncompressedLayers = true
+	case archive.Gzip:
+		systemContext.DirForceCompress = true
+	}
+
+	// Make sure the copy is performed for the platform the builder is
+	// actually configured with.
+	if systemContext.ArchitectureChoice != b.Architecture() {
+		systemContext.ArchitectureChoice = b.Architecture()
+	}
+	if systemContext.OSChoice != b.OS() {
+		systemContext.OSChoice = b.OS()
+	}
+
+	var manifestBytes []byte
+	if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
+		return imgID, nil, "", fmt.Errorf("copying layers and metadata for container %q: %w", b.ContainerID, err)
+	}
+	// If we've got more names to attach, and we know how to do that for
+	// the transport that we're writing the new image to, add them now.
+	if len(options.AdditionalTags) > 0 {
+		switch dest.Transport().Name() {
+		case is.Transport.Name():
+			_, img, err := is.ResolveReference(dest)
+			if err != nil {
+				return imgID, nil, "", fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
+			}
+			if err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags); err != nil {
+				return imgID, nil, "", fmt.Errorf("setting image names to %v: %w", append(img.Names, options.AdditionalTags...), err)
+			}
+			logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
+		default:
+			logrus.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name())
+		}
+	}
+
+	if dest.Transport().Name() == is.Transport.Name() {
+		dest2, img, err := is.ResolveReference(dest)
+		if err != nil {
+			return imgID, nil, "", fmt.Errorf("locating image %q in local storage: %w", transports.ImageName(dest), err)
+		}
+		dest = dest2
+		imgID = img.ID
+		// Now that the image exists locally, strip the temporary name we
+		// attached when the caller didn't give us a destination.
+		toPruneNames := make([]string, 0, len(img.Names))
+		for _, name := range img.Names {
+			if nameToRemove != "" && strings.Contains(name, nameToRemove) {
+				toPruneNames = append(toPruneNames, name)
+			}
+		}
+		if len(toPruneNames) > 0 {
+			if err = b.store.RemoveNames(imgID, toPruneNames); err != nil {
+				return imgID, nil, "", fmt.Errorf("failed to remove temporary name from image %q: %w", imgID, err)
+			}
+			logrus.Debugf("removing %v from assigned names to image %q", nameToRemove, img.ID)
+		}
+		if options.IIDFile != "" {
+			if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil {
+				return imgID, nil, "", err
+			}
+		}
+	}
+
+	manifestDigest, err := manifest.Digest(manifestBytes)
+	if err != nil {
+		return imgID, nil, "", fmt.Errorf("computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
+	}
+
+	// When the destination has a docker-style name, return a canonical
+	// (name@digest) reference for it as well.
+	var ref reference.Canonical
+	if name := dest.DockerReference(); name != nil {
+		ref, err = reference.WithDigest(name, manifestDigest)
+		if err != nil {
+			logrus.Warnf("error generating canonical reference with name %q and digest %s: %v", name, manifestDigest.String(), err)
+		}
+	}
+
+	if options.Manifest != "" {
+		manifestID, err := b.addManifest(ctx, options.Manifest, imgID)
+		if err != nil {
+			return imgID, nil, "", err
+		}
+		logrus.Debugf("added imgID %s to manifestID %s", imgID, manifestID)
+	}
+	return imgID, ref, manifestDigest, nil
+}
diff --git a/common.go b/common.go
new file mode 100644
index 0000000..ea28be1
--- /dev/null
+++ b/common.go
@@ -0,0 +1,88 @@
+package buildah
+
+import (
+ "context"
+ "io"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/common/pkg/retry"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/unshare"
+)
+
+const (
+	// OCI is the name of the "oci" image format (alias of define.OCI).
+	OCI = define.OCI
+	// DOCKER is the name of the "docker" image format (alias of define.DOCKER).
+	DOCKER = define.DOCKER
+)
+
+// getCopyOptions assembles the cp.Options used for copying an image.  The
+// source and destination contexts start out as store-derived defaults (see
+// getSystemContext); note that a caller-supplied source or destination
+// SystemContext wholly replaces those defaults rather than being merged
+// with them.
+func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string, ociEncryptLayers *[]int, ociEncryptConfig *encconfig.EncryptConfig, ociDecryptConfig *encconfig.DecryptConfig) *cp.Options {
+	sourceCtx := getSystemContext(store, nil, "")
+	if sourceSystemContext != nil {
+		// Shallow copy: the caller's context overrides the defaults.
+		*sourceCtx = *sourceSystemContext
+	}
+
+	destinationCtx := getSystemContext(store, nil, "")
+	if destinationSystemContext != nil {
+		*destinationCtx = *destinationSystemContext
+	}
+	return &cp.Options{
+		ReportWriter:          reportWriter,
+		SourceCtx:             sourceCtx,
+		DestinationCtx:        destinationCtx,
+		ForceManifestMIMEType: manifestType,
+		RemoveSignatures:      removeSignatures,
+		SignBy:                addSigner,
+		OciEncryptConfig:      ociEncryptConfig,
+		OciDecryptConfig:      ociDecryptConfig,
+		OciEncryptLayers:      ociEncryptLayers,
+	}
+}
+
+// getSystemContext builds a types.SystemContext for image operations: it
+// starts from an optional set of defaults, overrides the signature policy
+// path when one is given, and — for rootless users with a store — prefers a
+// per-store registries.conf under the graph root if that file exists.
+func getSystemContext(store storage.Store, defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext {
+	sc := &types.SystemContext{}
+	if defaults != nil {
+		// Start from a shallow copy of the caller's defaults.
+		*sc = *defaults
+	}
+	if signaturePolicyPath != "" {
+		sc.SignaturePolicyPath = signaturePolicyPath
+	}
+	if store != nil && sc.SystemRegistriesConfPath == "" && unshare.IsRootless() {
+		userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf")
+		if _, err := os.Stat(userRegistriesFile); err == nil {
+			sc.SystemRegistriesConfPath = userRegistriesFile
+		}
+	}
+	return sc
+}
+
+// retryCopyImage copies src to dest via cp.Image, retrying up to maxRetries
+// times (waiting retryDelay between attempts) on errors the retry package
+// considers retryable, and returns the manifest of the copied image.  When
+// "registry" is non-nil but not a docker-transport reference, the closure
+// stashes the copy error in lastErr and reports success to the retry loop,
+// so purely local copies are never retried; the stashed error is still
+// surfaced to the caller afterwards.
+func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
+	var (
+		manifestBytes []byte
+		err           error
+		lastErr       error
+	)
+	err = retry.RetryIfNecessary(ctx, func() error {
+		manifestBytes, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
+		if registry != nil && registry.Transport().Name() != docker.Transport.Name() {
+			// Not a registry destination: suppress retries by returning
+			// nil, but remember the real outcome.
+			lastErr = err
+			return nil
+		}
+		return err
+	}, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay})
+	if lastErr != nil {
+		err = lastErr
+	}
+	return manifestBytes, err
+}
diff --git a/config.go b/config.go
new file mode 100644
index 0000000..3a287c7
--- /dev/null
+++ b/config.go
@@ -0,0 +1,747 @@
+package buildah
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/docker"
+ internalUtil "github.com/containers/buildah/internal/util"
+ "github.com/containers/common/pkg/util"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/stringid"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// unmarshalConvertedConfig obtains the config blob of img valid for the wantedManifestMIMEType format
+// (either as it exists, or converting the image if necessary), and unmarshals it into dest.
+// NOTE: The MIME type is of the _manifest_, not of the _config_ that is returned.
+func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error {
+	_, actualManifestMIMEType, err := img.Manifest(ctx)
+	if err != nil {
+		return fmt.Errorf("getting manifest MIME type for %q: %w", transports.ImageName(img.Reference()), err)
+	}
+	if wantedManifestMIMEType != actualManifestMIMEType {
+		// Conversion needed: first normalize the recorded layer
+		// compression, then request an updated image in the wanted format.
+		layerInfos := img.LayerInfos()
+		for i := range layerInfos { // force the "compression" to gzip, which is supported by all of the formats we care about
+			layerInfos[i].CompressionOperation = types.Compress
+			layerInfos[i].CompressionAlgorithm = &compression.Gzip
+		}
+		updatedImg, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
+			LayerInfos: layerInfos,
+		})
+		if err != nil {
+			return fmt.Errorf("resetting recorded compression for %q: %w", transports.ImageName(img.Reference()), err)
+		}
+		secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{
+			ManifestMIMEType: wantedManifestMIMEType,
+		})
+		if err != nil {
+			return fmt.Errorf("converting image %q from %q to %q: %w", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType, err)
+		}
+		img = secondUpdatedImg
+	}
+	config, err := img.ConfigBlob(ctx)
+	if err != nil {
+		return fmt.Errorf("reading %s config from %q: %w", wantedManifestMIMEType, transports.ImageName(img.Reference()), err)
+	}
+	if err := json.Unmarshal(config, dest); err != nil {
+		return fmt.Errorf("parsing %s configuration %q from %q: %w", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference()), err)
+	}
+	return nil
+}
+
+// initConfig seeds the builder's configuration from img when one is given
+// (i.e. not "FROM scratch"): it records the raw manifest and config blobs,
+// decodes the configuration into both Docker (schema2) and OCI forms, and
+// copies annotations from an OCI manifest.  It then makes sure a logger is
+// in place and normalizes the configuration via fixupConfig.
+func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.SystemContext) error {
+	if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one.
+		rawManifest, manifestMIMEType, err := img.Manifest(ctx)
+		if err != nil {
+			return fmt.Errorf("reading image manifest for %q: %w", transports.ImageName(img.Reference()), err)
+		}
+		rawConfig, err := img.ConfigBlob(ctx)
+		if err != nil {
+			return fmt.Errorf("reading image configuration for %q: %w", transports.ImageName(img.Reference()), err)
+		}
+		b.Manifest = rawManifest
+		b.Config = rawConfig
+
+		// Decode the configuration twice: once as Docker schema2, once as
+		// OCI, converting the image as needed for each.
+		dimage := docker.V2Image{}
+		if err := unmarshalConvertedConfig(ctx, &dimage, img, manifest.DockerV2Schema2MediaType); err != nil {
+			return err
+		}
+		b.Docker = dimage
+
+		oimage := ociv1.Image{}
+		if err := unmarshalConvertedConfig(ctx, &oimage, img, ociv1.MediaTypeImageManifest); err != nil {
+			return err
+		}
+		b.OCIv1 = oimage
+
+		if manifestMIMEType == ociv1.MediaTypeImageManifest {
+			// Attempt to recover format-specific data from the manifest.
+			v1Manifest := ociv1.Manifest{}
+			if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil {
+				return fmt.Errorf("parsing OCI manifest %q: %w", string(b.Manifest), err)
+			}
+			for k, v := range v1Manifest.Annotations {
+				b.ImageAnnotations[k] = v
+			}
+		}
+	}
+
+	b.setupLogger()
+	b.fixupConfig(sys)
+	return nil
+}
+
+// fixupConfig normalizes the builder's configuration: it promotes image-level
+// Docker settings over the container-level ones, stamps creation times when
+// missing, and fills in OS, architecture, and variant — preferring explicit
+// choices from sys, falling back to the current runtime — normalizing
+// shorthand architecture names into arch+variant pairs.  Docker-format images
+// without a hostname get a random one.
+func (b *Builder) fixupConfig(sys *types.SystemContext) {
+	if b.Docker.Config != nil {
+		// Prefer image-level settings over those from the container it was built from.
+		b.Docker.ContainerConfig = *b.Docker.Config
+	}
+	b.Docker.Config = &b.Docker.ContainerConfig
+	b.Docker.DockerVersion = ""
+	now := time.Now().UTC()
+	if b.Docker.Created.IsZero() {
+		b.Docker.Created = now
+	}
+	if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() {
+		b.OCIv1.Created = &now
+	}
+	if b.OS() == "" {
+		if sys != nil && sys.OSChoice != "" {
+			b.SetOS(sys.OSChoice)
+		} else {
+			b.SetOS(runtime.GOOS)
+		}
+	}
+	if b.Architecture() == "" {
+		if sys != nil && sys.ArchitectureChoice != "" {
+			b.SetArchitecture(sys.ArchitectureChoice)
+		} else {
+			b.SetArchitecture(runtime.GOARCH)
+		}
+		// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
+		ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
+		b.SetArchitecture(ps.Architecture)
+		b.SetVariant(ps.Variant)
+	}
+	if b.Variant() == "" {
+		if sys != nil && sys.VariantChoice != "" {
+			b.SetVariant(sys.VariantChoice)
+		}
+		// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
+		ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
+		b.SetArchitecture(ps.Architecture)
+		b.SetVariant(ps.Variant)
+	}
+	if b.Format == define.Dockerv2ImageManifest && b.Hostname() == "" {
+		b.SetHostname(stringid.TruncateID(stringid.GenerateRandomID()))
+	}
+}
+
+// setupLogger lazily equips the builder with its own logrus logger, writing
+// to stderr at the process-wide logrus level, when none was set by the caller.
+func (b *Builder) setupLogger() {
+	if b.Logger != nil {
+		return
+	}
+	logger := logrus.New()
+	logger.SetOutput(os.Stderr)
+	logger.SetLevel(logrus.GetLevel())
+	b.Logger = logger
+}
+
+// Annotations returns a set of key-value pairs from the image's manifest.
+// The result goes through copyStringStringMap, so callers presumably receive
+// their own copy rather than the builder's internal map — confirm in that
+// helper before mutating the result.
+func (b *Builder) Annotations() map[string]string {
+	return copyStringStringMap(b.ImageAnnotations)
+}
+
+// SetAnnotation adds or overwrites a key's value from the image's manifest.
+// Note: this setting is not present in the Docker v2 image format, so it is
+// discarded when writing images using Docker v2 formats.
+func (b *Builder) SetAnnotation(key, value string) {
+	// Lazily allocate the map so setting works on a zero-value builder.
+	if b.ImageAnnotations == nil {
+		b.ImageAnnotations = map[string]string{}
+	}
+	b.ImageAnnotations[key] = value
+}
+
+// UnsetAnnotation removes a key and its value from the image's manifest, if
+// it's present.  (Deleting from a nil map is a no-op.)
+func (b *Builder) UnsetAnnotation(key string) {
+	delete(b.ImageAnnotations, key)
+}
+
+// ClearAnnotations removes all keys and their values from the image's
+// manifest.
+func (b *Builder) ClearAnnotations() {
+	b.ImageAnnotations = map[string]string{}
+}
+
+// CreatedBy returns a description of how this image was built.
+func (b *Builder) CreatedBy() string {
+	return b.ImageCreatedBy
+}
+
+// SetCreatedBy sets the description of how this image was built.
+func (b *Builder) SetCreatedBy(how string) {
+	b.ImageCreatedBy = how
+}
+
+// OS returns a name of the OS on which the container, or a container built
+// using an image built from this container, is intended to be run.
+// The OCI configuration is treated as the source of truth for reads.
+func (b *Builder) OS() string {
+	return b.OCIv1.OS
+}
+
+// SetOS sets the name of the OS on which the container, or a container built
+// using an image built from this container, is intended to be run.
+// The value is mirrored into both the OCI and Docker configurations so that
+// either output format sees it.
+func (b *Builder) SetOS(os string) {
+	b.OCIv1.OS = os
+	b.Docker.OS = os
+}
+
+// OSVersion returns a version of the OS on which the container, or a container
+// built using an image built from this container, is intended to be run.
+func (b *Builder) OSVersion() string {
+	return b.OCIv1.OSVersion
+}
+
+// SetOSVersion sets the version of the OS on which the container, or a
+// container built using an image built from this container, is intended to be
+// run.  Mirrored into both the OCI and Docker configurations.
+func (b *Builder) SetOSVersion(version string) {
+	b.OCIv1.OSVersion = version
+	b.Docker.OSVersion = version
+}
+
+// OSFeatures returns a list of OS features which the container, or a container
+// built using an image built from this container, depends on the OS supplying.
+// The returned slice is a copy, so callers may modify it freely.
+func (b *Builder) OSFeatures() []string {
+	return copyStringSlice(b.OCIv1.OSFeatures)
+}
+
+// SetOSFeature adds a feature of the OS which the container, or a container
+// built using an image built from this container, depends on the OS supplying.
+// Duplicate features are not added; both OCI and Docker lists are maintained.
+func (b *Builder) SetOSFeature(feature string) {
+	if !util.StringInSlice(feature, b.OCIv1.OSFeatures) {
+		b.OCIv1.OSFeatures = append(b.OCIv1.OSFeatures, feature)
+	}
+	if !util.StringInSlice(feature, b.Docker.OSFeatures) {
+		b.Docker.OSFeatures = append(b.Docker.OSFeatures, feature)
+	}
+}
+
+// UnsetOSFeature removes a feature of the OS which the container, or a
+// container built using an image built from this container, depends on the OS
+// supplying.
+func (b *Builder) UnsetOSFeature(feature string) {
+	// dropFeature returns list without any occurrences of feature; when the
+	// feature is absent, the original slice is returned untouched.
+	dropFeature := func(list []string) []string {
+		if !util.StringInSlice(feature, list) {
+			return list
+		}
+		kept := make([]string, 0, len(list))
+		for _, f := range list {
+			if f != feature {
+				kept = append(kept, f)
+			}
+		}
+		return kept
+	}
+	// Both the OCI and Docker configurations carry the feature list.
+	b.OCIv1.OSFeatures = dropFeature(b.OCIv1.OSFeatures)
+	b.Docker.OSFeatures = dropFeature(b.Docker.OSFeatures)
+}
+
+// ClearOSFeatures clears the list of features of the OS which the container,
+// or a container built using an image built from this container, depends on
+// the OS supplying.  Both the OCI and Docker lists are reset.
+func (b *Builder) ClearOSFeatures() {
+	b.OCIv1.OSFeatures = []string{}
+	b.Docker.OSFeatures = []string{}
+}
+
+// Architecture returns a name of the architecture on which the container, or a
+// container built using an image built from this container, is intended to be
+// run.  The OCI configuration is treated as the source of truth for reads.
+func (b *Builder) Architecture() string {
+	return b.OCIv1.Architecture
+}
+
+// SetArchitecture sets the name of the architecture on which the container, or
+// a container built using an image built from this container, is intended to
+// be run.  Mirrored into both the OCI and Docker configurations.
+func (b *Builder) SetArchitecture(arch string) {
+	b.OCIv1.Architecture = arch
+	b.Docker.Architecture = arch
+}
+
+// Variant returns a name of the architecture variant on which the container,
+// or a container built using an image built from this container, is intended
+// to be run.
+func (b *Builder) Variant() string {
+	return b.OCIv1.Variant
+}
+
+// SetVariant sets the name of the architecture variant on which the container,
+// or a container built using an image built from this container, is intended
+// to be run.  Mirrored into both the OCI and Docker configurations.
+func (b *Builder) SetVariant(variant string) {
+	b.Docker.Variant = variant
+	b.OCIv1.Variant = variant
+}
+
+// Maintainer returns contact information for the person who built the image.
+func (b *Builder) Maintainer() string {
+	return b.OCIv1.Author
+}
+
+// SetMaintainer sets contact information for the person who built the image.
+func (b *Builder) SetMaintainer(who string) {
+	b.OCIv1.Author = who
+	b.Docker.Author = who
+}
+
+// User returns information about the user as whom the container, or a
+// container built using an image built from this container, should be run.
+func (b *Builder) User() string {
+	return b.OCIv1.Config.User
+}
+
+// SetUser sets information about the user as whom the container, or a
+// container built using an image built from this container, should be run.
+// Acceptable forms are a user name or ID, optionally followed by a colon and a
+// group name or ID.
+func (b *Builder) SetUser(spec string) {
+	b.OCIv1.Config.User = spec
+	b.Docker.Config.User = spec
+}
+
+// OnBuild returns the OnBuild value from the container.
+// A copy is returned so callers cannot mutate the stored slice.
+func (b *Builder) OnBuild() []string {
+ return copyStringSlice(b.Docker.Config.OnBuild)
+}
+
+// ClearOnBuild removes all values from the OnBuild structure
+func (b *Builder) ClearOnBuild() {
+ b.Docker.Config.OnBuild = []string{}
+}
+
+// SetOnBuild sets a trigger instruction to be executed when the image is used
+// as the base of another image.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+func (b *Builder) SetOnBuild(onBuild string) {
+ if onBuild != "" && b.Format != define.Dockerv2ImageManifest {
+ b.Logger.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild)
+ }
+ // Appends to (rather than replaces) any previously-set triggers.
+ b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild)
+}
+
+// WorkDir returns the default working directory for running commands in the
+// container, or in a container built using an image built from this container.
+func (b *Builder) WorkDir() string {
+ return b.OCIv1.Config.WorkingDir
+}
+
+// SetWorkDir sets the location of the default working directory for running
+// commands in the container, or in a container built using an image built from
+// this container.
+func (b *Builder) SetWorkDir(there string) {
+ b.OCIv1.Config.WorkingDir = there
+ b.Docker.Config.WorkingDir = there
+}
+
+// Shell returns the default shell for running commands in the
+// container, or in a container built using an image built from this container.
+// A copy is returned so callers cannot mutate the stored slice.
+func (b *Builder) Shell() []string {
+ return copyStringSlice(b.Docker.Config.Shell)
+}
+
+// SetShell sets the default shell for running
+// commands in the container, or in a container built using an image built from
+// this container.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+func (b *Builder) SetShell(shell []string) {
+ if len(shell) > 0 && b.Format != define.Dockerv2ImageManifest {
+ b.Logger.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell)
+ }
+
+ b.Docker.Config.Shell = copyStringSlice(shell)
+}
+
+// Env returns a list of key-value pairs to be set when running commands in the
+// container, or in a container built using an image built from this container.
+// A copy is returned so callers cannot mutate the stored slice.
+func (b *Builder) Env() []string {
+ return copyStringSlice(b.OCIv1.Config.Env)
+}
+
+// removeEnvKey returns a new, non-nil slice containing every entry of env
+// except those whose key is k (i.e. entries with the prefix "k=").
+func removeEnvKey(env []string, k string) []string {
+ n := make([]string, 0, len(env))
+ for _, kv := range env {
+ if !strings.HasPrefix(kv, k+"=") {
+ n = append(n, kv)
+ }
+ }
+ return n
+}
+
+// SetEnv adds or overwrites a value to the set of environment strings which
+// should be set when running commands in the container, or in a container
+// built using an image built from this container.
+func (b *Builder) SetEnv(k string, v string) {
+ // Drop any existing entries for k first, then append the new pair, so
+ // the most recent setting always wins and appears last.
+ b.OCIv1.Config.Env = append(removeEnvKey(b.OCIv1.Config.Env, k), k+"="+v)
+ b.Docker.Config.Env = append(removeEnvKey(b.Docker.Config.Env, k), k+"="+v)
+}
+
+// UnsetEnv removes a value from the set of environment strings which should be
+// set when running commands in this container, or in a container built using
+// an image built from this container.
+func (b *Builder) UnsetEnv(k string) {
+ b.OCIv1.Config.Env = removeEnvKey(b.OCIv1.Config.Env, k)
+ b.Docker.Config.Env = removeEnvKey(b.Docker.Config.Env, k)
+}
+
+// ClearEnv removes all values from the set of environment strings which should
+// be set when running commands in this container, or in a container built
+// using an image built from this container.
+func (b *Builder) ClearEnv() {
+ b.OCIv1.Config.Env = []string{}
+ b.Docker.Config.Env = []string{}
+}
+
+// Cmd returns the default command, or command parameters if an Entrypoint is
+// set, to use when running a container built from an image built from this
+// container.
+func (b *Builder) Cmd() []string {
+ return copyStringSlice(b.OCIv1.Config.Cmd)
+}
+
+// SetCmd sets the default command, or command parameters if an Entrypoint is
+// set, to use when running a container built from an image built from this
+// container.
+func (b *Builder) SetCmd(cmd []string) {
+ // Each config gets its own copy so later edits to one cannot leak into
+ // the other, or into the caller's slice.
+ b.OCIv1.Config.Cmd = copyStringSlice(cmd)
+ b.Docker.Config.Cmd = copyStringSlice(cmd)
+}
+
+// Entrypoint returns the command to be run for containers built from images
+// built from this container.
+// Returns nil (rather than an empty slice) when no entrypoint is set.
+func (b *Builder) Entrypoint() []string {
+ if len(b.OCIv1.Config.Entrypoint) > 0 {
+ return copyStringSlice(b.OCIv1.Config.Entrypoint)
+ }
+ return nil
+}
+
+// SetEntrypoint sets the command to be run for in containers built from images
+// built from this container.
+func (b *Builder) SetEntrypoint(ep []string) {
+ b.OCIv1.Config.Entrypoint = copyStringSlice(ep)
+ b.Docker.Config.Entrypoint = copyStringSlice(ep)
+}
+
+// Labels returns a set of key-value pairs from the image's runtime
+// configuration.
+// A copy is returned so callers cannot mutate the stored map.
+func (b *Builder) Labels() map[string]string {
+ return copyStringStringMap(b.OCIv1.Config.Labels)
+}
+
+// SetLabel adds or overwrites a key's value from the image's runtime
+// configuration.
+func (b *Builder) SetLabel(k string, v string) {
+ // Lazily allocate the maps: writing to a nil map would panic.
+ if b.OCIv1.Config.Labels == nil {
+ b.OCIv1.Config.Labels = map[string]string{}
+ }
+ b.OCIv1.Config.Labels[k] = v
+ if b.Docker.Config.Labels == nil {
+ b.Docker.Config.Labels = map[string]string{}
+ }
+ b.Docker.Config.Labels[k] = v
+}
+
+// UnsetLabel removes a key and its value from the image's runtime
+// configuration, if it's present.
+func (b *Builder) UnsetLabel(k string) {
+ // delete() is a no-op on nil maps and missing keys, so no guards needed.
+ delete(b.OCIv1.Config.Labels, k)
+ delete(b.Docker.Config.Labels, k)
+}
+
+// ClearLabels removes all keys and their values from the image's runtime
+// configuration.
+func (b *Builder) ClearLabels() {
+ b.OCIv1.Config.Labels = map[string]string{}
+ b.Docker.Config.Labels = map[string]string{}
+}
+
+// Ports returns the set of ports which should be exposed when a container
+// based on an image built from this container is run.
+// NOTE(review): map iteration order is random, so the returned order is
+// not stable between calls.
+func (b *Builder) Ports() []string {
+ p := []string{}
+ for k := range b.OCIv1.Config.ExposedPorts {
+ p = append(p, k)
+ }
+ return p
+}
+
+// SetPort adds or overwrites an exported port in the set of ports which should
+// be exposed when a container based on an image built from this container is
+// run.
+func (b *Builder) SetPort(p string) {
+ // Lazily allocate the maps: writing to a nil map would panic.
+ if b.OCIv1.Config.ExposedPorts == nil {
+ b.OCIv1.Config.ExposedPorts = map[string]struct{}{}
+ }
+ b.OCIv1.Config.ExposedPorts[p] = struct{}{}
+ if b.Docker.Config.ExposedPorts == nil {
+ b.Docker.Config.ExposedPorts = make(docker.PortSet)
+ }
+ b.Docker.Config.ExposedPorts[docker.Port(p)] = struct{}{}
+}
+
+// UnsetPort removes an exposed port from the set of ports which should be
+// exposed when a container based on an image built from this container is run.
+func (b *Builder) UnsetPort(p string) {
+ delete(b.OCIv1.Config.ExposedPorts, p)
+ delete(b.Docker.Config.ExposedPorts, docker.Port(p))
+}
+
+// ClearPorts empties the set of ports which should be exposed when a container
+// based on an image built from this container is run.
+func (b *Builder) ClearPorts() {
+ b.OCIv1.Config.ExposedPorts = map[string]struct{}{}
+ b.Docker.Config.ExposedPorts = docker.PortSet{}
+}
+
+// Volumes returns a list of filesystem locations which should be mounted from
+// outside of the container when a container built from an image built from
+// this container is run. Returns nil when no volumes are defined.
+func (b *Builder) Volumes() []string {
+ if len(b.OCIv1.Config.Volumes) == 0 {
+ return nil
+ }
+ locations := make([]string, 0, len(b.OCIv1.Config.Volumes))
+ for location := range b.OCIv1.Config.Volumes {
+ locations = append(locations, location)
+ }
+ return locations
+}
+
+// CheckVolume returns true if the location exists in the image's list of
+// locations which should be mounted from outside of the container when a
+// container based on an image built from this container is run. Either the
+// OCI or the Docker configuration mentioning the location is sufficient.
+func (b *Builder) CheckVolume(v string) bool {
+ _, ociHasVolume := b.OCIv1.Config.Volumes[v]
+ _, dockerHasVolume := b.Docker.Config.Volumes[v]
+ return ociHasVolume || dockerHasVolume
+}
+
+// AddVolume adds a location to the image's list of locations which should be
+// mounted from outside of the container when a container based on an image
+// built from this container is run.
+func (b *Builder) AddVolume(v string) {
+ // Lazily allocate the maps: writing to a nil map would panic.
+ if b.OCIv1.Config.Volumes == nil {
+ b.OCIv1.Config.Volumes = map[string]struct{}{}
+ }
+ b.OCIv1.Config.Volumes[v] = struct{}{}
+ if b.Docker.Config.Volumes == nil {
+ b.Docker.Config.Volumes = map[string]struct{}{}
+ }
+ b.Docker.Config.Volumes[v] = struct{}{}
+}
+
+// RemoveVolume removes a location from the list of locations which should be
+// mounted from outside of the container when a container based on an image
+// built from this container is run.
+func (b *Builder) RemoveVolume(v string) {
+ delete(b.OCIv1.Config.Volumes, v)
+ delete(b.Docker.Config.Volumes, v)
+}
+
+// ClearVolumes removes all locations from the image's list of locations which
+// should be mounted from outside of the container when a container based on an
+// image built from this container is run.
+func (b *Builder) ClearVolumes() {
+ b.OCIv1.Config.Volumes = map[string]struct{}{}
+ b.Docker.Config.Volumes = map[string]struct{}{}
+}
+
+// Hostname returns the hostname which will be set in the container and in
+// containers built using images built from the container.
+func (b *Builder) Hostname() string {
+ return b.Docker.Config.Hostname
+}
+
+// SetHostname sets the hostname which will be set in the container and in
+// containers built using images built from the container.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+func (b *Builder) SetHostname(name string) {
+ b.Docker.Config.Hostname = name
+}
+
+// Domainname returns the domainname which will be set in the container and in
+// containers built using images built from the container.
+func (b *Builder) Domainname() string {
+ return b.Docker.Config.Domainname
+}
+
+// SetDomainname sets the domainname which will be set in the container and in
+// containers built using images built from the container.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+func (b *Builder) SetDomainname(name string) {
+ // Warn (but still record the value) when it will be dropped at commit time.
+ if name != "" && b.Format != define.Dockerv2ImageManifest {
+ b.Logger.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name)
+ }
+ b.Docker.Config.Domainname = name
+}
+
+// SetDefaultMountsFilePath sets the mounts file path for testing purposes.
+func (b *Builder) SetDefaultMountsFilePath(path string) {
+ b.DefaultMountsFilePath = path
+}
+
+// Comment returns the comment which will be set in the container and in
+// containers built using images built from the container.
+func (b *Builder) Comment() string {
+ return b.Docker.Comment
+}
+
+// SetComment sets the comment which will be set in the container and in
+// containers built using images built from the container.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+func (b *Builder) SetComment(comment string) {
+ if comment != "" && b.Format != define.Dockerv2ImageManifest {
+ // Use the per-builder logger, consistent with the other setters
+ // (SetDomainname, SetShell, SetOnBuild) rather than the global one.
+ b.Logger.Warnf("COMMENT is not supported for OCI image format, comment %s will be ignored. Must use `docker` format", comment)
+ }
+ b.Docker.Comment = comment
+}
+
+// HistoryComment returns the comment which will be used in the history item
+// which will describe the latest layer when we commit an image.
+func (b *Builder) HistoryComment() string {
+ return b.ImageHistoryComment
+}
+
+// SetHistoryComment sets the comment which will be used in the history item
+// which will describe the latest layer when we commit an image.
+func (b *Builder) SetHistoryComment(comment string) {
+ b.ImageHistoryComment = comment
+}
+
+// StopSignal returns the signal which will be set in the container and in
+// containers built using images built from the container.
+func (b *Builder) StopSignal() string {
+ return b.Docker.Config.StopSignal
+}
+
+// SetStopSignal sets the signal which will be set in the container and in
+// containers built using images built from the container.
+func (b *Builder) SetStopSignal(stopSignal string) {
+ b.OCIv1.Config.StopSignal = stopSignal
+ b.Docker.Config.StopSignal = stopSignal
+}
+
+// Healthcheck returns information that recommends how a container engine
+// should check if a running container is "healthy".
+// A deep copy is returned so callers cannot mutate the stored configuration.
+func (b *Builder) Healthcheck() *docker.HealthConfig {
+ if b.Docker.Config.Healthcheck == nil {
+ return nil
+ }
+ return &docker.HealthConfig{
+ Test: copyStringSlice(b.Docker.Config.Healthcheck.Test),
+ Interval: b.Docker.Config.Healthcheck.Interval,
+ Timeout: b.Docker.Config.Healthcheck.Timeout,
+ StartPeriod: b.Docker.Config.Healthcheck.StartPeriod,
+ Retries: b.Docker.Config.Healthcheck.Retries,
+ }
+}
+
+// SetHealthcheck sets recommended commands to run in order to verify that a
+// running container based on this image is "healthy", along with information
+// specifying how often that test should be run, and how many times the test
+// should fail before the container should be considered unhealthy.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+// Passing nil clears any previously-set healthcheck.
+func (b *Builder) SetHealthcheck(config *docker.HealthConfig) {
+ b.Docker.Config.Healthcheck = nil
+ if config != nil {
+ if b.Format != define.Dockerv2ImageManifest {
+ b.Logger.Warnf("HEALTHCHECK is not supported for OCI image format and will be ignored. Must use `docker` format")
+ }
+ // Store a deep copy so later changes by the caller don't leak in.
+ b.Docker.Config.Healthcheck = &docker.HealthConfig{
+ Test: copyStringSlice(config.Test),
+ Interval: config.Interval,
+ Timeout: config.Timeout,
+ StartPeriod: config.StartPeriod,
+ Retries: config.Retries,
+ }
+ }
+}
+
+// AddPrependedEmptyLayer adds an item to the history that we'll create when
+// committing the image, after any history we inherit from the base image, but
+// before the history item that we'll use to describe the new layer that we're
+// adding.
+func (b *Builder) AddPrependedEmptyLayer(created *time.Time, createdBy, author, comment string) {
+ // Copy the timestamp so the caller can't mutate it after the fact.
+ if created != nil {
+ copiedTimestamp := *created
+ created = &copiedTimestamp
+ }
+ b.PrependedEmptyLayers = append(b.PrependedEmptyLayers, ociv1.History{
+ Created: created,
+ CreatedBy: createdBy,
+ Author: author,
+ Comment: comment,
+ EmptyLayer: true,
+ })
+}
+
+// ClearPrependedEmptyLayers clears the list of history entries that we'll add
+// to the committed image before the entry for the layer that we're adding.
+func (b *Builder) ClearPrependedEmptyLayers() {
+ b.PrependedEmptyLayers = nil
+}
+
+// AddAppendedEmptyLayer adds an item to the history that we'll create when
+// committing the image, after the history item that we'll use to describe the
+// new layer that we're adding.
+func (b *Builder) AddAppendedEmptyLayer(created *time.Time, createdBy, author, comment string) {
+ // Copy the timestamp so the caller can't mutate it after the fact.
+ if created != nil {
+ copiedTimestamp := *created
+ created = &copiedTimestamp
+ }
+ b.AppendedEmptyLayers = append(b.AppendedEmptyLayers, ociv1.History{
+ Created: created,
+ CreatedBy: createdBy,
+ Author: author,
+ Comment: comment,
+ EmptyLayer: true,
+ })
+}
+
+// ClearAppendedEmptyLayers clears the list of history entries that we'll add
+// to the committed image after the entry for the layer that we're adding.
+func (b *Builder) ClearAppendedEmptyLayers() {
+ b.AppendedEmptyLayers = nil
+}
diff --git a/contrib/buildahimage/Containerfile b/contrib/buildahimage/Containerfile
new file mode 100644
index 0000000..88f80bf
--- /dev/null
+++ b/contrib/buildahimage/Containerfile
@@ -0,0 +1,113 @@
+# [stable|testing|upstream]/Containerfile
+#
+# Build a Buildah container image from the latest version
+# of Fedora.
+#
+# FLAVOR defaults to stable if unset
+#
+# FLAVOR=stable acquires a stable version of Buildah
+# from the Fedora Updates System.
+# FLAVOR=testing acquires a testing version of Buildah
+# from the Fedora Updates System.
+# FLAVOR=upstream acquires a development version of Buildah
+# from the Fedora Copr Buildsystem.
+# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/
+#
+# https://bodhi.fedoraproject.org/updates/?search=buildah
+#
+# This image can be used to create a secured container
+# that runs safely with privileges within the container.
+#
+
+FROM registry.fedoraproject.org/fedora:latest
+ARG FLAVOR=stable
+
+# Capabilities the containerized buildah needs; consumed by podman at run time.
+LABEL "io.containers.capabilities"="CHOWN,DAC_OVERRIDE,FOWNER,FSETID,KILL,NET_BIND_SERVICE,SETFCAP,SETGID,SETPCAP,SETUID,SYS_CHROOT"
+
+# When building for multiple-architectures in parallel using emulation
+# it's really easy for one/more dnf processes to timeout or mis-count
+# the minimum download rates. Bump both to be extremely forgiving of
+# an overworked host.
+RUN echo -e "\n\n# Added during image build" >> /etc/dnf/dnf.conf && \
+ echo -e "minrate=100\ntimeout=60\n" >> /etc/dnf/dnf.conf
+
+# Don't include container-selinux and remove
+# directories used by dnf that are just taking
+# up space.
+# TODO: rpm --setcaps... needed due to Fedora (base) image builds
+# being (maybe still?) affected by
+# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
+RUN dnf -y makecache && \
+ dnf -y update && \
+ rpm --setcaps shadow-utils 2>/dev/null && \
+ case "${FLAVOR}" in \
+ stable) \
+ dnf -y install buildah fuse-overlayfs cpp --exclude container-selinux \
+ ;; \
+ testing) \
+ dnf -y install --enablerepo=updates-testing buildah fuse-overlayfs cpp \
+ --exclude container-selinux \
+ ;; \
+ upstream) \
+ dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
+ dnf -y copr enable rhcontainerbot/podman-next && \
+ dnf -y install buildah fuse-overlayfs \
+ --exclude container-selinux \
+ --enablerepo=updates-testing \
+ ;; \
+ *) \
+ printf "\\nFLAVOR argument must be set and valid, currently: '${FLAVOR}'\\n\\n" 1>&2 && \
+ exit 1 \
+ ;; \
+ esac && \
+ dnf -y clean all && \
+ rm -rf /var/cache /var/log/dnf* /var/log/yum.*
+
+ADD ./containers.conf /etc/containers/
+
+# Setup internal Buildah to pass secrets/subscriptions down from host to internal container
+RUN printf '/run/secrets/etc-pki-entitlement:/run/secrets/etc-pki-entitlement\n/run/secrets/rhsm:/run/secrets/rhsm\n' > /etc/containers/mounts.conf
+
+# Copy & modify the defaults to provide reference if runtime changes needed.
+# Changes here are required for running with fuse-overlay storage inside container.
+RUN sed -e 's|^#mount_program|mount_program|g' \
+ -e '/additionalimage.*/a "/var/lib/shared",' \
+ -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
+ /usr/share/containers/storage.conf \
+ > /etc/containers/storage.conf && \
+ chmod 644 /etc/containers/storage.conf && \
+ chmod 644 /etc/containers/containers.conf
+
+RUN mkdir -p /var/lib/shared/overlay-images \
+ /var/lib/shared/overlay-layers \
+ /var/lib/shared/vfs-images \
+ /var/lib/shared/vfs-layers && \
+ touch /var/lib/shared/overlay-images/images.lock && \
+ touch /var/lib/shared/overlay-layers/layers.lock && \
+ touch /var/lib/shared/vfs-images/images.lock && \
+ touch /var/lib/shared/vfs-layers/layers.lock
+
+# Define uid/gid ranges for our user https://github.com/containers/buildah/issues/3053
+RUN useradd build && \
+ echo -e "build:1:999\nbuild:1001:64535" > /etc/subuid && \
+ echo -e "build:1:999\nbuild:1001:64535" > /etc/subgid && \
+ mkdir -p /home/build/.local/share/containers && \
+ mkdir -p /home/build/.config/containers && \
+ chown -R build:build /home/build
+# See: https://github.com/containers/buildah/issues/4669
+# Copy & modify the config for the `build` user and remove the global
+# `runroot` and `graphroot` which current `build` user cannot access,
+# in such case storage will choose a runroot in `/var/tmp`.
+RUN sed -e 's|^#mount_program|mount_program|g' \
+ -e 's|^graphroot|#graphroot|g' \
+ -e 's|^runroot|#runroot|g' \
+ /etc/containers/storage.conf \
+ > /home/build/.config/containers/storage.conf && \
+ chown build:build /home/build/.config/containers/storage.conf
+
+VOLUME /var/lib/containers
+VOLUME /home/build/.local/share/containers
+
+# Set an environment variable to default to chroot isolation for RUN
+# instructions and "buildah run".
+ENV BUILDAH_ISOLATION=chroot
diff --git a/contrib/buildahimage/README.md b/contrib/buildahimage/README.md
new file mode 100644
index 0000000..c7032eb
--- /dev/null
+++ b/contrib/buildahimage/README.md
@@ -0,0 +1,86 @@
+[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
+[comment]: <> ()
+[comment]: <> (ANY changes made to this file, once committed/merged must)
+[comment]: <> (be manually copy/pasted -in markdown- into the description)
+[comment]: <> (field on Quay at the following locations:)
+[comment]: <> ()
+[comment]: <> (https://quay.io/repository/containers/buildah)
+[comment]: <> (https://quay.io/repository/buildah/stable)
+[comment]: <> (https://quay.io/repository/buildah/testing)
+[comment]: <> (https://quay.io/repository/buildah/upstream)
+[comment]: <> ()
+[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
+
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# buildahimage
+
+## Overview
+
+This directory contains the Dockerfiles necessary to create the buildahimage container
+images that are housed on quay.io under the buildah account. All repositories where
+the images live are public and can be pulled without credentials. These container images are secured and the
+resulting containers can run safely with privileges within the container.
+
+The container images are built using the latest Fedora and then Buildah is installed into them.
+The PATH in the container images is set to the default PATH provided by Fedora. Also, the
+ENTRYPOINT and the WORKDIR variables are not set within these container images, as such they
+default to `/`.
+
+The container images are:
+
+ * `quay.io/containers/buildah:<version>` and `quay.io/buildah/stable:<version>` -
+ These images are built daily. They are intended to contain an unchanging
+ and stable version of buildah. For the most recent `<version>` tags (`vX`,
+ `vX.Y`, and `vX.Y.Z`) the image contents will be updated daily to incorporate
+ (especially) security upgrades. For build details, please [see the
+ configuration file](stable/Dockerfile).
+ * `quay.io/containers/buildah:latest` and `quay.io/buildah/stable:latest` -
+ Built daily using the same Dockerfile as above. The buildah version
+ will remain the "latest" available in Fedora, however the other image
+ contents may vary compared to the version-tagged images.
+ * `quay.io/buildah/testing:latest` - This image is built daily, using the
+ latest version of Buildah that was in the Fedora `updates-testing` repository.
+ The image is built with [the testing Dockerfile](testing/Dockerfile).
+ * `quay.io/buildah/upstream:latest` - This image is built daily using the latest
+ code found in this GitHub repository. Due to the image changing frequently,
+ it's not guaranteed to be stable or even executable. The image is built with
+ [the upstream Dockerfile](upstream/Dockerfile). Note: The actual compilation
+ of upstream buildah [occurs continuously in
+ COPR](https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/).
+
+
+## Sample Usage
+
+Although not required, it is suggested that [Podman](https://github.com/containers/podman) be used with these container images.
+
+```
+podman pull docker://quay.io/buildah/stable:latest
+
+podman run stable buildah version
+
+# Create a directory on the host to mount the container's
+# /var/lib/containers directory to so containers can be
+# run within the container.
+mkdir /var/lib/mycontainer
+
+# Run the image detached using the host's network in a container name
+# buildahctr, turn off label and seccomp confinement in the container
+# and then do a little shell hackery to keep the container up and running.
+podman run --detach --name=buildahctr --net=host --security-opt label=disable --security-opt seccomp=unconfined --device /dev/fuse:rw -v /var/lib/mycontainer:/var/lib/containers:Z stable sh -c 'while true ;do sleep 100000 ; done'
+
+podman exec -it buildahctr /bin/sh
+
+# Now inside of the container
+
+buildah from alpine
+
+buildah images
+
+exit
+```
+
+**Note:** If you encounter a `fuse: device not found` error when running the container image, it is likely that
+the fuse kernel module has not been loaded on your host system. Use the command `modprobe fuse` to load the
+module and then run the container image. To enable this automatically at boot time, you can add a configuration
+file to `/etc/modules-load.d`. See `man modules-load.d` for more details.
diff --git a/contrib/buildahimage/containers.conf b/contrib/buildahimage/containers.conf
new file mode 100644
index 0000000..0bf45cd
--- /dev/null
+++ b/contrib/buildahimage/containers.conf
@@ -0,0 +1,2 @@
+[engine]
+cgroup_manager = "cgroupfs"
diff --git a/contrib/cirrus/bors-ng.png b/contrib/cirrus/bors-ng.png
new file mode 100644
index 0000000..9148d16
--- /dev/null
+++ b/contrib/cirrus/bors-ng.png
Binary files differ
diff --git a/contrib/cirrus/build.sh b/contrib/cirrus/build.sh
new file mode 100755
index 0000000..b478a13
--- /dev/null
+++ b/contrib/cirrus/build.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Build and install buildah from the checked-out source tree.
+# When $IN_PODMAN is "true", re-execute this script inside a container;
+# otherwise compile and install directly on the host.
+set -e
+
+source $(dirname $0)/lib.sh
+
+# Abort early if the required environment is not set.
+req_env_vars IN_PODMAN IN_PODMAN_NAME GOSRC
+
+remove_packaged_buildah_files
+
+go version && go env
+
+cd "$GOSRC"
+if [[ "$IN_PODMAN" == "true" ]]
+then
+ # Re-run this same script ($0) inside the build container.
+ in_podman --rm $IN_PODMAN_NAME $0
+else
+ echo "Compiling buildah (\$GOSRC=$GOSRC)"
+ showrun make clean all
+
+ echo "Installing buildah"
+ mkdir -p bin
+ showrun make install PREFIX=/usr
+ # Smoke-test the freshly installed binary.
+ showrun ./bin/buildah info
+fi
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
new file mode 100755
index 0000000..3cdfb38
--- /dev/null
+++ b/contrib/cirrus/lib.sh
@@ -0,0 +1,342 @@
+
+
+# Library of common, shared utility functions. This file is intended
+# to be sourced by other scripts, not called directly.
+
+# BEGIN Global export of all variables
+set -a
+
+# Due to differences across platforms and runtime execution environments,
+# handling of the (otherwise) default shell setup is non-uniform. Rather
+# than attempt to workaround differences, simply force-load/set required
+# items every time this library is utilized.
+USER="$(whoami)"
+HOME="$(getent passwd $USER | cut -d : -f 6)"
+# Some platforms set and make this read-only
+[[ -n "$UID" ]] || \
+ UID=$(getent passwd $USER | cut -d : -f 3)
+
+# Automation library installed at image-build time,
+# defining $AUTOMATION_LIB_PATH in this file.
+if [[ -r "/etc/automation_environment" ]]; then
+ source /etc/automation_environment
+fi
+# shellcheck disable=SC2154
+if [[ -n "$AUTOMATION_LIB_PATH" ]]; then
+ # shellcheck source=/usr/share/automation/lib/common_lib.sh
+ source $AUTOMATION_LIB_PATH/common_lib.sh
+else
+ (
+ echo "WARNING: It does not appear that containers/automation was installed."
+ echo " Functionality of most of this library will be negatively impacted"
+ echo " This ${BASH_SOURCE[0]} was loaded by ${BASH_SOURCE[1]}"
+ ) > /dev/stderr
+fi
+
+# Required for proper GPG functioning under automation
+GPG_TTY="${GPG_TTY:-/dev/null}"
+
+# Essential default paths, many are overridden when executing under Cirrus-CI
+# others are duplicated here, to assist in debugging.
+GOPATH="${GOPATH:-/var/tmp/go}"
+if type -P go &> /dev/null
+then
+ # required for go 1.12+
+ GOCACHE="${GOCACHE:-$HOME/.cache/go-build}"
+ eval "$(go env)"
+ # Ensure compiled tooling is reachable
+ PATH="$PATH:$GOPATH/bin"
+fi
+CIRRUS_WORKING_DIR="${CIRRUS_WORKING_DIR:-$GOPATH/src/github.com/containers/buildah}"
+GOSRC="${GOSRC:-$CIRRUS_WORKING_DIR}"
+PATH="$GOSRC/tests/tools/build:$HOME/bin:$GOPATH/bin:/usr/local/bin:/usr/lib/cri-o-runc/sbin:$PATH"
+SCRIPT_BASE=${SCRIPT_BASE:-./contrib/cirrus}
+
+cd $GOSRC
+if type -P git &> /dev/null
+then
+ CIRRUS_CHANGE_IN_REPO=${CIRRUS_CHANGE_IN_REPO:-$(git show-ref --hash=8 HEAD || date +%s)}
+else # pick something unique and obviously not from Cirrus
+ CIRRUS_CHANGE_IN_REPO=${CIRRUS_CHANGE_IN_REPO:-unknown$(date +%s)}
+fi
+
+export CI="${CI:-false}"
+CIRRUS_CI="${CIRRUS_CI:-false}"
+CONTINUOUS_INTEGRATION="${CONTINUOUS_INTEGRATION:-false}"
+CIRRUS_REPO_NAME=${CIRRUS_REPO_NAME:-buildah}
+CIRRUS_BASE_SHA=${CIRRUS_BASE_SHA:-unknown$(date +%d)} # difficult to reliably discover
+CIRRUS_BUILD_ID=${CIRRUS_BUILD_ID:-unknown$(date +%s)} # must be short and unique enough
+CIRRUS_TASK_ID=${CIRRUS_BUILD_ID:-unknown$(date +%d)} # to prevent state thrashing when
+ # debugging with `hack/get_ci_vm.sh`
+# Regex defining all CI-related env. vars. necessary for all possible
+# testing operations on all platforms and versions. This is necessary
+# to avoid needlessly passing through global/system values across
+# contexts, such as host->container or root->rootless user
+#
+# List of envariables which must be EXACT matches
+# N/B: Don't include BUILDAH_ISOLATION, STORAGE_DRIVER, or CGROUP_MANAGER
+# here because they will negatively affect execution of the rootless
+# integration tests.
+PASSTHROUGH_ENV_EXACT='DEST_BRANCH|DISTRO_NV|GOPATH|GOSRC|ROOTLESS_USER|SCRIPT_BASE|IN_PODMAN_IMAGE'
+
+# List of envariable patterns which must match AT THE BEGINNING of the name.
+PASSTHROUGH_ENV_ATSTART='CI|TEST'
+
+# List of envariable patterns which can match ANYWHERE in the name
+PASSTHROUGH_ENV_ANYWHERE='_NAME|_FQIN'
+
+# Combine into one
+PASSTHROUGH_ENV_RE="(^($PASSTHROUGH_ENV_EXACT)\$)|(^($PASSTHROUGH_ENV_ATSTART))|($PASSTHROUGH_ENV_ANYWHERE)"
+
+# Unsafe env. vars for display
+SECRET_ENV_RE='ACCOUNT|GC[EP]..|SSH|PASSWORD|SECRET|TOKEN'
+
+# FQINs needed for testing
+REGISTRY_FQIN=${REGISTRY_FQIN:-docker.io/library/registry}
+ALPINE_FQIN=${ALPINE_FQIN:-docker.io/library/alpine}
+
+# for in-container testing
+IN_PODMAN_NAME="in_podman_$CIRRUS_TASK_ID"
+IN_PODMAN="${IN_PODMAN:-false}"
+
+# rootless_user
+ROOTLESS_USER="rootlessuser"
+
+# Downloaded, but not installed packages.
+PACKAGE_DOWNLOAD_DIR=/var/cache/download
+
+lilto() { err_retry 8 1000 "" "$@"; } # just over 4 minutes max
+bigto() { err_retry 7 5670 "" "$@"; } # 12 minutes max
+
+# Working with apt under automation is a PITA, make it easy
+# Avoid some ways of getting stuck waiting for user input
+export DEBIAN_FRONTEND=noninteractive
+# Short-cut for frequently used base command
+export APTGET='apt-get -qq --yes'
+# Short timeout for quick-running packaging command
+SHORT_APTGET="lilto $APTGET"
+SHORT_DNFY="lilto dnf -y"
+# Longer timeout for long-running packaging command
+LONG_APTGET="bigto $APTGET"
+LONG_DNFY="bigto dnf -y"
+
+# Allow easy substitution for debugging if needed
+CONTAINER_RUNTIME="showrun ${CONTAINER_RUNTIME:-podman}"
+
+# END Global export of all variables
+set +a
+
+bad_os_id_ver() {
+ die "Unknown/Unsupported distro. $OS_RELEASE_ID and/or version $OS_RELEASE_VER for $(basename $0)"
+}
+
+# Remove all files provided by the distro version of buildah.
+# All VM cache-images used for testing include the distro buildah because it
+# simplifies installing necessary dependencies which can change over time.
+# For general CI testing however, calling this function makes sure the system
+# can only run the compiled source version.
+remove_packaged_buildah_files() {
+ warn "Removing packaged buildah files to prevent conflicts with source build and testing."
+ req_env_vars OS_RELEASE_ID
+
+ if [[ "$OS_RELEASE_ID" =~ "debian" ]]
+ then
+ LISTING_CMD="dpkg-query -L buildah"
+ else
+ LISTING_CMD='rpm -ql buildah'
+ fi
+
+ # yum/dnf/dpkg may list system directories, only remove files
+ $LISTING_CMD | while read fullpath
+ do
+ # Sub-directories may contain unrelated/valuable stuff
+ if [[ -d "$fullpath" ]]; then continue; fi
+
+ rm -vf "$fullpath"
+ done
+
+ if [[ -z "$CONTAINER" ]]; then
+ # Be super extra sure and careful vs performant and completely safe
+ sync && echo 3 > /proc/sys/vm/drop_caches
+ fi
+}
+
+# Return a list of environment variables that should be passed through
+# to lower levels (tests in containers, or via ssh to rootless).
+# We return the variable names only, not their values. It is up to our
+# caller to reference values.
+passthrough_envars(){
+ warn "Will pass env. vars. matching the following regex:
+ $PASSTHROUGH_ENV_RE"
+ compgen -A variable | \
+ grep -Ev "$SECRET_ENV_RE" | \
+ grep -Ev "^PASSTHROUGH_" | \
+ grep -E "$PASSTHROUGH_ENV_RE"
+}
+
+in_podman() {
+ req_env_vars IN_PODMAN_NAME GOSRC GOPATH SECRET_ENV_RE HOME
+ [[ -n "$@" ]] || \
+ die "Must specify FQIN and command with arguments to execute"
+
+ # Line-separated arguments which include shell-escaped special characters
+ declare -a envargs
+ while read -r var; do
+ # Pass "-e VAR" on the command line, not "-e VAR=value". Podman can
+ # do a much better job of transmitting the value than we can,
+ # especially when value includes spaces.
+ envargs+=("-e" "$var")
+ done <<<"$(passthrough_envars)"
+
+ showrun podman run -i --name="$IN_PODMAN_NAME" \
+ --net="container:registry" \
+ --privileged \
+ --cgroupns=host \
+ "${envargs[@]}" \
+ -e BUILDAH_ISOLATION \
+ -e STORAGE_DRIVER \
+ -e "IN_PODMAN=false" \
+ -e "CONTAINER=podman" \
+ -e "CGROUP_MANAGER=cgroupfs" \
+ -v "$HOME/auth:$HOME/auth:ro" \
+ -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+ -v /dev/fuse:/dev/fuse:rw \
+ -v "$GOSRC:$GOSRC:z" \
+ --workdir "$GOSRC" \
+ "$@"
+}
+
+verify_local_registry(){
+ # On the unexpected/rare chance of a name-clash
+ local CUSTOM_FQIN=localhost:5000/my-alpine-$RANDOM
+ echo "Verifying local 'registry' container is operational"
+ showrun podman version
+ showrun podman info
+ showrun podman ps --all
+ showrun podman images
+ showrun ls -alF $HOME/auth
+ showrun podman pull $ALPINE_FQIN
+ showrun podman login --tls-verify=false localhost:5000 --username testuser --password testpassword
+ showrun podman tag $ALPINE_FQIN $CUSTOM_FQIN
+ showrun podman push --tls-verify=false --creds=testuser:testpassword $CUSTOM_FQIN
+ showrun podman ps --all
+ showrun podman images
+ showrun podman rmi $ALPINE_FQIN
+ showrun podman rmi $CUSTOM_FQIN
+ showrun podman pull --tls-verify=false --creds=testuser:testpassword $CUSTOM_FQIN
+ showrun podman ps --all
+ showrun podman images
+ echo "Success, local registry is working, cleaning up."
+ showrun podman rmi $CUSTOM_FQIN
+}
+
+execute_local_registry() {
+ if nc -4 -z 127.0.0.1 5000
+ then
+ warn "Found listener on localhost:5000, NOT starting up local registry server."
+ verify_local_registry
+ return 0
+ fi
+ req_env_vars CONTAINER_RUNTIME GOSRC
+ local authdirpath=$HOME/auth
+ cd $GOSRC
+
+ echo "Creating a self signed certificate and get it in the right places"
+ mkdir -p $authdirpath
+ openssl req \
+ -newkey rsa:4096 -nodes -sha256 -x509 -days 2 \
+ -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=registry host certificate" \
+ -addext subjectAltName=DNS:localhost \
+ -keyout $authdirpath/domain.key \
+ -out $authdirpath/domain.crt
+
+ cp $authdirpath/domain.crt $authdirpath/domain.cert
+
+ echo "Creating http credentials file"
+ showrun htpasswd -Bbn testuser testpassword > $authdirpath/htpasswd
+
+ echo "Starting up the local 'registry' container"
+ showrun podman run -d -p 5000:5000 --name registry \
+ -v $authdirpath:$authdirpath:Z \
+ -e "REGISTRY_AUTH=htpasswd" \
+ -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
+ -e REGISTRY_AUTH_HTPASSWD_PATH=$authdirpath/htpasswd \
+ -e REGISTRY_HTTP_TLS_CERTIFICATE=$authdirpath/domain.crt \
+ -e REGISTRY_HTTP_TLS_KEY=$authdirpath/domain.key \
+ $REGISTRY_FQIN
+
+ verify_local_registry
+}
+
+setup_rootless() {
+ req_env_vars GOPATH GOSRC SECRET_ENV_RE
+
+ local rootless_uid
+ local rootless_gid
+ local env_var_val
+ local akfilepath
+ local sshcmd
+
+ # Only do this once; established by setup_environment.sh
+ # shellcheck disable=SC2154
+ if passwd --status $ROOTLESS_USER
+ then
+ if [[ $PRIV_NAME = "rootless" ]]; then
+ msg "Updating $ROOTLESS_USER user permissions on possibly changed libpod code"
+ chown -R $ROOTLESS_USER:$ROOTLESS_USER "$GOPATH" "$GOSRC"
+ return 0
+ fi
+ fi
+ msg "************************************************************"
+ msg "Setting up rootless user '$ROOTLESS_USER'"
+ msg "************************************************************"
+ cd $GOSRC || exit 1
+ # Guarantee independence from specific values
+ rootless_uid=$[RANDOM+1000]
+ rootless_gid=$[RANDOM+1000]
+ msg "creating $rootless_uid:$rootless_gid $ROOTLESS_USER user"
+ groupadd -g $rootless_gid $ROOTLESS_USER
+ useradd -g $rootless_gid -u $rootless_uid --no-user-group --create-home $ROOTLESS_USER
+
+ # We also set up rootless user for image-scp tests (running as root)
+ if [[ $PRIV_NAME = "rootless" ]]; then
+ chown -R $ROOTLESS_USER:$ROOTLESS_USER "$GOPATH" "$GOSRC"
+ fi
+ echo "$ROOTLESS_USER ALL=(root) NOPASSWD: ALL" > /etc/sudoers.d/ci-rootless
+
+ mkdir -p "$HOME/.ssh" "/home/$ROOTLESS_USER/.ssh"
+
+ msg "Creating ssh key pairs"
+ [[ -r "$HOME/.ssh/id_rsa" ]] || \
+ ssh-keygen -t rsa -P "" -f "$HOME/.ssh/id_rsa"
+ ssh-keygen -t ed25519 -P "" -f "/home/$ROOTLESS_USER/.ssh/id_ed25519"
+ ssh-keygen -t rsa -P "" -f "/home/$ROOTLESS_USER/.ssh/id_rsa"
+
+ msg "Setup authorized_keys"
+ cat $HOME/.ssh/*.pub /home/$ROOTLESS_USER/.ssh/*.pub >> $HOME/.ssh/authorized_keys
+ cat $HOME/.ssh/*.pub /home/$ROOTLESS_USER/.ssh/*.pub >> /home/$ROOTLESS_USER/.ssh/authorized_keys
+
+ msg "Ensure the ssh daemon is up and running within 5 minutes"
+ systemctl start sshd
+ lilto systemctl is-active sshd
+
+ msg "Configure ssh file permissions"
+ chmod -R 700 "$HOME/.ssh"
+ chmod -R 700 "/home/$ROOTLESS_USER/.ssh"
+ chown -R $ROOTLESS_USER:$ROOTLESS_USER "/home/$ROOTLESS_USER/.ssh"
+
+ msg " setup known_hosts for $USER"
+ ssh-keyscan localhost > /root/.ssh/known_hosts
+
+ msg " setup known_hosts for $ROOTLESS_USER"
+ install -Z -m 700 -o $ROOTLESS_USER -g $ROOTLESS_USER \
+ /root/.ssh/known_hosts /home/$ROOTLESS_USER/.ssh/known_hosts
+
+ msg "Setting up pass-through env. vars for $ROOTLESS_USER"
+ while read -r env_var; do
+ # N/B: Some values contain spaces and other potential nasty-bits
+ # (i.e. $CIRRUS_COMMIT_MESSAGE). The %q conversion ensures proper
+ # bash-style escaping.
+ printf -- "export %s=%q\n" "${env_var}" "${!env_var}" | tee -a /home/$ROOTLESS_USER/ci_environment
+ done <<<"$(passthrough_envars)"
+}
diff --git a/contrib/cirrus/logcollector.sh b/contrib/cirrus/logcollector.sh
new file mode 100755
index 0000000..419b16e
--- /dev/null
+++ b/contrib/cirrus/logcollector.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+set -e
+
+source $(dirname $0)/lib.sh
+
+req_env_vars CI GOSRC OS_RELEASE_ID
+
+case $1 in
+ audit)
+ case $OS_RELEASE_ID in
+ debian) showrun cat /var/log/kern.log ;;
+ fedora) showrun cat /var/log/audit/audit.log ;;
+ *) bad_os_id_ver ;;
+ esac
+ ;;
+ df) showrun df -lhTx tmpfs ;;
+ journal) showrun journalctl -b ;;
+ podman) showrun podman system info ;;
+ buildah_version) showrun $GOSRC/bin/buildah version;;
+ buildah_info) showrun $GOSRC/bin/buildah info;;
+ golang) showrun go version;;
+ packages)
+ # These names are common to Fedora and Debian
+ PKG_NAMES=(\
+ buildah
+ conmon
+ container-selinux
+ containernetworking-plugins
+ containers-common
+ crun
+ cri-o-runc
+ libseccomp
+ libseccomp2
+ podman
+ runc
+ skopeo
+ slirp4netns
+ )
+ case $OS_RELEASE_ID in
+ fedora*)
+ if [[ "$OS_RELEASE_VER" -ge 36 ]]; then
+ PKG_NAMES+=(aardvark-dns netavark)
+ fi
+ PKG_LST_CMD='rpm -q --qf=%{N}-%{V}-%{R}-%{ARCH}\n'
+ ;;
+ debian*)
+ PKG_LST_CMD='dpkg-query --show --showformat=${Package}-${Version}-${Architecture}\n'
+ ;;
+ *) bad_os_id_ver ;;
+ esac
+ # Any not-present packages will be listed as such
+ $PKG_LST_CMD ${PKG_NAMES[@]} | sort -u
+ ;;
+ *) die "Warning, $(basename $0) doesn't know how to handle the parameter '$1'"
+esac
diff --git a/contrib/cirrus/setup.sh b/contrib/cirrus/setup.sh
new file mode 100755
index 0000000..4044196
--- /dev/null
+++ b/contrib/cirrus/setup.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+set -e
+
+# N/B: In most (but not all) cases, these packages will already be installed
+# in the VM image at build-time (from libpod repo.). Running package install
+# again here, ensures that all cases are covered, and there is never any
+# expectation mismatch.
+source $(dirname $0)/lib.sh
+
+req_env_vars OS_RELEASE_ID OS_RELEASE_VER GOSRC IN_PODMAN_IMAGE CIRRUS_CHANGE_TITLE
+
+msg "Disabling git repository owner-check system-wide."
+# Newer versions of git bark if repo. files are unexpectedly owned.
+# This mainly affects rootless and containerized testing. But
+# the testing environment is disposable, so we don't care.
+git config --system --add safe.directory $GOSRC
+
+# Support optional/draft testing using latest/greatest
+# podman-next COPR packages. This requires a draft PR
+# to ensure changes also pass CI w/o package updates.
+if [[ "$OS_RELEASE_ID" =~ "fedora" ]] && \
+ [[ "$CIRRUS_CHANGE_TITLE" =~ CI:NEXT ]]
+then
+ # shellcheck disable=SC2154
+ if [[ "$CIRRUS_PR_DRAFT" != "true" ]]; then
+ die "Magic 'CI:NEXT' string can only be used on DRAFT PRs"
+ fi
+
+ showrun dnf copr enable rhcontainerbot/podman-next -y
+ showrun dnf upgrade -y
+fi
+
+msg "Setting up $OS_RELEASE_ID $OS_RELEASE_VER"
+cd $GOSRC
+case "$OS_RELEASE_ID" in
+ fedora)
+ warn "Hard-coding podman to use crun"
+ cat > /etc/containers/containers.conf <<EOF
+[engine]
+runtime="crun"
+EOF
+
+ # Executing tests in a container requires SELinux boolean set on the host
+ if [[ "$IN_PODMAN" == "true" ]]
+ then
+ showrun setsebool -P container_manage_cgroup true
+ fi
+ ;;
+ debian)
+ if [[ "$1" == "conformance" ]]; then
+ msg "Installing previously downloaded/cached Docker packages"
+ dpkg -i \
+ $PACKAGE_DOWNLOAD_DIR/containerd.io*.deb \
+ $PACKAGE_DOWNLOAD_DIR/docker-ce*.deb
+ fi
+ ;;
+ *)
+ bad_os_id_ver
+ ;;
+esac
+
+# Required to be defined by caller: Are we testing as root or a regular user
+case "$PRIV_NAME" in
+ root)
+ if [[ "$TEST_FLAVOR" = "sys" ]]; then
+ # Used in local image-scp testing
+ setup_rootless
+ fi
+ ;;
+ rootless)
+ # load kernel modules since the rootless user has no permission to do so
+ modprobe ip6_tables || :
+ modprobe ip6table_nat || :
+ setup_rootless
+ ;;
+ *) die_unknown PRIV_NAME
+esac
+
+# Previously, golang was not installed
+source $(dirname $0)/lib.sh
+
+echo "Configuring /etc/containers/registries.conf"
+mkdir -p /etc/containers
+echo -e "[registries.search]\nregistries = ['docker.io', 'registry.fedoraproject.org', 'quay.io']" | tee /etc/containers/registries.conf
+
+show_env_vars
+
+if [[ -z "$CONTAINER" ]]; then
+ # Discovered reemergence of BFQ scheduler bug in kernel 5.8.12-200
+ # which causes a kernel panic when system is under heavy I/O load.
+ # Previously discovered in F32beta and confirmed fixed. It's been
+ # observed in F31 kernels as well. Deploy workaround for all VMs
+ # to ensure a more stable I/O scheduler (elevator).
+ echo "mq-deadline" > /sys/block/sda/queue/scheduler
+ warn "I/O scheduler: $(cat /sys/block/sda/queue/scheduler)"
+fi
+
+execute_local_registry # checks for existing port 5000 listener
+
+if [[ "$IN_PODMAN" == "true" ]]
+then
+ req_env_vars IN_PODMAN_IMAGE IN_PODMAN_NAME
+ echo "Setting up image to use for \$IN_PODMAN=true testing"
+ cd $GOSRC
+ in_podman $IN_PODMAN_IMAGE $0
+ showrun podman commit $IN_PODMAN_NAME $IN_PODMAN_NAME
+ showrun podman rm -f $IN_PODMAN_NAME
+fi
diff --git a/contrib/cirrus/test.sh b/contrib/cirrus/test.sh
new file mode 100755
index 0000000..412bfe0
--- /dev/null
+++ b/contrib/cirrus/test.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+
+set -e
+
+source $(dirname $0)/lib.sh
+
+req_env_vars IN_PODMAN IN_PODMAN_NAME GOSRC 1
+
+# shellcheck disable=SC2154
+if [[ "$PRIV_NAME" == "rootless" ]] && [[ "$UID" -eq 0 ]]; then
+ # Remove /var/lib/cni, it is not required for rootless cni.
+ # We have to test that it works without this directory.
+ # https://github.com/containers/podman/issues/10857
+ rm -rf /var/lib/cni
+
+ # change permission of go src and cache directory
+ # so rootless user can access it
+ chown -R $ROOTLESS_USER:root /var/tmp/go
+ chmod -R g+rwx /var/tmp/go
+
+ req_env_vars ROOTLESS_USER
+ msg "Re-executing test through ssh as user '$ROOTLESS_USER'"
+ msg "************************************************************"
+ set -x
+ exec ssh $ROOTLESS_USER@localhost \
+ -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ -o CheckHostIP=no $GOSRC/$SCRIPT_BASE/test.sh $1
+ # Does not return!
+elif [[ "$UID" -ne 0 ]]; then
+ # Load important env. vars written during setup.sh (run as root)
+ # call to setup_rootless()
+ source /home/$ROOTLESS_USER/ci_environment
+fi
+# else: not running rootless, do nothing special
+
+msg "Test-time env. var. definitions (filtered):"
+show_env_vars
+
+if [[ "$IN_PODMAN" == "true" ]]
+then
+ cd $GOSRC
+ # Host build environment != container environment
+ showrun make clean
+ in_podman --rm $IN_PODMAN_NAME:latest $0 $1
+else
+ cd $GOSRC
+
+ showrun make
+ showrun make install.tools
+
+ case $1 in
+ validate)
+ showrun ooe.sh git remote add upstream "$CIRRUS_REPO_CLONE_URL"
+ showrun ooe.sh git remote update
+ if [[ -n "$CIRRUS_PR" ]]; then
+ echo "Validating a PR"
+ export GITVALIDATE_EPOCH="$CIRRUS_BASE_SHA"
+ elif [[ -n "$CIRRUS_TAG" ]]; then
+ echo "Refusing to validating a Tag"
+ return 0
+ else
+ echo "Validating a Branch"
+ export GITVALIDATE_EPOCH="$CIRRUS_LAST_GREEN_CHANGE"
+ fi
+ echo "Linting & Validating from ${GITVALIDATE_EPOCH:-default EPOCH}"
+ showrun make lint LINTFLAGS="--deadline=20m --color=always -j1"
+ showrun make validate
+ ;;
+ unit)
+ showrun make test-unit
+ ;;
+ conformance)
+ # Typically it's undesirable to install packages at runtime.
+ # This test compares images built with the "latest" version
+ # of docker, against images built with buildah. Runtime installs
+ # are required to ensure the latest docker version is used.
+ [[ "$OS_RELEASE_ID" == "debian" ]] || \
+ bad_os_id_ver
+
+ systemctl enable --now docker
+ showrun make test-conformance
+ ;;
+ integration)
+ showrun make test-integration
+ ;;
+ *)
+ die "First parameter to $(basename $0) not supported: '$1'"
+ ;;
+ esac
+fi
diff --git a/contrib/cirrus/timestamp.awk b/contrib/cirrus/timestamp.awk
new file mode 100644
index 0000000..95b312e
--- /dev/null
+++ b/contrib/cirrus/timestamp.awk
@@ -0,0 +1,20 @@
+
+
+# This script is intended to be piped into by automation, in order to
+# mark output lines with timing information. For example:
+# /path/to/command |& awk --file timestamp.awk
+
+BEGIN {
+ STARTTIME=systime()
+ printf "[%s] START", strftime("%T")
+ printf " - All [+xxxx] lines that follow are relative to right now.\n"
+}
+
+{
+ printf "[%+05ds] %s\n", systime()-STARTTIME, $0
+}
+
+END {
+ printf "[%s] END", strftime("%T")
+ printf " - [%+05ds] total duration since START\n", systime()-STARTTIME
+}
diff --git a/contrib/completions/bash/buildah b/contrib/completions/bash/buildah
new file mode 100644
index 0000000..ad86176
--- /dev/null
+++ b/contrib/completions/bash/buildah
@@ -0,0 +1,1257 @@
+# bash completion file for buildah command
+#
+# This script provides completion of:
+# - commands and their options
+# - filepaths
+#
+# To enable the completions either:
+# - place this file in /usr/share/bash-completion/completions
+# or
+# - copy this file to e.g. ~/.buildah-completion.sh and add the line
+# below to your .bashrc after bash completion features are loaded
+# . ~/.buildah-completion.sh
+#
+# Configuration:
+#
+
+# __buildah_to_alternatives transforms a multiline list of strings into a single line
+# string with the words separated by `|`.
+# This is used to prepare arguments to __buildah_pos_first_nonflag().
+__buildah_to_alternatives() {
+ local parts=( $1 )
+ local IFS='|'
+ echo "${parts[*]}"
+}
+
+# __buildah_to_extglob transforms a multiline list of options into an extglob pattern
+# suitable for use in case statements.
+__buildah_to_extglob() {
+ local extglob=$( __buildah_to_alternatives "$1" )
+ echo "@($extglob)"
+}
+
+# __buildah_pos_first_nonflag finds the position of the first word that is neither
+# option nor an option's argument. If there are options that require arguments,
+# you should pass a glob describing those options, e.g. "--option1|-o|--option2"
+# Use this function to restrict completions to exact positions after the argument list.
+__buildah_pos_first_nonflag() {
+ local argument_flags=$1
+
+ local counter=$((${subcommand_pos:-${command_pos}} + 1))
+ while [ $counter -le $cword ]; do
+ if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
+ (( counter++ ))
+ # eat "=" in case of --option=arg syntax
+ [ "${words[$counter]}" = "=" ] && (( counter++ ))
+ else
+ case "${words[$counter]}" in
+ -*)
+ ;;
+ *)
+ break
+ ;;
+ esac
+ fi
+
+ # Bash splits words at "=", retaining "=" as a word, examples:
+ # "--log-level=error" => 3 words, "--log-opt syslog-facility=daemon" => 4 words
+ while [ "${words[$counter + 1]}" = "=" ] ; do
+ counter=$(( counter + 2))
+ done
+
+ (( counter++ ))
+ done
+
+ echo $counter
+}
+
+# Note for developers:
+# Please arrange options sorted alphabetically by long name with the short
+# options immediately following their corresponding long form.
+# This order should be applied to lists, alternatives and code blocks.
+
+__buildah_previous_extglob_setting=$(shopt -p extglob)
+shopt -s extglob
+
+# __buildah_list_mounted
+__buildah_list_mounted() {
+ COMPREPLY=($(compgen -W "$(buildah mount | awk '{print $1}')" -- $cur))
+}
+
+__buildah_list_containers() {
+ COMPREPLY=($(compgen -W "$(buildah containers --format '{{.ContainerName}} {{.ContainerID}}' )" -- $cur))
+}
+__buildah_list_images() {
+ COMPREPLY=($(compgen -W "$(buildah images --format '{{.ID}} {{.Name}}' )" -- $cur))
+}
+__buildah_list_images_scratch() {
+ COMPREPLY=($(compgen -W "$(buildah images --format '{{.ID}} {{.Name}}' ) scratch" -- $cur))
+}
+__buildah_list_containers_images() {
+ COMPREPLY=($(compgen -W "$(buildah containers --format '{{.ContainerName}} {{.ContainerID}}') $(buildah images --format '{{.ID}} {{.Name}}')" -- $cur))
+}
+
+__buildah_pos_first_nonflag() {
+ local argument_flags=$1
+
+ local counter=$((${subcommand_pos:-${command_pos}} + 1))
+ while [ $counter -le $cword ]; do
+ if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
+ ((counter++))
+ else
+ case "${words[$counter]}" in
+ -*) ;;
+ *)
+ break
+ ;;
+ esac
+ fi
+ ((counter++))
+ done
+
+ echo $counter
+}
+
+# Transforms a multiline list of strings into a single line string
+# with the words separated by "|".
+# This is used to prepare arguments to __buildah_pos_first_nonflag().
+__buildah_to_alternatives() {
+ local parts=($1)
+ local IFS='|'
+ echo "${parts[*]}"
+}
+
+# Transforms a multiline list of options into an extglob pattern
+# suitable for use in case statements.
+__buildah_to_extglob() {
+ local extglob=$(__buildah_to_alternatives "$1")
+ echo "@($extglob)"
+}
+
+# Subcommand processing.
+# Locates the first occurrence of any of the subcommands contained in the
+# first argument. In case of a match, calls the corresponding completion
+# function and returns 0.
+# If no match is found, 1 is returned. The calling function can then
+# continue processing its completion.
+#
+# TODO if the preceding command has options that accept arguments and an
+# argument is equal or one of the subcommands, this is falsely detected as
+# a match.
+__buildah_subcommands() {
+ local subcommands="$1"
+
+ local counter=$(($command_pos + 1))
+ while [ $counter -lt $cword ]; do
+ case "${words[$counter]}" in
+ $(__buildah_to_extglob "$subcommands") )
+subcommand_pos=$counter
+local subcommand=${words[$counter]}
+local completions_func=_buildah_${command}_${subcommand}
+declare -F $completions_func >/dev/null && $completions_func
+return 0
+;;
+esac
+(( counter++ ))
+done
+return 1
+}
+
+ # suppress trailing whitespace
+ __buildah_nospace() {
+ # compopt is not available in ancient bash versions
+ type compopt &>/dev/null && compopt -o nospace
+ }
+
+
+ # global options that may appear after the buildah command
+ _buildah_buildah() {
+ local boolean_options="
+ --help -h
+ --version -v
+ "
+ local options_with_args="
+ --cgroup-manager
+ --registries-conf
+ --registries-conf-dir
+ --root
+ --runroot
+ --storage-driver
+ --storage-opt
+ --userns-uid-map
+ --userns-gid-map
+ "
+
+ case "$prev" in
+ --root | --runroot)
+ case "$cur" in
+ *:*) ;; # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
+ '')
+ COMPREPLY=($(compgen -W '/' -- "$cur"))
+ __buildah_nospace
+ ;;
+ *)
+ _filedir
+ __buildah_nospace
+ ;;
+ esac
+ return
+ ;;
+ --storage-driver)
+ COMPREPLY=($(compgen -W 'overlay' 'vfs' -- "$cur"))
+ return
+ ;;
+ --cgroup-manager)
+ COMPREPLY=($(compgen -W 'cgroupfs systemd' -- "$cur"))
+ return
+ ;;
+ $(__buildah_to_extglob "$options_with_args"))
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ local counter=$(__buildah_pos_first_nonflag $(__buildah_to_extglob "$options_with_args"))
+ if [ $cword -eq $counter ]; then
+ COMPREPLY=($(compgen -W "${commands[*]} help" -- "$cur"))
+ fi
+ ;;
+ esac
+}
+
+ _buildah_rmi() {
+ local boolean_options="
+ --all
+ -a
+ --prune
+ -p
+ --force
+ -f
+ --help
+ -h
+ "
+
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __buildah_list_images
+ ;;
+ esac
+ }
+
+ _buildah_rm() {
+ local boolean_options="
+ --all
+ -a
+ --help
+ -h
+ "
+
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __buildah_list_containers
+ ;;
+ esac
+ }
+
+ _buildah_help() {
+ local counter=$(__buildah_pos_first_nonflag)
+ if [ $cword -eq $counter ]; then
+ COMPREPLY=($(compgen -W "${commands[*]}" -- "$cur"))
+ fi
+ }
+
+ _buildah_config() {
+ local boolean_options="
+ --add-history
+ --help
+ -h
+ "
+
+ local options_with_args="
+ --annotation
+ -a
+ --arch
+ --author
+ --cmd
+ --comment
+ --created-by
+ --domainname
+ --entrypoint
+ --env
+ -e
+ --healthcheck
+ --healthcheck-interval
+ --healthcheck-retries
+ --healthcheck-start-period
+ --healthcheck-timeout
+ --history-comment
+ --hostname
+ --label
+ -l
+ --onbuild
+ --os
+ --port
+ -p
+ --shell
+ --stop-signal
+ --user
+ -u
+ --variant
+ --volume
+ -v
+ --workingdir
+ "
+
+ local all_options="$options_with_args $boolean_options"
+
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __buildah_list_containers
+ ;;
+ esac
+ }
+
+ _buildah_commit() {
+ local boolean_options="
+ --help
+ -h
+ --disable-compression
+ -D
+ --manifest
+ --quiet
+ -q
+ --rm
+ --squash
+ --tls-verify
+ --omit-timestamp
+ "
+
+ local options_with_args="
+ --authfile
+ --cert-dir
+ --creds
+ --format
+ -f
+ --iidfile
+ --sign-by
+ "
+
+ local all_options="$options_with_args $boolean_options"
+
+ case "$prev" in
+ --signature-policy)
+ case "$cur" in
+ *:*) ;; # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
+ '')
+ COMPREPLY=($(compgen -W '/' -- "$cur"))
+ __buildah_nospace
+ ;;
+ *)
+ _filedir
+ __buildah_nospace
+ ;;
+ esac
+ return
+ ;;
+
+ $(__buildah_to_extglob "$options_with_args"))
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __buildah_list_containers
+ ;;
+ esac
+ }
+
+ _buildah_bud() {
+ local boolean_options="
+ --all-platforms
+ --help
+ -h
+ --layers
+ --no-cache
+ --omit-timestamp
+ --pull
+ --pull-always
+ --pull-never
+ --quiet
+ -q
+ --squash
+ --tls-verify
+ "
+
+ local options_with_args="
+ --arch
+ --add-host
+ --annotation
+ --authfile
+ --build-arg
+ --cap-add
+ --cap-drop
+ --cert-dir
+ --cgroup-parent
+ --cpu-period
+ --cpu-quota
+ --cpu-shares
+ --cpuset-cpus
+ --cpuset-mems
+ --creds
+ --decryption-key
+ --device
+ --dns-search
+ --dns
+ --dns-option
+ -f
+ --file
+ --format
+ --http-proxy
+ --ignorefile
+ --iidfile
+ --isolation
+ --ipc
+ --label
+ --manifest
+ -m
+ --memory
+ --memory-swap
+ --net
+ --network
+ --no-pivot
+ --os
+ --pid
+ --platform
+ --runtime
+ --runtime-flag
+ --security-opt
+ --shm-size
+ --sign-by
+ -t
+ --tag
+ --target
+ --ulimit
+ --userns
+ --userns-uid-map
+ --userns-gid-map
+ --userns-uid-map-user
+ --userns-gid-map-group
+ --uts
+ --variant
+ --volume
+ -v
+ "
+
+ local all_options="$options_with_args $boolean_options"
+
+ case "$prev" in
+ --runtime)
+ COMPREPLY=($(compgen -W 'runc runv' -- "$cur"))
+ ;;
+ $(__buildah_to_extglob "$options_with_args"))
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ esac
+}
+
+ _buildah_build_using_dockerfile() {
+ _buildah_bud "$@"
+}
+
+ _buildah_run() {
+ local boolean_options="
+ --add-history
+ --help
+ -t
+ --terminal
+ -h
+ "
+
+ local options_with_args="
+ --cap-add
+ --cap-drop
+ --hostname
+ --ipc
+ --isolation
+ --mount
+ --net
+ --network
+ --pid
+ --runtime
+ --runtime-flag
+ --user
+ --uts
+ --volume
+ -v
+ "
+
+ local all_options="$options_with_args $boolean_options"
+
+ case "$prev" in
+ --runtime)
+ COMPREPLY=($(compgen -W 'runc runv' -- "$cur"))
+ ;;
+ $(__buildah_to_extglob "$options_with_args"))
+ return
+ ;;
+ esac
+
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ __buildah_list_containers
+ ;;
+ esac
+}
+
# Completion for "buildah copy": offers flags for option-like words.
# Relies on $cur/$prev/COMPREPLY being provided by the _buildah dispatcher.
_buildah_copy() {
	local boolean_options="
		--add-history
		--help
		-h
		--quiet
		-q
		--tls-verify
		--remove-signatures
	"

	local options_with_args="
		--chown
		--chmod
		--contextdir
		--ignorefile
		--from
		--authfile
		--cert-dir
		--creds
		--decryption-key
	"

	# Kept for symmetry with the other completion functions; unused below.
	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah add". The flag set intentionally mirrors
# _buildah_copy, since "add" and "copy" share nearly all options.
_buildah_add() {
	local boolean_options="
		--add-history
		--help
		-h
		--quiet
		-q
		--tls-verify
		--remove-signatures
	"

	local options_with_args="
		--chown
		--chmod
		--contextdir
		--ignorefile
		--from
		--authfile
		--cert-dir
		--creds
		--decryption-key
	"

	# Kept for symmetry with the other completion functions; unused below.
	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# "unmount" is an alias for "umount"; forward all arguments.
_buildah_unmount() {
	# Quote "$@" so arguments containing whitespace survive the forward
	# intact (matches _buildah_build_using_dockerfile, which quotes).
	_buildah_umount "$@"
}
+
# Completion for "buildah umount": flags for option-like words, otherwise
# the names of currently-mounted working containers.
_buildah_umount() {
	local boolean_options="
		--all
		-a
		--help
		-h
	"

	# "umount" takes no flags with arguments.
	local options_with_args="
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
		*)
			__buildah_list_mounted
			;;
	esac
}
+
# Completion for "buildah pull": offers flags for option-like words.
_buildah_pull() {
	local boolean_options="
		--all-tags
		-a
		--help
		-h
		--quiet
		-q
		--tls-verify
		--remove-signatures
	"

	local options_with_args="
		--authfile
		--cert-dir
		--creds
		--decryption-key
		--policy
	"

	# Kept for symmetry with the other completion functions; unused below.
	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah push": flags for option-like words, file paths
# for --signature-policy, and image names otherwise.
_buildah_push() {
	local boolean_options="
		--all
		--help
		-h
		--disable-compression
		-D
		--quiet
		-q
		--rm
		--tls-verify
		--remove-signatures
	"

	local options_with_args="
		--authfile
		--cert-dir
		--creds
		--encrypt-layer
		--encryption-key
		--format
		-f
		--sign-by
	"

	# Kept for symmetry with the other completion functions; unused below.
	local all_options="$options_with_args $boolean_options"

	case "$prev" in
		# NOTE(review): --signature-policy is handled here but appears in
		# neither word list above, so it is never offered as a completion
		# candidate — confirm whether that is intentional (hidden flag).
		--signature-policy)
			case "$cur" in
				*:*) ;; # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
				'')
					COMPREPLY=($(compgen -W '/' -- "$cur"))
					__buildah_nospace
					;;
				*)
					_filedir
					__buildah_nospace
					;;
			esac
			return
			;;

		$(__buildah_to_extglob "$options_with_args"))
			# Previous word takes a free-form argument we cannot predict.
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
		*)
			__buildah_list_images
			;;
	esac
}
+
# Completion for "buildah logout": offers flags for option-like words.
_buildah_logout() {
	local boolean_options="
		--help
		-h
		--all
		-a
	"

	local options_with_args="
		--authfile
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah login": offers flags for option-like words.
_buildah_login() {
	local boolean_options="
		--help
		-h
		--get-login
		--tls-verify
	"

	# Fix: the original list contained a stray literal word "string" after
	# --password (a leftover from the flag's usage text), which compgen
	# would offer as a bogus completion candidate.
	local options_with_args="
		--authfile
		--cert-dir
		--password
		-p
		--password-stdin
		--username
		-u
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah manifest": dispatches to the per-subcommand
# completion via __buildah_subcommands when a subcommand is already
# present; otherwise offers flags or subcommand names.
_buildah_manifest() {
	local boolean_options="
		--help
		-h
		--all
	"
	subcommands="
		add
		annotate
		create
		inspect
		push
		remove
		rm
	"
	# If a subcommand is already on the line, its own completion ran; stop.
	__buildah_subcommands "$subcommands" && return

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options " -- "$cur"))
			;;
		*)
			COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
			;;
	esac

}
# Completion for "buildah manifest add": offers flags for option-like words.
_buildah_manifest_add() {
	local boolean_options="
		--help
		-h
		--all
		--tls-verify
	"

	local options_with_args="
		--authfile
		--annotation
		--arch
		--cert-dir
		--creds
		--features
		--os
		--os-features
		--os-version
		--variant
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah manifest annotate": offers flags for option-like words.
_buildah_manifest_annotate() {
	local boolean_options="
		--help
		-h
	"

	local options_with_args="
		--annotation
		--arch
		--features
		--os
		--os-features
		--os-version
		--variant
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah manifest create": offers flags for option-like words.
_buildah_manifest_create() {
	local boolean_options="
		--help
		-h
		--all
	"

	# No flags that take arguments.
	local options_with_args="
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah manifest inspect": offers flags for option-like words.
_buildah_manifest_inspect() {
	local boolean_options="
		--help
		-h
	"

	# No flags that take arguments.
	local options_with_args="
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah manifest push": offers flags for option-like words.
_buildah_manifest_push() {
	# Fix: --rm takes no argument, so it belongs with the boolean flags
	# (matching its classification in _buildah_push). The union of words
	# offered to compgen below is unchanged.
	local boolean_options="
		--help
		-h
		--all
		--remove-signatures
		--rm
		--tls-verify
	"

	local options_with_args="
		--authfile
		--cert-dir
		--creds
		--digestfile
		--format
		-f
		--sign-by
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah manifest remove": only --help/-h are available.
_buildah_manifest_remove() {
	local boolean_options="
		--help
		-h
	"

	# No flags that take arguments.
	local options_with_args="
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}

# Completion for "buildah manifest rm": same flag set as "remove".
_buildah_manifest_rm() {
	local boolean_options="
		--help
		-h
	"

	# No flags that take arguments.
	local options_with_args="
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah mount": flags for option-like words, otherwise
# working-container names.
_buildah_mount() {
	local boolean_options="
		--help
		-h
		--notruncate
	"

	# No flags that take arguments.
	local options_with_args="
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
		*)
			__buildah_list_containers
			;;
	esac
}
+
# "ps", "list", and "ls" are all aliases for "containers"; reuse its completion.
_buildah_ps() {
	_buildah_containers
}

_buildah_list() {
	_buildah_containers
}

_buildah_ls() {
	_buildah_containers
}
+
# Completion for "buildah containers": offers flags for option-like words.
_buildah_containers() {
	local boolean_options="
		--help
		-h
		--json
		--quiet
		-q
		--noheading
		-n
		--notruncate
		-a
		--all
	"

	local options_with_args="
		--filter
		-f
		--format
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah images": offers flags for option-like words.
_buildah_images() {
	local boolean_options="
		--all
		-a
		--digests
		--help
		-h
		--history
		--json
		--quiet
		-q
		--noheading
		-n
		--no-trunc
		--notruncate
	"

	local options_with_args="
		--filter
		-f
		--format
	"

	local all_options="$options_with_args $boolean_options"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah info": offers flags for option-like words.
_buildah_info() {
	local options_with_args="
		--log-level
		--D
		--format
	"
	# NOTE(review): "--D" looks like a typo — the debug shorthand elsewhere
	# in this script is "-D" (see _buildah_push). Confirm against
	# buildah-info(1) before changing the offered word.

	local all_options="$options_with_args"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$options_with_args" -- "$cur"))
			;;
	esac
}
+
# Completion for "buildah inspect": flags for option-like words, otherwise
# container and image names.
_buildah_inspect() {
	local options_with_args="
		--format
		-f
		--type
		-t
	"

	local all_options="$options_with_args"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$options_with_args" -- "$cur"))
			;;
		*)
			__buildah_list_containers_images
			;;

	esac
}
+
# Completion for "buildah tag": no flags; positional arguments are image names.
_buildah_tag() {
	local options_with_args="
	"

	local all_options="$options_with_args"

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$options_with_args" -- "$cur"))
			;;
		*)
			__buildah_list_images
			;;
	esac
}
+
# Completion for "buildah from": flags for option-like words, otherwise
# image names plus "scratch".
_buildah_from() {
	local boolean_options="
		--help
		-h
		--pull
		--pull-always
		--pull-never
		--quiet
		-q
		--tls-verify
	"

	local options_with_args="
		--add-host
		--arch
		--authfile
		--cap-add
		--cap-drop
		--cert-dir
		--cgroup-parent
		--cidfile
		--cpu-period
		--cpu-quota
		--cpu-shares
		--cpuset-cpus
		--cpuset-mems
		--creds
		--device
		--http-proxy
		--ipc
		--isolation
		-m
		--memory
		--memory-swap
		--name
		--net
		--network
		--os
		--pid
		--platform
		--security-opt
		--shm-size
		--ulimit
		--userns
		--userns-uid-map
		--userns-gid-map
		--userns-uid-map-user
		--userns-gid-map-group
		--uts
		--variant
		--volume
	"

	# NOTE(review): unlike its siblings, this function does not define
	# all_options — harmless, since that variable is never read.

	case "$cur" in
		-*)
			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
			;;
		*)
			__buildah_list_images_scratch
			;;
	esac
}
+
# Completion stubs: these declare the known flags but build no COMPREPLY,
# so the shell falls back to default (filename) completion. Presumably
# intentional placeholders — confirm before fleshing them out.
_buildah_unshare() {
	local boolean_options="
		--help
		-h
	"

	local options_with_args="
		--mount
	"
}

_buildah_rename() {
	local boolean_options="
		--help
		-h
	"

	local options_with_args="
	"
}

_buildah_version() {
	local boolean_options="
		--help
		-h
	"

	local options_with_args="
	"
}
+
# Top-level completion entry point for the "buildah" command.
# Finds the first non-flag word on the line (the subcommand), then
# dispatches to the matching _buildah_<subcommand> function if one exists.
_buildah() {
	# Extended globs are needed for the option-matching patterns below;
	# remember the caller's setting so it can be restored on exit.
	local previous_extglob_setting=$(shopt -p extglob)
	shopt -s extglob

	# Known subcommand names. (Kept for reference/parity with the CLI;
	# not consulted by the dispatch logic below.)
	local commands=(
		add
		bud
		build
		commit
		config
		containers
		copy
		delete
		from
		images
		info
		inspect
		list
		ls
		manifest
		mount
		pull
		push
		ps
		rename
		rm
		rmi
		run
		tag
		umount
		unmount
		unshare
		version
	)

	COMPREPLY=()
	local cur prev words cword
	_get_comp_words_by_ref -n : cur prev words cword

	# Scan past global flags (and their arguments) to locate the subcommand.
	# $global_options_with_args is expected to be defined earlier in this
	# script, outside this function.
	local command='buildah' command_pos=0 subcommand_pos
	local counter=1
	while [ $counter -lt $cword ]; do
		case "${words[$counter]}" in
			$(__buildah_to_extglob "$global_options_with_args") )
				# Flag that consumes the next word as its argument.
				(( counter++ ))
				;;
			-*)
				;;
			=)
				# Skip the value in "--flag = value" style input.
				(( counter++ ))
				;;
			*)
				command="${words[$counter]}"
				command_pos=$counter
				break
				;;
		esac
		(( counter++ ))
	done

	local binary="${words[0]}"

	# Fix: use ${command//-/_} (replace ALL hyphens) instead of
	# ${command/-/_} (first hyphen only). With the original form,
	# "build-using-dockerfile" mapped to "_buildah_build_using-dockerfile",
	# so its completion function was never found.
	local completions_func=_buildah_${command//-/_}
	declare -F $completions_func >/dev/null && $completions_func

	eval "$previous_extglob_setting"
	return 0
}
+
# Restore the extglob setting saved at the top of this script, clean up
# the temporary variable, and register the completion entry point.
eval "$__buildah_previous_extglob_setting"
unset __buildah_previous_extglob_setting

complete -F _buildah buildah
diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile
new file mode 100644
index 0000000..785bb22
--- /dev/null
+++ b/contrib/docker/Dockerfile
@@ -0,0 +1,9 @@
# Build an image with buildah compiled from this source tree, configured
# for nested use: vfs storage and chroot isolation work without extra
# privileges inside a container.
FROM fedora
RUN dnf -y update && dnf -y clean all
RUN dnf -y install btrfs-progs-devel containers-common golang go-md2man gpgme-devel libassuan-devel libseccomp-devel make net-tools runc shadow-utils glibc-static libselinux-static libseccomp-static && dnf -y clean all
COPY . /go/src/github.com/containers/buildah
RUN env GOPATH=/go make -C /go/src/github.com/containers/buildah clean all install
# vfs is the storage driver that works without overlay support in-container.
RUN sed -i -r -e 's,driver = ".*",driver = "vfs",g' /etc/containers/storage.conf
# Use the ENV key=value form; the space-separated form is legacy syntax.
ENV BUILDAH_ISOLATION=chroot
WORKDIR /root
CMD /bin/bash
diff --git a/convertcw.go b/convertcw.go
new file mode 100644
index 0000000..85576f4
--- /dev/null
+++ b/convertcw.go
@@ -0,0 +1,217 @@
+package buildah
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal/mkcw"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
// CWConvertImageOptions provides both required and optional bits of
// configuration for CWConvertImage().
type CWConvertImageOptions struct {
	// Required parameters.
	InputImage string

	// If supplied, we'll tag the resulting image with the specified name.
	Tag         string
	OutputImage types.ImageReference

	// If supplied, we'll register the workload with this server.
	// Practically necessary if DiskEncryptionPassphrase is not set, in
	// which case we'll generate one and throw it away after.
	AttestationURL string

	// Used to measure the environment. If left unset (0), defaults will be applied.
	CPUs   int
	Memory int

	// Can be manually set. If left unset ("", false, nil), reasonable values will be used.
	TeeType                  define.TeeType
	IgnoreAttestationErrors  bool
	WorkloadID               string
	DiskEncryptionPassphrase string
	Slop                     string
	FirmwareLibrary          string
	BaseImage                string
	// Logger receives progress/warning output; defaults to
	// logrus.StandardLogger() when nil.
	Logger *logrus.Logger

	// Passed through to BuilderOptions. Most settings won't make
	// sense to be made available here because we don't launch a process.
	ContainerSuffix     string
	PullPolicy          PullPolicy
	BlobDirectory       string
	SignaturePolicyPath string
	ReportWriter        io.Writer
	IDMappingOptions    *IDMappingOptions
	Format              string
	MaxPullRetries      int
	PullRetryDelay      time.Duration
	OciDecryptConfig    *encconfig.DecryptConfig
	MountLabel          string
}
+
+// CWConvertImage takes the rootfs and configuration from one image, generates a
+// LUKS-encrypted disk image that more or less includes them both, and puts the
+// result into a new container image.
+// Returns the new image's ID and digest on success, along with a canonical
+// reference for it if a repository name was specified.
+func CWConvertImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options CWConvertImageOptions) (string, reference.Canonical, digest.Digest, error) {
+ // Apply our defaults if some options aren't set.
+ logger := options.Logger
+ if logger == nil {
+ logger = logrus.StandardLogger()
+ }
+
+ // Now create the target working container, pulling the base image if
+ // there is one and it isn't present.
+ builderOptions := BuilderOptions{
+ FromImage: options.BaseImage,
+ SystemContext: systemContext,
+ Logger: logger,
+
+ ContainerSuffix: options.ContainerSuffix,
+ PullPolicy: options.PullPolicy,
+ BlobDirectory: options.BlobDirectory,
+ SignaturePolicyPath: options.SignaturePolicyPath,
+ ReportWriter: options.ReportWriter,
+ IDMappingOptions: options.IDMappingOptions,
+ Format: options.Format,
+ MaxPullRetries: options.MaxPullRetries,
+ PullRetryDelay: options.PullRetryDelay,
+ OciDecryptConfig: options.OciDecryptConfig,
+ MountLabel: options.MountLabel,
+ }
+ target, err := NewBuilder(ctx, store, builderOptions)
+ if err != nil {
+ return "", nil, "", fmt.Errorf("creating container from target image: %w", err)
+ }
+ defer func() {
+ if err := target.Delete(); err != nil {
+ logrus.Warnf("deleting target container: %v", err)
+ }
+ }()
+ targetDir, err := target.Mount("")
+ if err != nil {
+ return "", nil, "", fmt.Errorf("mounting target container: %w", err)
+ }
+ defer func() {
+ if err := target.Unmount(); err != nil {
+ logrus.Warnf("unmounting target container: %v", err)
+ }
+ }()
+
+ // Mount the source image, pulling it first if necessary.
+ builderOptions = BuilderOptions{
+ FromImage: options.InputImage,
+ SystemContext: systemContext,
+ Logger: logger,
+
+ ContainerSuffix: options.ContainerSuffix,
+ PullPolicy: options.PullPolicy,
+ BlobDirectory: options.BlobDirectory,
+ SignaturePolicyPath: options.SignaturePolicyPath,
+ ReportWriter: options.ReportWriter,
+ IDMappingOptions: options.IDMappingOptions,
+ Format: options.Format,
+ MaxPullRetries: options.MaxPullRetries,
+ PullRetryDelay: options.PullRetryDelay,
+ OciDecryptConfig: options.OciDecryptConfig,
+ MountLabel: options.MountLabel,
+ }
+ source, err := NewBuilder(ctx, store, builderOptions)
+ if err != nil {
+ return "", nil, "", fmt.Errorf("creating container from source image: %w", err)
+ }
+ defer func() {
+ if err := source.Delete(); err != nil {
+ logrus.Warnf("deleting source container: %v", err)
+ }
+ }()
+ sourceInfo := GetBuildInfo(source)
+ if err != nil {
+ return "", nil, "", fmt.Errorf("retrieving info about source image: %w", err)
+ }
+ sourceImageID := sourceInfo.FromImageID
+ sourceSize, err := store.ImageSize(sourceImageID)
+ if err != nil {
+ return "", nil, "", fmt.Errorf("computing size of source image: %w", err)
+ }
+ sourceDir, err := source.Mount("")
+ if err != nil {
+ return "", nil, "", fmt.Errorf("mounting source container: %w", err)
+ }
+ defer func() {
+ if err := source.Unmount(); err != nil {
+ logrus.Warnf("unmounting source container: %v", err)
+ }
+ }()
+
+ // Generate the image contents.
+ archiveOptions := mkcw.ArchiveOptions{
+ AttestationURL: options.AttestationURL,
+ CPUs: options.CPUs,
+ Memory: options.Memory,
+ TempDir: targetDir,
+ TeeType: options.TeeType,
+ IgnoreAttestationErrors: options.IgnoreAttestationErrors,
+ ImageSize: sourceSize,
+ WorkloadID: options.WorkloadID,
+ DiskEncryptionPassphrase: options.DiskEncryptionPassphrase,
+ Slop: options.Slop,
+ FirmwareLibrary: options.FirmwareLibrary,
+ Logger: logger,
+ }
+ rc, workloadConfig, err := mkcw.Archive(sourceDir, &source.OCIv1, archiveOptions)
+ if err != nil {
+ return "", nil, "", fmt.Errorf("generating encrypted image content: %w", err)
+ }
+ if err = archive.Untar(rc, targetDir, &archive.TarOptions{}); err != nil {
+ if err = rc.Close(); err != nil {
+ logger.Warnf("cleaning up: %v", err)
+ }
+ return "", nil, "", fmt.Errorf("saving encrypted image content: %w", err)
+ }
+ if err = rc.Close(); err != nil {
+ return "", nil, "", fmt.Errorf("cleaning up: %w", err)
+ }
+
+ // Commit the image. Clear out most of the configuration (if there is any — we default
+ // to scratch as a base) so that an engine that doesn't or can't set up a TEE will just
+ // run the static entrypoint. The rest of the configuration which the runtime consults
+ // is in the .krun_config.json file in the encrypted filesystem.
+ logger.Log(logrus.DebugLevel, "committing disk image")
+ target.ClearAnnotations()
+ target.ClearEnv()
+ target.ClearLabels()
+ target.ClearOnBuild()
+ target.ClearPorts()
+ target.ClearVolumes()
+ target.SetCmd(nil)
+ target.SetCreatedBy(fmt.Sprintf(": convert %q for use with %q", sourceImageID, workloadConfig.Type))
+ target.SetDomainname("")
+ target.SetEntrypoint([]string{"/entrypoint"})
+ target.SetHealthcheck(nil)
+ target.SetHostname("")
+ target.SetMaintainer("")
+ target.SetShell(nil)
+ target.SetUser("")
+ target.SetWorkDir("")
+ commitOptions := CommitOptions{
+ SystemContext: systemContext,
+ }
+ if options.Tag != "" {
+ commitOptions.AdditionalTags = append(commitOptions.AdditionalTags, options.Tag)
+ }
+ return target.Commit(ctx, options.OutputImage, commitOptions)
+}
diff --git a/convertcw_test.go b/convertcw_test.go
new file mode 100644
index 0000000..7e52639
--- /dev/null
+++ b/convertcw_test.go
@@ -0,0 +1,163 @@
+package buildah
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+
+ "github.com/containers/buildah/internal/mkcw"
+ mkcwtypes "github.com/containers/buildah/internal/mkcw/types"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// dummyAttestationHandler replies with a fixed response code to requests to
// the right path, and caches passphrases indexed by workload ID.
type dummyAttestationHandler struct {
	t *testing.T
	// status is the HTTP code returned to registration requests; 0 means 200 OK.
	status      int
	passphrases map[string]string
	// passphrasesLock guards passphrases, which is read by the test goroutine
	// while the server goroutine writes it.
	passphrasesLock sync.Mutex
}
+
+func (d *dummyAttestationHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ var body bytes.Buffer
+ if req.Body != nil {
+ if _, err := io.Copy(&body, req.Body); err != nil {
+ d.t.Logf("reading request body: %v", err)
+ return
+ }
+ req.Body.Close()
+ }
+ if req.URL != nil && req.URL.Path == "/kbs/v0/register_workload" {
+ var registrationRequest mkcwtypes.RegistrationRequest
+ // if we can't decode the client request, bail
+ if err := json.Unmarshal(body.Bytes(), &registrationRequest); err != nil {
+ rw.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ // cache the passphrase
+ d.passphrasesLock.Lock()
+ if d.passphrases == nil {
+ d.passphrases = make(map[string]string)
+ }
+ d.passphrases[registrationRequest.WorkloadID] = registrationRequest.Passphrase
+ d.passphrasesLock.Unlock()
+ // return the predetermined status
+ status := d.status
+ if status == 0 {
+ status = http.StatusOK
+ }
+ rw.WriteHeader(status)
+ return
+ }
+ // no such handler
+ rw.WriteHeader(http.StatusInternalServerError)
+}
+
// TestCWConvertImage exercises CWConvertImage end-to-end against a local
// dummy attestation server, across combinations of server status and the
// ignore-errors options, then verifies the converted image's contents
// (disk image, workload config, entry point).
func TestCWConvertImage(t *testing.T) {
	ctx := context.TODO()
	systemContext := &types.SystemContext{}
	for _, status := range []int{http.StatusOK, http.StatusInternalServerError} {
		for _, ignoreChainRetrievalErrors := range []bool{false, true} {
			for _, ignoreAttestationErrors := range []bool{false, true} {
				t.Run(fmt.Sprintf("status=%d,ignoreChainRetrievalErrors=%v,ignoreAttestationErrors=%v", status, ignoreChainRetrievalErrors, ignoreAttestationErrors), func(t *testing.T) {
					// create a per-test Store object
					storeOptions := storage.StoreOptions{
						GraphRoot:       t.TempDir(),
						RunRoot:         t.TempDir(),
						GraphDriverName: "vfs",
					}
					store, err := storage.GetStore(storeOptions)
					require.NoError(t, err)
					t.Cleanup(func() {
						if _, err := store.Shutdown(true); err != nil {
							t.Logf("store.Shutdown(%q): %v", t.Name(), err)
						}
					})
					// listen on a system-assigned port
					listener, err := net.Listen("tcp", ":0")
					require.NoError(t, err)
					// keep track of our listener address
					addr := listener.Addr()
					// serve requests on that listener
					handler := &dummyAttestationHandler{t: t, status: status}
					server := http.Server{
						Handler: handler,
					}
					go func() {
						if err := server.Serve(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
							t.Logf("serve: %v", err)
						}
					}()
					// clean up at the end of this test
					t.Cleanup(func() { assert.NoError(t, server.Close()) })
					// convert an image
					options := CWConvertImageOptions{
						InputImage:              "docker.io/library/busybox",
						Tag:                     "localhost/busybox:encrypted",
						AttestationURL:          "http://" + addr.String(),
						IgnoreAttestationErrors: ignoreAttestationErrors,
						Slop:                    "16MB",
					}
					id, _, _, err := CWConvertImage(ctx, systemContext, store, options)
					if status != http.StatusOK && !ignoreAttestationErrors {
						assert.Error(t, err)
						return
					}
					if ignoreChainRetrievalErrors && ignoreAttestationErrors {
						assert.NoError(t, err)
					}
					// Conversion needs network access to pull busybox;
					// skip (rather than fail) if that wasn't possible.
					if err != nil {
						t.Skipf("%s: %v", t.Name(), err)
						return
					}
					// mount the image
					path, err := store.MountImage(id, nil, "")
					require.NoError(t, err)
					t.Cleanup(func() {
						if _, err := store.UnmountImage(id, true); err != nil {
							t.Logf("store.UnmountImage(%q): %v", t.Name(), err)
						}
					})
					// check that the image's contents look like what we expect: disk
					disk := filepath.Join(path, "disk.img")
					require.FileExists(t, disk)
					workloadConfig, err := mkcw.ReadWorkloadConfigFromImage(disk)
					require.NoError(t, err)
					// the passphrase registered with the dummy server must unlock the disk
					handler.passphrasesLock.Lock()
					decryptionPassphrase := handler.passphrases[workloadConfig.WorkloadID]
					handler.passphrasesLock.Unlock()
					err = mkcw.CheckLUKSPassphrase(disk, decryptionPassphrase)
					assert.NoError(t, err)
					// check that the image's contents look like what we expect: config file
					config := filepath.Join(path, "krun-sev.json")
					require.FileExists(t, config)
					workloadConfigBytes, err := os.ReadFile(config)
					require.NoError(t, err)
					var workloadConfigTwo mkcwtypes.WorkloadConfig
					err = json.Unmarshal(workloadConfigBytes, &workloadConfigTwo)
					require.NoError(t, err)
					assert.Equal(t, workloadConfig, workloadConfigTwo)
					// check that the image's contents look like what we expect: an executable entry point
					entrypoint := filepath.Join(path, "entrypoint")
					require.FileExists(t, entrypoint)
					st, err := os.Stat(entrypoint)
					require.NoError(t, err)
					assert.Equal(t, st.Mode().Type(), os.FileMode(0)) // regular file
				})
			}
		}
	}
}
diff --git a/copier/copier.go b/copier/copier.go
new file mode 100644
index 0000000..babab38
--- /dev/null
+++ b/copier/copier.go
@@ -0,0 +1,1986 @@
+package copier
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net"
+ "os"
+ "os/user"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/fileutils"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/sirupsen/logrus"
+)
+
const (
	// copierCommand is the reexec name under which the helper process runs.
	copierCommand    = "buildah-copier"
	// maxLoopsFollowed bounds symlink-chain traversal.
	maxLoopsFollowed = 64
	// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06, from archive/tar
	cISUID = 04000 // Set uid, from archive/tar
	cISGID = 02000 // Set gid, from archive/tar
	cISVTX = 01000 // Save text (sticky bit), from archive/tar
)
+
// init registers copierMain so that re-invocations of this binary under the
// copierCommand name run the helper entry point instead of the normal main.
func init() {
	reexec.Register(copierCommand, copierMain)
}
+
+// isArchivePath returns true if the specified path can be read like a (possibly
+// compressed) tarball.
+func isArchivePath(path string) bool {
+ f, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+ rc, _, err := compression.AutoDecompress(f)
+ if err != nil {
+ return false
+ }
+ defer rc.Close()
+ tr := tar.NewReader(rc)
+ _, err = tr.Next()
+ return err == nil
+}
+
// requestType encodes exactly what kind of request this is.
type requestType string

// The full set of operations the helper process understands.
const (
	requestEval   requestType = "EVAL"
	requestStat   requestType = "STAT"
	requestGet    requestType = "GET"
	requestPut    requestType = "PUT"
	requestMkdir  requestType = "MKDIR"
	requestRemove requestType = "REMOVE"
	requestQuit   requestType = "QUIT"
)
+
// request encodes a single request sent to the helper process. Exported
// fields cross the JSON boundary; the unexported preserved*/rootPrefix
// fields are bookkeeping local to the sending side.
type request struct {
	Request            requestType
	Root               string // used by all requests
	preservedRoot      string
	rootPrefix         string // used to reconstruct paths being handed back to the caller
	Directory          string // used by all requests
	preservedDirectory string
	Globs              []string `json:",omitempty"` // used by stat, get
	preservedGlobs     []string
	StatOptions        StatOptions   `json:",omitempty"`
	GetOptions         GetOptions    `json:",omitempty"`
	PutOptions         PutOptions    `json:",omitempty"`
	MkdirOptions       MkdirOptions  `json:",omitempty"`
	RemoveOptions      RemoveOptions `json:",omitempty"`
}
+
+func (req *request) Excludes() []string {
+ switch req.Request {
+ case requestEval:
+ return nil
+ case requestStat:
+ return req.StatOptions.Excludes
+ case requestGet:
+ return req.GetOptions.Excludes
+ case requestPut:
+ return nil
+ case requestMkdir:
+ return nil
+ case requestRemove:
+ return nil
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
+func (req *request) UIDMap() []idtools.IDMap {
+ switch req.Request {
+ case requestEval:
+ return nil
+ case requestStat:
+ return nil
+ case requestGet:
+ return req.GetOptions.UIDMap
+ case requestPut:
+ return req.PutOptions.UIDMap
+ case requestMkdir:
+ return req.MkdirOptions.UIDMap
+ case requestRemove:
+ return nil
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
+func (req *request) GIDMap() []idtools.IDMap {
+ switch req.Request {
+ case requestEval:
+ return nil
+ case requestStat:
+ return nil
+ case requestGet:
+ return req.GetOptions.GIDMap
+ case requestPut:
+ return req.PutOptions.GIDMap
+ case requestMkdir:
+ return req.MkdirOptions.GIDMap
+ case requestRemove:
+ return nil
+ case requestQuit:
+ return nil
+ default:
+ panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
+ }
+}
+
// response encodes a single response from the helper process; exactly one
// of the per-request-type fields is meaningful, selected by the request
// that was sent.
type response struct {
	Error  string         `json:",omitempty"`
	Stat   statResponse   `json:",omitempty"`
	Eval   evalResponse   `json:",omitempty"`
	Get    getResponse    `json:",omitempty"`
	Put    putResponse    `json:",omitempty"`
	Mkdir  mkdirResponse  `json:",omitempty"`
	Remove removeResponse `json:",omitempty"`
}

// statResponse encodes a response for a single Stat request.
type statResponse struct {
	Globs []*StatsForGlob
}

// evalResponse encodes a response for a single Eval request.
type evalResponse struct {
	Evaluated string
}

// StatsForGlob encode results for a single glob pattern passed to Stat().
type StatsForGlob struct {
	Error   string                  `json:",omitempty"` // error if the Glob pattern was malformed
	Glob    string                  // input pattern to which this result corresponds
	Globbed []string                // a slice of zero or more names that match the glob
	Results map[string]*StatForItem // one for each Globbed value if there are any, or for Glob
}

// StatForItem encode results for a single filesystem item, as returned by Stat().
type StatForItem struct {
	Error           string `json:",omitempty"`
	Name            string
	Size            int64       // dereferenced value for symlinks
	Mode            os.FileMode // dereferenced value for symlinks
	ModTime         time.Time   // dereferenced value for symlinks
	IsSymlink       bool
	IsDir           bool   // dereferenced value for symlinks
	IsRegular       bool   // dereferenced value for symlinks
	IsArchive       bool   // dereferenced value for symlinks
	ImmediateTarget string `json:",omitempty"` // raw link content
}

// getResponse encodes a response for a single Get request.
// (Empty: Get's payload travels over the bulk stream, not in the response.)
type getResponse struct {
}

// putResponse encodes a response for a single Put request.
type putResponse struct {
}

// mkdirResponse encodes a response for a single Mkdir request.
type mkdirResponse struct {
}

// removeResponse encodes a response for a single Remove request.
type removeResponse struct {
}
+
// EvalOptions controls parts of Eval()'s behavior.
// (Currently empty; reserved for future options.)
type EvalOptions struct {
}
+
+// Eval evaluates the directory's path, including any intermediate symbolic
+// links.
+// If root is specified and the current OS supports it, and the calling process
+// has the necessary privileges, evaluation is performed in a chrooted context.
+// If the directory is specified as an absolute path, it should either be the
+// root directory or a subdirectory of the root directory. Otherwise, the
+// directory is treated as a path relative to the root directory.
+func Eval(root string, directory string, options EvalOptions) (string, error) {
+ req := request{
+ Request: requestEval,
+ Root: root,
+ Directory: directory,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return "", err
+ }
+ if resp.Error != "" {
+ return "", errors.New(resp.Error)
+ }
+ return resp.Eval.Evaluated, nil
+}
+
// StatOptions controls parts of Stat()'s behavior.
type StatOptions struct {
	CheckForArchives bool     // check for and populate the IsArchive bit in returned values
	Excludes         []string // contents to pretend don't exist, using the OS-specific path separator
}
+
+// Stat globs the specified pattern in the specified directory and returns its
+// results.
+// If root and directory are both not specified, the current root directory is
+// used, and relative names in the globs list are treated as being relative to
+// the current working directory.
+// If root is specified and the current OS supports it, and the calling process
+// has the necessary privileges, the stat() is performed in a chrooted context.
+// If the directory is specified as an absolute path, it should either be the
+// root directory or a subdirectory of the root directory. Otherwise, the
+// directory is treated as a path relative to the root directory.
+// Relative names in the glob list are treated as being relative to the
+// directory.
+func Stat(root string, directory string, options StatOptions, globs []string) ([]*StatsForGlob, error) {
+ req := request{
+ Request: requestStat,
+ Root: root,
+ Directory: directory,
+ Globs: append([]string{}, globs...),
+ StatOptions: options,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != "" {
+ return nil, errors.New(resp.Error)
+ }
+ return resp.Stat.Globs, nil
+}
+
// GetOptions controls parts of Get()'s behavior.
type GetOptions struct {
	UIDMap, GIDMap     []idtools.IDMap   // map from hostIDs to containerIDs in the output archive
	Excludes           []string          // contents to pretend don't exist, using the OS-specific path separator
	ExpandArchives     bool              // extract the contents of named items that are archives
	ChownDirs          *idtools.IDPair   // set ownership on directories. no effect on archives being extracted
	ChmodDirs          *os.FileMode      // set permissions on directories. no effect on archives being extracted
	ChownFiles         *idtools.IDPair   // set ownership of files. no effect on archives being extracted
	ChmodFiles         *os.FileMode      // set permissions on files. no effect on archives being extracted
	StripSetuidBit     bool              // strip the setuid bit off of items being copied. no effect on archives being extracted
	StripSetgidBit     bool              // strip the setgid bit off of items being copied. no effect on archives being extracted
	StripStickyBit     bool              // strip the sticky bit off of items being copied. no effect on archives being extracted
	StripXattrs        bool              // don't record extended attributes of items being copied. no effect on archives being extracted
	KeepDirectoryNames bool              // don't strip the top directory's basename from the paths of items in subdirectories
	Rename             map[string]string // rename items with the specified names, or under the specified names
	NoDerefSymlinks    bool              // don't follow symlinks when globs match them
	IgnoreUnreadable   bool              // ignore errors reading items, instead of returning an error
	NoCrossDevice      bool              // if a subdirectory is a mountpoint with a different device number, include it but skip its contents
}
+
+// Get produces an archive containing items that match the specified glob
+// patterns and writes it to bulkWriter.
+// If root and directory are both not specified, the current root directory is
+// used, and relative names in the globs list are treated as being relative to
+// the current working directory.
+// If root is specified and the current OS supports it, and the calling process
+// has the necessary privileges, the contents are read in a chrooted context.
+// If the directory is specified as an absolute path, it should either be the
+// root directory or a subdirectory of the root directory. Otherwise, the
+// directory is treated as a path relative to the root directory.
+// Relative names in the glob list are treated as being relative to the
+// directory.
+func Get(root string, directory string, options GetOptions, globs []string, bulkWriter io.Writer) error {
+ req := request{
+ Request: requestGet,
+ Root: root,
+ Directory: directory,
+ Globs: append([]string{}, globs...),
+ StatOptions: StatOptions{
+ CheckForArchives: options.ExpandArchives,
+ },
+ GetOptions: options,
+ }
+ resp, err := copier(nil, bulkWriter, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
+
// PutOptions controls parts of Put()'s behavior.
// The Default* fields govern directories that have to be created implicitly
// (i.e. parents not present in the incoming archive), while ChownDirs and
// ChmodDirs govern directories created from archive entries.
type PutOptions struct {
	UIDMap, GIDMap       []idtools.IDMap   // map from containerIDs to hostIDs when writing contents to disk
	DefaultDirOwner      *idtools.IDPair   // set ownership of implicitly-created directories, default is ChownDirs, or 0:0 if ChownDirs not set
	DefaultDirMode       *os.FileMode      // set permissions on implicitly-created directories, default is ChmodDirs, or 0755 if ChmodDirs not set
	ChownDirs            *idtools.IDPair   // set ownership of newly-created directories
	ChmodDirs            *os.FileMode      // set permissions on newly-created directories
	ChownFiles           *idtools.IDPair   // set ownership of newly-created files
	ChmodFiles           *os.FileMode      // set permissions on newly-created files
	StripSetuidBit       bool              // strip the setuid bit off of items being written
	StripSetgidBit       bool              // strip the setgid bit off of items being written
	StripStickyBit       bool              // strip the sticky bit off of items being written
	StripXattrs          bool              // don't bother trying to set extended attributes of items being copied
	IgnoreXattrErrors    bool              // ignore any errors encountered when attempting to set extended attributes
	IgnoreDevices        bool              // ignore items which are character or block devices
	NoOverwriteDirNonDir bool              // instead of quietly overwriting directories with non-directories, return an error
	NoOverwriteNonDirDir bool              // instead of quietly overwriting non-directories with directories, return an error
	Rename               map[string]string // rename items with the specified names, or under the specified names
}
+
+// Put extracts an archive from the bulkReader at the specified directory.
+// If root and directory are both not specified, the current root directory is
+// used.
+// If root is specified and the current OS supports it, and the calling process
+// has the necessary privileges, the contents are written in a chrooted
+// context. If the directory is specified as an absolute path, it should
+// either be the root directory or a subdirectory of the root directory.
+// Otherwise, the directory is treated as a path relative to the root
+// directory.
+func Put(root string, directory string, options PutOptions, bulkReader io.Reader) error {
+ req := request{
+ Request: requestPut,
+ Root: root,
+ Directory: directory,
+ PutOptions: options,
+ }
+ resp, err := copier(bulkReader, nil, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
+
// MkdirOptions controls parts of Mkdir()'s behavior.
type MkdirOptions struct {
	UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when creating directories
	ChownNew       *idtools.IDPair // set ownership of newly-created directories
	ChmodNew       *os.FileMode    // set permissions on newly-created directories
}
+
+// Mkdir ensures that the specified directory exists. Any directories which
+// need to be created will be given the specified ownership and permissions.
+// If root and directory are both not specified, the current root directory is
+// used.
+// If root is specified and the current OS supports it, and the calling process
+// has the necessary privileges, the directory is created in a chrooted
+// context. If the directory is specified as an absolute path, it should
+// either be the root directory or a subdirectory of the root directory.
+// Otherwise, the directory is treated as a path relative to the root
+// directory.
+func Mkdir(root string, directory string, options MkdirOptions) error {
+ req := request{
+ Request: requestMkdir,
+ Root: root,
+ Directory: directory,
+ MkdirOptions: options,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
+
// RemoveOptions controls parts of Remove()'s behavior.
type RemoveOptions struct {
	All bool // if Directory is a directory, remove its contents as well
}
+
+// Remove removes the specified directory or item, traversing any intermediate
+// symbolic links.
+// If the root directory is not specified, the current root directory is used.
+// If root is specified and the current OS supports it, and the calling process
+// has the necessary privileges, the remove() is performed in a chrooted context.
+// If the item to remove is specified as an absolute path, it should either be
+// in the root directory or in a subdirectory of the root directory. Otherwise,
+// the directory is treated as a path relative to the root directory.
+func Remove(root string, item string, options RemoveOptions) error {
+ req := request{
+ Request: requestRemove,
+ Root: root,
+ Directory: item,
+ RemoveOptions: options,
+ }
+ resp, err := copier(nil, nil, req)
+ if err != nil {
+ return err
+ }
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ return nil
+}
+
// cleanerReldirectory resolves the relative path candidate lexically, so
// that joining the result to another directory can't lexically escape that
// directory: the candidate is anchored below a synthetic root, cleaned, and
// then un-anchored again.  An empty result becomes ".".
func cleanerReldirectory(candidate string) string {
	sep := string(os.PathSeparator)
	rooted := filepath.Clean(sep + candidate)
	if trimmed := strings.TrimPrefix(rooted, sep); trimmed != "" {
		return trimmed
	}
	return "."
}
+
+// convertToRelSubdirectory returns the path of directory, bound and relative to
+// root, as a relative path, or an error if that path can't be computed or if
+// the two directories are on different volumes
+func convertToRelSubdirectory(root, directory string) (relative string, err error) {
+ if root == "" || !filepath.IsAbs(root) {
+ return "", fmt.Errorf("expected root directory to be an absolute path, got %q", root)
+ }
+ if directory == "" || !filepath.IsAbs(directory) {
+ return "", fmt.Errorf("expected directory to be an absolute path, got %q", root)
+ }
+ if filepath.VolumeName(root) != filepath.VolumeName(directory) {
+ return "", fmt.Errorf("%q and %q are on different volumes", root, directory)
+ }
+ rel, err := filepath.Rel(root, directory)
+ if err != nil {
+ return "", fmt.Errorf("computing path of %q relative to %q: %w", directory, root, err)
+ }
+ return cleanerReldirectory(rel), nil
+}
+
// currentVolumeRoot returns the root directory of the volume that holds the
// current working directory (just the path separator on systems where
// filepath.VolumeName is always empty).
func currentVolumeRoot() (string, error) {
	wd, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("getting current working directory: %w", err)
	}
	root := filepath.VolumeName(wd) + string(os.PathSeparator)
	return root, nil
}
+
// isVolumeRoot reports whether candidate, made absolute, is the root
// directory of its volume.
func isVolumeRoot(candidate string) (bool, error) {
	abs, err := filepath.Abs(candidate)
	if err != nil {
		return false, fmt.Errorf("converting %q to an absolute path: %w", candidate, err)
	}
	volumeRoot := filepath.VolumeName(abs) + string(os.PathSeparator)
	return abs == volumeRoot, nil
}
+
// looksLikeAbs reports whether candidate starts with exactly one path
// separator, i.e. whether it names an absolute location without being a
// double-separator-prefixed (UNC-style) name.
// Bug fix: the original indexed candidate[0] unconditionally and would
// panic on an empty string; an empty candidate is simply not absolute.
func looksLikeAbs(candidate string) bool {
	if candidate == "" {
		return false
	}
	return candidate[0] == os.PathSeparator && (len(candidate) == 1 || candidate[1] != os.PathSeparator)
}
+
+func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
+ if req.Directory == "" {
+ if req.Root == "" {
+ wd, err := os.Getwd()
+ if err != nil {
+ return nil, fmt.Errorf("getting current working directory: %w", err)
+ }
+ req.Directory = wd
+ } else {
+ req.Directory = req.Root
+ }
+ }
+ if req.Root == "" {
+ root, err := currentVolumeRoot()
+ if err != nil {
+ return nil, fmt.Errorf("determining root of current volume: %w", err)
+ }
+ req.Root = root
+ }
+ if filepath.IsAbs(req.Directory) {
+ _, err := convertToRelSubdirectory(req.Root, req.Directory)
+ if err != nil {
+ return nil, fmt.Errorf("rewriting %q to be relative to %q: %w", req.Directory, req.Root, err)
+ }
+ }
+ isAlreadyRoot, err := isVolumeRoot(req.Root)
+ if err != nil {
+ return nil, fmt.Errorf("checking if %q is a root directory: %w", req.Root, err)
+ }
+ if !isAlreadyRoot && canChroot {
+ return copierWithSubprocess(bulkReader, bulkWriter, req)
+ }
+ return copierWithoutSubprocess(bulkReader, bulkWriter, req)
+}
+
+func copierWithoutSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
+ req.preservedRoot = req.Root
+ req.rootPrefix = string(os.PathSeparator)
+ req.preservedDirectory = req.Directory
+ req.preservedGlobs = append([]string{}, req.Globs...)
+ if !filepath.IsAbs(req.Directory) {
+ req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+ }
+ absoluteGlobs := make([]string, 0, len(req.Globs))
+ for _, glob := range req.preservedGlobs {
+ if filepath.IsAbs(glob) {
+ relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err)
+ os.Exit(1)
+ }
+ absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Root, string(os.PathSeparator)+relativeGlob))
+ } else {
+ absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(glob)))
+ }
+ }
+ req.Globs = absoluteGlobs
+ resp, cb, err := copierHandler(bulkReader, bulkWriter, req)
+ if err != nil {
+ return nil, err
+ }
+ if cb != nil {
+ if err = cb(); err != nil {
+ return nil, err
+ }
+ }
+ return resp, nil
+}
+
+func closeIfNotNilYet(f **os.File, what string) {
+ if f != nil && *f != nil {
+ err := (*f).Close()
+ *f = nil
+ if err != nil {
+ logrus.Debugf("error closing %s: %v", what, err)
+ }
+ }
+}
+
// copierWithSubprocess runs the request in a reexeced child process (which
// can chroot into req.Root), wiring up four pipes: stdin/stdout carry
// JSON-encoded requests and responses, and two extra descriptors (3 and 4
// in the child) carry bulk tar data into and out of the child.
func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (resp *response, err error) {
	// Substitute harmless defaults so the bulk-copy goroutines below
	// always have something to read from and write to.
	if bulkReader == nil {
		bulkReader = bytes.NewReader([]byte{})
	}
	if bulkWriter == nil {
		bulkWriter = io.Discard
	}
	cmd := reexec.Command(copierCommand)
	stdinRead, stdinWrite, err := os.Pipe()
	if err != nil {
		return nil, fmt.Errorf("pipe: %w", err)
	}
	// The deferred closers are no-ops for any descriptor that the normal
	// path below has already closed and nilled out.
	defer closeIfNotNilYet(&stdinRead, "stdin pipe reader")
	defer closeIfNotNilYet(&stdinWrite, "stdin pipe writer")
	encoder := json.NewEncoder(stdinWrite)
	stdoutRead, stdoutWrite, err := os.Pipe()
	if err != nil {
		return nil, fmt.Errorf("pipe: %w", err)
	}
	defer closeIfNotNilYet(&stdoutRead, "stdout pipe reader")
	defer closeIfNotNilYet(&stdoutWrite, "stdout pipe writer")
	decoder := json.NewDecoder(stdoutRead)
	bulkReaderRead, bulkReaderWrite, err := os.Pipe()
	if err != nil {
		return nil, fmt.Errorf("pipe: %w", err)
	}
	defer closeIfNotNilYet(&bulkReaderRead, "child bulk content reader pipe, read end")
	defer closeIfNotNilYet(&bulkReaderWrite, "child bulk content reader pipe, write end")
	bulkWriterRead, bulkWriterWrite, err := os.Pipe()
	if err != nil {
		return nil, fmt.Errorf("pipe: %w", err)
	}
	defer closeIfNotNilYet(&bulkWriterRead, "child bulk content writer pipe, read end")
	defer closeIfNotNilYet(&bulkWriterWrite, "child bulk content writer pipe, write end")
	cmd.Dir = "/"
	// Pass our log level down to the child (read back in copierMain).
	cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...)

	errorBuffer := bytes.Buffer{}
	cmd.Stdin = stdinRead
	cmd.Stdout = stdoutWrite
	cmd.Stderr = &errorBuffer
	// These become descriptors 3 (bulk input) and 4 (bulk output) in the child.
	cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite}
	if err = cmd.Start(); err != nil {
		return nil, fmt.Errorf("starting subprocess: %w", err)
	}
	cmdToWaitFor := cmd
	// Make sure the child gets reaped even on error paths; the success
	// path sets cmdToWaitFor to nil and calls Wait() itself.
	defer func() {
		if cmdToWaitFor != nil {
			if err := cmdToWaitFor.Wait(); err != nil {
				if errorBuffer.String() != "" {
					logrus.Debug(errorBuffer.String())
				}
			}
		}
	}()
	// Close our copies of the descriptor ends that the child now owns.
	stdinRead.Close()
	stdinRead = nil
	stdoutWrite.Close()
	stdoutWrite = nil
	bulkReaderRead.Close()
	bulkReaderRead = nil
	bulkWriterWrite.Close()
	bulkWriterWrite = nil
	killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam
		if err2 := cmd.Process.Kill(); err2 != nil {
			return nil, fmt.Errorf("killing subprocess: %v; %s: %w", err2, step, err)
		}
		return nil, fmt.Errorf("%v: %w", step, err)
	}
	// Send the request, read the response, then tell the child to exit.
	if err = encoder.Encode(req); err != nil {
		return killAndReturn(err, "error encoding request for copier subprocess")
	}
	if err = decoder.Decode(&resp); err != nil {
		// An EOF with data on stderr means the child died and told us why.
		if errors.Is(err, io.EOF) && errorBuffer.Len() > 0 {
			return killAndReturn(errors.New(errorBuffer.String()), "error in copier subprocess")
		}
		return killAndReturn(err, "error decoding response from copier subprocess")
	}
	if err = encoder.Encode(&request{Request: requestQuit}); err != nil {
		return killAndReturn(err, "error encoding request for copier subprocess")
	}
	stdinWrite.Close()
	stdinWrite = nil
	stdoutRead.Close()
	stdoutRead = nil
	// Shuttle the bulk data in both directions concurrently.
	var wg sync.WaitGroup
	var readError, writeError error
	wg.Add(1)
	go func() {
		_, writeError = io.Copy(bulkWriter, bulkWriterRead)
		bulkWriterRead.Close()
		bulkWriterRead = nil
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		_, readError = io.Copy(bulkReaderWrite, bulkReader)
		bulkReaderWrite.Close()
		bulkReaderWrite = nil
		wg.Done()
	}()
	wg.Wait()
	cmdToWaitFor = nil
	if err = cmd.Wait(); err != nil {
		// Prefer whatever the child wrote to stderr over Wait()'s error.
		if errorBuffer.String() != "" {
			err = fmt.Errorf("%s", errorBuffer.String())
		}
		return nil, err
	}
	if cmd.ProcessState.Exited() && !cmd.ProcessState.Success() {
		err = fmt.Errorf("subprocess exited with error")
		if errorBuffer.String() != "" {
			err = fmt.Errorf("%s", errorBuffer.String())
		}
		return nil, err
	}
	// Anything else on stderr was just debug logging from the child.
	loggedOutput := strings.TrimSuffix(errorBuffer.String(), "\n")
	if len(loggedOutput) > 0 {
		for _, output := range strings.Split(loggedOutput, "\n") {
			logrus.Debug(output)
		}
	}
	if readError != nil {
		return nil, fmt.Errorf("passing bulk input to subprocess: %w", readError)
	}
	if writeError != nil {
		return nil, fmt.Errorf("passing bulk output from subprocess: %w", writeError)
	}
	return resp, nil
}
+
+func copierMain() {
+ var chrooted bool
+ decoder := json.NewDecoder(os.Stdin)
+ encoder := json.NewEncoder(os.Stdout)
+ previousRequestRoot := ""
+
+ // Attempt a user and host lookup to force libc (glibc, and possibly others that use dynamic
+ // modules to handle looking up user and host information) to load modules that match the libc
+ // our binary is currently using. Hopefully they're loaded on first use, so that they won't
+ // need to be loaded after we've chrooted into the rootfs, which could include modules that
+ // don't match our libc and which can't be loaded, or modules which we don't want to execute
+ // because we don't trust their code.
+ _, _ = user.Lookup("buildah")
+ _, _ = net.LookupHost("localhost")
+
+ // Set logging.
+ if level := os.Getenv("LOGLEVEL"); level != "" {
+ if ll, err := strconv.Atoi(level); err == nil {
+ logrus.SetLevel(logrus.Level(ll))
+ }
+ }
+
+ // Set up descriptors for receiving and sending tarstreams.
+ bulkReader := os.NewFile(3, "bulk-reader")
+ bulkWriter := os.NewFile(4, "bulk-writer")
+
+ for {
+ // Read a request.
+ req := new(request)
+ if err := decoder.Decode(req); err != nil {
+ fmt.Fprintf(os.Stderr, "error decoding request from copier parent process: %v", err)
+ os.Exit(1)
+ }
+ if req.Request == requestQuit {
+ // Making Quit a specific request means that we could
+ // run Stat() at a caller's behest before using the
+ // same process for Get() or Put(). Maybe later.
+ break
+ }
+
+ // Multiple requests should list the same root, because we
+ // can't un-chroot to chroot to some other location.
+ if previousRequestRoot != "" {
+ // Check that we got the same input value for
+ // where-to-chroot-to.
+ if req.Root != previousRequestRoot {
+ fmt.Fprintf(os.Stderr, "error: can't change location of chroot from %q to %q", previousRequestRoot, req.Root)
+ os.Exit(1)
+ }
+ previousRequestRoot = req.Root
+ } else {
+ // Figure out where to chroot to, if we weren't told.
+ if req.Root == "" {
+ root, err := currentVolumeRoot()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error determining root of current volume: %v", err)
+ os.Exit(1)
+ }
+ req.Root = root
+ }
+ // Change to the specified root directory.
+ var err error
+ chrooted, err = chroot(req.Root)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v", err)
+ os.Exit(1)
+ }
+ }
+
+ req.preservedRoot = req.Root
+ req.rootPrefix = string(os.PathSeparator)
+ req.preservedDirectory = req.Directory
+ req.preservedGlobs = append([]string{}, req.Globs...)
+ if chrooted {
+ // We'll need to adjust some things now that the root
+ // directory isn't what it was. Make the directory and
+ // globs absolute paths for simplicity's sake.
+ absoluteDirectory := req.Directory
+ if !filepath.IsAbs(req.Directory) {
+ absoluteDirectory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+ }
+ relativeDirectory, err := convertToRelSubdirectory(req.preservedRoot, absoluteDirectory)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", absoluteDirectory, req.preservedRoot, err)
+ os.Exit(1)
+ }
+ req.Directory = filepath.Clean(string(os.PathSeparator) + relativeDirectory)
+ absoluteGlobs := make([]string, 0, len(req.Globs))
+ for i, glob := range req.preservedGlobs {
+ if filepath.IsAbs(glob) {
+ relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err)
+ os.Exit(1)
+ }
+ absoluteGlobs = append(absoluteGlobs, filepath.Clean(string(os.PathSeparator)+relativeGlob))
+ } else {
+ absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i])))
+ }
+ }
+ req.Globs = absoluteGlobs
+ req.rootPrefix = req.Root
+ req.Root = string(os.PathSeparator)
+ } else {
+ // Make the directory and globs absolute paths for
+ // simplicity's sake.
+ if !filepath.IsAbs(req.Directory) {
+ req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
+ }
+ absoluteGlobs := make([]string, 0, len(req.Globs))
+ for i, glob := range req.preservedGlobs {
+ if filepath.IsAbs(glob) {
+ absoluteGlobs = append(absoluteGlobs, req.Globs[i])
+ } else {
+ absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i])))
+ }
+ }
+ req.Globs = absoluteGlobs
+ }
+ resp, cb, err := copierHandler(bulkReader, bulkWriter, *req)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error handling request %#v from copier parent process: %v", *req, err)
+ os.Exit(1)
+ }
+ // Encode the response.
+ if err := encoder.Encode(resp); err != nil {
+ fmt.Fprintf(os.Stderr, "error encoding response %#v for copier parent process: %v", *req, err)
+ os.Exit(1)
+ }
+ // If there's bulk data to transfer, run the callback to either
+ // read or write it.
+ if cb != nil {
+ if err = cb(); err != nil {
+ fmt.Fprintf(os.Stderr, "error during bulk transfer for %#v: %v", *req, err)
+ os.Exit(1)
+ }
+ }
+ }
+}
+
+func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, func() error, error) {
+ // NewPatternMatcher splits patterns into components using
+ // os.PathSeparator, implying that it expects OS-specific naming
+ // conventions.
+ excludes := req.Excludes()
+ pm, err := fileutils.NewPatternMatcher(excludes)
+ if err != nil {
+ return nil, nil, fmt.Errorf("processing excludes list %v: %w", excludes, err)
+ }
+
+ var idMappings *idtools.IDMappings
+ uidMap, gidMap := req.UIDMap(), req.GIDMap()
+ if len(uidMap) > 0 && len(gidMap) > 0 {
+ idMappings = idtools.NewIDMappingsFromMaps(uidMap, gidMap)
+ }
+
+ switch req.Request {
+ default:
+ return nil, nil, fmt.Errorf("not an implemented request type: %q", req.Request)
+ case requestEval:
+ resp := copierHandlerEval(req)
+ return resp, nil, nil
+ case requestStat:
+ resp := copierHandlerStat(req, pm)
+ return resp, nil, nil
+ case requestGet:
+ return copierHandlerGet(bulkWriter, req, pm, idMappings)
+ case requestPut:
+ return copierHandlerPut(bulkReader, req, idMappings)
+ case requestMkdir:
+ return copierHandlerMkdir(req, idMappings)
+ case requestRemove:
+ resp := copierHandlerRemove(req)
+ return resp, nil, nil
+ case requestQuit:
+ return nil, nil, nil
+ }
+}
+
+// pathIsExcluded computes path relative to root, then asks the pattern matcher
+// if the result is excluded. Returns the relative path and the matcher's
+// results.
+func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bool, error) {
+ rel, err := convertToRelSubdirectory(root, path)
+ if err != nil {
+ return "", false, fmt.Errorf("copier: error computing path of %q relative to root %q: %w", path, root, err)
+ }
+ if pm == nil {
+ return rel, false, nil
+ }
+ if rel == "." {
+ // special case
+ return rel, false, nil
+ }
+ // Matches uses filepath.FromSlash() to convert candidates before
+ // checking if they match the patterns it's been given, implying that
+ // it expects Unix-style paths.
+ matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
+ if err != nil {
+ return rel, false, fmt.Errorf("copier: error checking if %q is excluded: %w", rel, err)
+ }
+ if matches {
+ return rel, true, nil
+ }
+ return rel, false, nil
+}
+
// resolvePath resolves symbolic links in paths, treating the specified
// directory as the root.
// Resolving the path this way, and using the result, is in no way secure
// against another process manipulating the content that we're looking at, and
// it is not expected to be.
// This helps us approximate chrooted behavior on systems and in test cases
// where chroot isn't available.
// When evaluateFinalComponent is false, a symlink in the last position is
// returned as-is rather than followed.  Components that match pm are not
// examined for symlink-ness, and once an excluded component is seen the rest
// of the path is taken literally.
func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.PatternMatcher) (string, error) {
	rel, err := convertToRelSubdirectory(root, path)
	if err != nil {
		return "", fmt.Errorf("making path %q relative to %q", path, root)
	}
	workingPath := root
	followed := 0
	components := strings.Split(rel, string(os.PathSeparator))
	excluded := false
	// Walk the path one component at a time, expanding symlinks as we go.
	for len(components) > 0 {
		// if anything we try to examine is excluded, then resolution has to "break"
		_, thisExcluded, err := pathIsExcluded(root, filepath.Join(workingPath, components[0]), pm)
		if err != nil {
			return "", err
		}
		excluded = excluded || thisExcluded
		if !excluded {
			// Readlink succeeding means this component is a symlink; skip
			// following it when it's the final component and the caller
			// asked us not to evaluate that one.
			if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil && !(len(components) == 1 && !evaluateFinalComponent) {
				followed++
				// Cap the number of links we'll follow, like the kernel's ELOOP limit.
				if followed > maxLoopsFollowed {
					return "", &os.PathError{
						Op:   "open",
						Path: path,
						Err:  syscall.ELOOP,
					}
				}
				if filepath.IsAbs(target) || looksLikeAbs(target) {
					// symlink to an absolute path - prepend the
					// root directory to that absolute path to
					// replace the current location, and resolve
					// the remaining components
					workingPath = root
					components = append(strings.Split(target, string(os.PathSeparator)), components[1:]...)
					continue
				}
				// symlink to a relative path - add the link target to
				// the current location to get the next location, and
				// resolve the remaining components
				rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target))
				if err != nil {
					return "", fmt.Errorf("making path %q relative to %q", filepath.Join(workingPath, target), root)
				}
				workingPath = root
				components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...)
				continue
			}
		}
		// append the current component's name to get the next location
		workingPath = filepath.Join(workingPath, components[0])
		if workingPath == filepath.Join(root, "..") {
			// attempted to go above the root using a relative path .., scope it
			workingPath = root
		}
		// ready to handle the next component
		components = components[1:]
	}
	return workingPath, nil
}
+
+func copierHandlerEval(req request) *response {
+ errorResponse := func(fmtspec string, args ...interface{}) *response {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Eval: evalResponse{}}
+ }
+ resolvedTarget, err := resolvePath(req.Root, req.Directory, true, nil)
+ if err != nil {
+ return errorResponse("copier: eval: error resolving %q: %v", req.Directory, err)
+ }
+ return &response{Eval: evalResponse{Evaluated: filepath.Join(req.rootPrefix, resolvedTarget)}}
+}
+
// copierHandlerStat globs each requested pattern, filters out excluded
// matches, and lstat()s what's left, following symbolic links far enough to
// also report on what they ultimately point to.
func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
	errorResponse := func(fmtspec string, args ...interface{}) *response {
		return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}}
	}
	if len(req.Globs) == 0 {
		return errorResponse("copier: stat: expected at least one glob pattern, got none")
	}
	var stats []*StatsForGlob
	for i, glob := range req.Globs {
		s := StatsForGlob{
			Glob: req.preservedGlobs[i],
		}
		// glob this pattern
		globMatched, err := filepath.Glob(glob)
		if err != nil {
			s.Error = fmt.Sprintf("copier: stat: %q while matching glob pattern %q", err.Error(), glob)
		}

		// Patterns that contain wildcard characters are allowed to
		// match nothing: they're simply dropped from the results.
		if len(globMatched) == 0 && strings.ContainsAny(glob, "*?[") {
			continue
		}
		// collect the matches
		s.Globbed = make([]string, 0, len(globMatched))
		s.Results = make(map[string]*StatForItem)
		for _, globbed := range globMatched {
			rel, excluded, err := pathIsExcluded(req.Root, globbed, pm)
			if err != nil {
				return errorResponse("copier: stat: %v", err)
			}
			if excluded {
				continue
			}
			// if the glob was an absolute path, reconstruct the
			// path that we should hand back for the match
			var resultName string
			if filepath.IsAbs(req.preservedGlobs[i]) {
				resultName = filepath.Join(req.rootPrefix, globbed)
			} else {
				// relative globs produce names relative to the directory
				relResult := rel
				if req.Directory != req.Root {
					relResult, err = convertToRelSubdirectory(req.Directory, globbed)
					if err != nil {
						return errorResponse("copier: stat: error making %q relative to %q: %v", globbed, req.Directory, err)
					}
				}
				resultName = relResult
			}
			result := StatForItem{Name: resultName}
			s.Globbed = append(s.Globbed, resultName)
			s.Results[resultName] = &result
			// lstat the matched value
			linfo, err := os.Lstat(globbed)
			if err != nil {
				result.Error = err.Error()
				continue
			}
			result.Size = linfo.Size()
			result.Mode = linfo.Mode()
			result.ModTime = linfo.ModTime()
			result.IsDir = linfo.IsDir()
			result.IsRegular = result.Mode.IsRegular()
			result.IsSymlink = (linfo.Mode() & os.ModeType) == os.ModeSymlink
			checkForArchive := req.StatOptions.CheckForArchives
			if result.IsSymlink {
				// if the match was a symbolic link, read it
				immediateTarget, err := os.Readlink(globbed)
				if err != nil {
					result.Error = err.Error()
					continue
				}
				// record where it points, both by itself (it
				// could be a relative link) and in the context
				// of the chroot
				result.ImmediateTarget = immediateTarget
				resolvedTarget, err := resolvePath(req.Root, globbed, true, pm)
				if err != nil {
					return errorResponse("copier: stat: error resolving %q: %v", globbed, err)
				}
				// lstat the thing that we point to
				info, err := os.Lstat(resolvedTarget)
				if err != nil {
					result.Error = err.Error()
					continue
				}
				// replace IsArchive/IsDir/IsRegular with info about the target
				if info.Mode().IsRegular() && req.StatOptions.CheckForArchives {
					result.IsArchive = isArchivePath(resolvedTarget)
					// already decided for the target; don't re-check below
					checkForArchive = false
				}
				result.IsDir = info.IsDir()
				result.IsRegular = info.Mode().IsRegular()
			}
			if result.IsRegular && checkForArchive {
				// we were asked to check on this, and it
				// wasn't a symlink, in which case we'd have
				// already checked what the link points to
				result.IsArchive = isArchivePath(globbed)
			}
		}
		// no unskipped matches -> error
		if len(s.Globbed) == 0 {
			s.Globbed = nil
			s.Results = nil
			s.Error = fmt.Sprintf("copier: stat: %q: %v", glob, syscall.ENOENT)
		}
		stats = append(stats, &s)
	}
	// no matches -> error
	if len(stats) == 0 {
		s := StatsForGlob{
			Error: fmt.Sprintf("copier: stat: %q: %v", req.Globs, syscall.ENOENT),
		}
		stats = append(stats, &s)
	}
	return &response{Stat: statResponse{Globs: stats}}
}
+
+func errorIsPermission(err error) bool {
+ if err == nil {
+ return false
+ }
+ return errors.Is(err, os.ErrPermission) || strings.Contains(err.Error(), "permission denied")
+}
+
+func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
+ statRequest := req
+ statRequest.Request = requestStat
+ statResponse := copierHandlerStat(req, pm)
+ errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse.Stat, Get: getResponse{}}, nil, nil
+ }
+ if statResponse.Error != "" {
+ return errorResponse("%s", statResponse.Error)
+ }
+ if len(req.Globs) == 0 {
+ return errorResponse("copier: get: expected at least one glob pattern, got 0")
+ }
+ // build a queue of items by globbing
+ var queue []string
+ globMatchedCount := 0
+ for _, glob := range req.Globs {
+ globMatched, err := filepath.Glob(glob)
+ if err != nil {
+ return errorResponse("copier: get: glob %q: %v", glob, err)
+ }
+ globMatchedCount += len(globMatched)
+ queue = append(queue, globMatched...)
+ }
+ // no matches -> error
+ if len(queue) == 0 {
+ return errorResponse("copier: get: globs %v matched nothing (%d filtered out): %v", req.Globs, globMatchedCount, syscall.ENOENT)
+ }
+ topInfo, err := os.Stat(req.Directory)
+ if err != nil {
+ return errorResponse("copier: get: error reading info about directory %q: %v", req.Directory, err)
+ }
+ cb := func() error {
+ tw := tar.NewWriter(bulkWriter)
+ defer tw.Close()
+ hardlinkChecker := new(hardlinkChecker)
+ itemsCopied := 0
+ for i, item := range queue {
+ // if we're not discarding the names of individual directories, keep track of this one
+ relNamePrefix := ""
+ if req.GetOptions.KeepDirectoryNames {
+ relNamePrefix = filepath.Base(item)
+ }
+ // if the named thing-to-read is a symlink, dereference it
+ info, err := os.Lstat(item)
+ if err != nil {
+ return fmt.Errorf("copier: get: lstat %q: %w", item, err)
+ }
+ // chase links. if we hit a dead end, we should just fail
+ followedLinks := 0
+ const maxFollowedLinks = 16
+ for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
+ path, err := os.Readlink(item)
+ if err != nil {
+ continue
+ }
+ if filepath.IsAbs(path) || looksLikeAbs(path) {
+ path = filepath.Join(req.Root, path)
+ } else {
+ path = filepath.Join(filepath.Dir(item), path)
+ }
+ item = path
+ if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
+ return fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", queue[i], item, req.Root, err)
+ }
+ if info, err = os.Lstat(item); err != nil {
+ return fmt.Errorf("copier: get: lstat %q(%q): %w", queue[i], item, err)
+ }
+ followedLinks++
+ }
+ if followedLinks >= maxFollowedLinks {
+ return fmt.Errorf("copier: get: resolving symlink %q(%q): %w", queue[i], item, syscall.ELOOP)
+ }
+ // evaluate excludes relative to the root directory
+ if info.Mode().IsDir() {
+ // we don't expand any of the contents that are archives
+ options := req.GetOptions
+ options.ExpandArchives = false
+ walkfn := func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ if options.IgnoreUnreadable && errorIsPermission(err) {
+ if info != nil && d.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ } else if errors.Is(err, os.ErrNotExist) {
+ logrus.Warningf("copier: file disappeared while reading: %q", path)
+ return nil
+ }
+ return fmt.Errorf("copier: get: error reading %q: %w", path, err)
+ }
+ if d.Type() == os.ModeSocket {
+ logrus.Warningf("copier: skipping socket %q", d.Name())
+ return nil
+ }
+ // compute the path of this item
+ // relative to the top-level directory,
+ // for the tar header
+ rel, relErr := convertToRelSubdirectory(item, path)
+ if relErr != nil {
+ return fmt.Errorf("copier: get: error computing path of %q relative to top directory %q: %w", path, item, relErr)
+ }
+ // prefix the original item's name if we're keeping it
+ if relNamePrefix != "" {
+ rel = filepath.Join(relNamePrefix, rel)
+ }
+ if rel == "" || rel == "." {
+ // skip the "." entry
+ return nil
+ }
+ skippedPath, skip, err := pathIsExcluded(req.Root, path, pm)
+ if err != nil {
+ return err
+ }
+ if skip {
+ if d.IsDir() {
+ // if there are no "include
+ // this anyway" patterns at
+ // all, we don't need to
+ // descend into this particular
+ // directory if it's a directory
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+ // if there are exclusion
+ // patterns for which this
+ // path is a prefix, we
+ // need to keep descending
+ for _, pattern := range pm.Patterns() {
+ if !pattern.Exclusion() {
+ continue
+ }
+ spec := strings.Trim(pattern.String(), string(os.PathSeparator))
+ trimmedPath := strings.Trim(skippedPath, string(os.PathSeparator))
+ if strings.HasPrefix(spec+string(os.PathSeparator), trimmedPath) {
+ // we can't just skip over
+ // this directory
+ return nil
+ }
+ }
+ // there are exclusions, but
+ // none of them apply here
+ return filepath.SkipDir
+ }
+ // skip this item, but if we're
+ // a directory, a more specific
+ // but-include-this for
+ // something under it might
+ // also be in the excludes list
+ return nil
+ }
+ // if it's a symlink, read its target
+ symlinkTarget := ""
+ if d.Type() == os.ModeSymlink {
+ target, err := os.Readlink(path)
+ if err != nil {
+ return fmt.Errorf("copier: get: readlink(%q(%q)): %w", rel, path, err)
+ }
+ symlinkTarget = target
+ }
+ info, err := d.Info()
+ if err != nil {
+ return err
+ }
+ // if it's a directory and we're staying on one device, and it's on a
+ // different device than the one we started from, skip its contents
+ var ok error
+ if d.IsDir() && req.GetOptions.NoCrossDevice {
+ if !sameDevice(topInfo, info) {
+ ok = filepath.SkipDir
+ }
+ }
+ // add the item to the outgoing tar stream
+ if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil {
+ if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
+ return ok
+ } else if errors.Is(err, os.ErrNotExist) {
+ logrus.Warningf("copier: file disappeared while reading: %q", path)
+ return nil
+ }
+ return err
+ }
+ return ok
+ }
+ // walk the directory tree, checking/adding items individually
+ if err := filepath.WalkDir(item, walkfn); err != nil {
+ return fmt.Errorf("copier: get: %q(%q): %w", queue[i], item, err)
+ }
+ itemsCopied++
+ } else {
+ _, skip, err := pathIsExcluded(req.Root, item, pm)
+ if err != nil {
+ return err
+ }
+ if skip {
+ continue
+ }
+ // add the item to the outgoing tar stream. in
+ // cases where this was a symlink that we
+ // dereferenced, be sure to use the name of the
+ // link.
+ if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
+ if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
+ continue
+ }
+ return fmt.Errorf("copier: get: %q: %w", queue[i], err)
+ }
+ itemsCopied++
+ }
+ }
+ if itemsCopied == 0 {
+ return fmt.Errorf("copier: get: copied no items: %w", syscall.ENOENT)
+ }
+ return nil
+ }
+ return &response{Stat: statResponse.Stat, Get: getResponse{}}, cb, nil
+}
+
// handleRename maps a tar entry name through the supplied rename map.
// An exact match wins outright; otherwise the longest directory prefix
// of the name that appears in the map (with or without a trailing "/")
// is replaced by its mapping.  Header names are always slash-separated,
// so the "path" package is used instead of "filepath".  If nothing in
// the map applies, the name is returned unchanged.
func handleRename(rename map[string]string, name string) string {
	if rename == nil {
		return name
	}
	// a direct hit for the full name takes priority
	if mapped, found := rename[name]; found {
		return mapped
	}
	dir, rest := path.Split(name)
	for dir != "" {
		// try the prefix exactly as path.Split produced it
		if mapped, found := rename[dir]; found {
			return path.Join(mapped, rest)
		}
		// and again without the trailing separator
		if strings.HasSuffix(dir, "/") {
			dir = strings.TrimSuffix(dir, "/")
			if mapped, found := rename[dir]; found {
				return path.Join(mapped, rest)
			}
		}
		// peel off the last component and move it onto the remainder
		parent, last := path.Split(dir)
		if parent == dir {
			return name
		}
		dir = parent
		rest = path.Join(last, rest)
	}
	return name
}
+
+func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *hardlinkChecker, idMappings *idtools.IDMappings) error {
+ // build the header using the name provided
+ hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget)
+ if err != nil {
+ return fmt.Errorf("generating tar header for %s (%s): %w", contentPath, symlinkTarget, err)
+ }
+ if name != "" {
+ hdr.Name = filepath.ToSlash(name)
+ }
+ if options.Rename != nil {
+ hdr.Name = handleRename(options.Rename, hdr.Name)
+ }
+ if options.StripSetuidBit {
+ hdr.Mode &^= cISUID
+ }
+ if options.StripSetgidBit {
+ hdr.Mode &^= cISGID
+ }
+ if options.StripStickyBit {
+ hdr.Mode &^= cISVTX
+ }
+ // read extended attributes
+ var xattrs map[string]string
+ if !options.StripXattrs {
+ xattrs, err = Lgetxattrs(contentPath)
+ if err != nil {
+ return fmt.Errorf("getting extended attributes for %q: %w", contentPath, err)
+ }
+ }
+ hdr.Xattrs = xattrs // nolint:staticcheck
+ if hdr.Typeflag == tar.TypeReg {
+ // if it's an archive and we're extracting archives, read the
+ // file and spool out its contents in-line. (if we just
+ // inlined the whole file, we'd also be inlining the EOF marker
+ // it contains)
+ if options.ExpandArchives && isArchivePath(contentPath) {
+ f, err := os.Open(contentPath)
+ if err != nil {
+ return fmt.Errorf("opening file for reading archive contents: %w", err)
+ }
+ defer f.Close()
+ rc, _, err := compression.AutoDecompress(f)
+ if err != nil {
+ return fmt.Errorf("decompressing %s: %w", contentPath, err)
+ }
+ defer rc.Close()
+ tr := tar.NewReader(rc)
+ hdr, err := tr.Next()
+ for err == nil {
+ if options.Rename != nil {
+ hdr.Name = handleRename(options.Rename, hdr.Name)
+ }
+ if err = tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("writing tar header from %q to pipe: %w", contentPath, err)
+ }
+ if hdr.Size != 0 {
+ n, err := io.Copy(tw, tr)
+ if err != nil {
+ return fmt.Errorf("extracting content from archive %s: %s: %w", contentPath, hdr.Name, err)
+ }
+ if n != hdr.Size {
+ return fmt.Errorf("extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
+ }
+ tw.Flush()
+ }
+ hdr, err = tr.Next()
+ }
+ if err != io.EOF {
+ return fmt.Errorf("extracting contents of archive %s: %w", contentPath, err)
+ }
+ return nil
+ }
+ // if this regular file is hard linked to something else we've
+ // already added, set up to output a TypeLink entry instead of
+ // a TypeReg entry
+ target := hardlinkChecker.Check(srcfi)
+ if target != "" {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = filepath.ToSlash(target)
+ hdr.Size = 0
+ } else {
+ // note the device/inode pair for this file
+ hardlinkChecker.Add(srcfi, name)
+ }
+ }
+ // map the ownership for the archive
+ if idMappings != nil && !idMappings.Empty() {
+ hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+ hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair)
+ if err != nil {
+ return fmt.Errorf("mapping host filesystem owners %#v to container filesystem owners: %w", hostPair, err)
+ }
+ }
+ // force ownership and/or permissions, if requested
+ if hdr.Typeflag == tar.TypeDir {
+ if options.ChownDirs != nil {
+ hdr.Uid, hdr.Gid = options.ChownDirs.UID, options.ChownDirs.GID
+ }
+ if options.ChmodDirs != nil {
+ hdr.Mode = int64(*options.ChmodDirs)
+ }
+ } else {
+ if options.ChownFiles != nil {
+ hdr.Uid, hdr.Gid = options.ChownFiles.UID, options.ChownFiles.GID
+ }
+ if options.ChmodFiles != nil {
+ hdr.Mode = int64(*options.ChmodFiles)
+ }
+ }
+ // read fflags, if any
+ if err := archive.ReadFileFlagsToTarHeader(contentPath, hdr); err != nil {
+ return fmt.Errorf("getting fflags: %w", err)
+ }
+ var f *os.File
+ if hdr.Typeflag == tar.TypeReg {
+ // open the file first so that we don't write a header for it if we can't actually read it
+ f, err = os.Open(contentPath)
+ if err != nil {
+ return fmt.Errorf("opening file for adding its contents to archive: %w", err)
+ }
+ defer f.Close()
+ } else if hdr.Typeflag == tar.TypeDir {
+ // open the directory file first to make sure we can access it.
+ f, err = os.Open(contentPath)
+ if err != nil {
+ return fmt.Errorf("opening directory for adding its contents to archive: %w", err)
+ }
+ defer f.Close()
+ }
+ // output the header
+ if err = tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("writing header for %s (%s): %w", contentPath, hdr.Name, err)
+ }
+ if hdr.Typeflag == tar.TypeReg {
+ // output the content
+ n, err := io.Copy(tw, f)
+ if err != nil {
+ return fmt.Errorf("copying %s: %w", contentPath, err)
+ }
+ if n != hdr.Size {
+ return fmt.Errorf("copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size)
+ }
+ tw.Flush()
+ }
+ return nil
+}
+
// copierHandlerPut handles a "put" request: it returns a callback which
// reads a tar stream from bulkReader and unpacks it under the request's
// target directory, applying the ownership, permission, rename, and
// overwrite options from the request along the way.  Directory modes and
// timestamps are deferred and fixed up as the callback exits, since
// creating content inside a directory alters both.
func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
	errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
		return &response{Error: fmt.Sprintf(fmtspec, args...), Put: putResponse{}}, nil, nil
	}
	// work out the requested ownership and mode for newly-created directories
	dirUID, dirGID, defaultDirUID, defaultDirGID := 0, 0, 0, 0
	if req.PutOptions.ChownDirs != nil {
		dirUID, dirGID = req.PutOptions.ChownDirs.UID, req.PutOptions.ChownDirs.GID
		defaultDirUID, defaultDirGID = dirUID, dirGID
	}
	defaultDirMode := os.FileMode(0755)
	if req.PutOptions.ChmodDirs != nil {
		defaultDirMode = *req.PutOptions.ChmodDirs
	}
	if req.PutOptions.DefaultDirOwner != nil {
		defaultDirUID, defaultDirGID = req.PutOptions.DefaultDirOwner.UID, req.PutOptions.DefaultDirOwner.GID
	}
	if req.PutOptions.DefaultDirMode != nil {
		defaultDirMode = *req.PutOptions.DefaultDirMode
	}
	var fileUID, fileGID *int
	if req.PutOptions.ChownFiles != nil {
		fileUID, fileGID = &req.PutOptions.ChownFiles.UID, &req.PutOptions.ChownFiles.GID
	}
	// map the requested in-container owners to their host-side equivalents
	if idMappings != nil && !idMappings.Empty() {
		containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID}
		hostDirPair, err := idMappings.ToHost(containerDirPair)
		if err != nil {
			return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err)
		}
		dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
		defaultDirUID, defaultDirGID = hostDirPair.UID, hostDirPair.GID
		if req.PutOptions.ChownFiles != nil {
			containerFilePair := idtools.IDPair{UID: *fileUID, GID: *fileGID}
			hostFilePair, err := idMappings.ToHost(containerFilePair)
			if err != nil {
				return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", fileUID, fileGID, err)
			}
			fileUID, fileGID = &hostFilePair.UID, &hostFilePair.GID
		}
	}
	// directoryModes records the mode each directory should end up with,
	// so the deferred fixup in the callback can apply them on the way out
	directoryModes := make(map[string]os.FileMode)
	// ensureDirectoryUnderRoot creates any missing components of the given
	// directory path under the request's root, noting the default mode for
	// directories it creates
	ensureDirectoryUnderRoot := func(directory string) error {
		rel, err := convertToRelSubdirectory(req.Root, directory)
		if err != nil {
			return fmt.Errorf("%q is not a subdirectory of %q: %w", directory, req.Root, err)
		}
		subdir := ""
		for _, component := range strings.Split(rel, string(os.PathSeparator)) {
			subdir = filepath.Join(subdir, component)
			path := filepath.Join(req.Root, subdir)
			if err := os.Mkdir(path, 0700); err == nil {
				if err = lchown(path, defaultDirUID, defaultDirGID); err != nil {
					return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err)
				}
				// make a conditional note to set this directory's permissions
				// later, but not if we already had an explicitly-provided mode
				if _, ok := directoryModes[path]; !ok {
					directoryModes[path] = defaultDirMode
				}
			} else {
				// FreeBSD can return EISDIR for "mkdir /":
				// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
				if !errors.Is(err, os.ErrExist) && !errors.Is(err, syscall.EISDIR) {
					return fmt.Errorf("copier: put: error checking directory %q: %w", path, err)
				}
			}
		}
		return nil
	}
	// makeDirectoryWriteable chmods a directory to 0700 so we can create
	// content inside it, remembering its original mode for restoration later
	makeDirectoryWriteable := func(directory string) error {
		if _, ok := directoryModes[directory]; !ok {
			st, err := os.Lstat(directory)
			if err != nil {
				return fmt.Errorf("copier: put: error reading permissions of directory %q: %w", directory, err)
			}
			mode := st.Mode()
			directoryModes[directory] = mode
		}
		if err := os.Chmod(directory, 0o700); err != nil {
			return fmt.Errorf("copier: put: error making directory %q writable: %w", directory, err)
		}
		return nil
	}
	// createFile writes the current tar entry's contents to path, removing
	// any pre-existing item (and, on permission errors, temporarily making
	// the parent directory writable) as the overwrite options allow
	createFile := func(path string, tr *tar.Reader) (int64, error) {
		f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
		if err != nil && errors.Is(err, os.ErrExist) {
			if req.PutOptions.NoOverwriteDirNonDir {
				if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() {
					return 0, fmt.Errorf("copier: put: error creating file at %q: %w", path, err)
				}
			}
			if err = os.RemoveAll(path); err != nil {
				if os.IsPermission(err) {
					if err := makeDirectoryWriteable(filepath.Dir(path)); err != nil {
						return 0, err
					}
					err = os.RemoveAll(path)
				}
				if err != nil {
					return 0, fmt.Errorf("copier: put: error removing item to be overwritten %q: %w", path, err)
				}
			}
			f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
		}
		if err != nil && os.IsPermission(err) {
			if err = makeDirectoryWriteable(filepath.Dir(path)); err != nil {
				return 0, err
			}
			f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
		}
		if err != nil {
			return 0, fmt.Errorf("copier: put: error opening file %q for writing: %w", path, err)
		}
		defer f.Close()
		n, err := io.Copy(f, tr)
		if err != nil {
			return n, fmt.Errorf("copier: put: error writing file %q: %w", path, err)
		}
		return n, nil
	}
	targetDirectory, err := resolvePath(req.Root, req.Directory, true, nil)
	if err != nil {
		return errorResponse("copier: put: error resolving %q: %v", req.Directory, err)
	}
	info, err := os.Lstat(targetDirectory)
	if err == nil {
		if !info.IsDir() {
			return errorResponse("copier: put: %s (%s): exists but is not a directory", req.Directory, targetDirectory)
		}
	} else {
		if !errors.Is(err, os.ErrNotExist) {
			return errorResponse("copier: put: %s: %v", req.Directory, err)
		}
		if err := ensureDirectoryUnderRoot(req.Directory); err != nil {
			return errorResponse("copier: put: %v", err)
		}
	}
	cb := func() error {
		type directoryAndTimes struct {
			directory    string
			atime, mtime time.Time
		}
		var directoriesAndTimes []directoryAndTimes
		// on the way out, restore directory timestamps (deepest first,
		// since touching a child bumps the parent's mtime) and then the
		// modes we noted or overrode while unpacking
		defer func() {
			for i := range directoriesAndTimes {
				directoryAndTimes := directoriesAndTimes[len(directoriesAndTimes)-i-1]
				if err := lutimes(false, directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime); err != nil {
					logrus.Debugf("error setting access and modify timestamps on %q to %s and %s: %v", directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime, err)
				}
			}
			for directory, mode := range directoryModes {
				if err := os.Chmod(directory, mode); err != nil {
					logrus.Debugf("error setting permissions of %q to 0%o: %v", directory, uint32(mode), err)
				}
			}
		}()
		// ignoredItems tracks entries we skipped, so hard links to them
		// can be skipped too instead of failing
		ignoredItems := make(map[string]struct{})
		tr := tar.NewReader(bulkReader)
		hdr, err := tr.Next()
		for err == nil {
			nameBeforeRenaming := hdr.Name
			if len(hdr.Name) == 0 {
				// no name -> ignore the entry
				ignoredItems[nameBeforeRenaming] = struct{}{}
				hdr, err = tr.Next()
				continue
			}
			if req.PutOptions.Rename != nil {
				hdr.Name = handleRename(req.PutOptions.Rename, hdr.Name)
			}
			// figure out who should own this new item
			if idMappings != nil && !idMappings.Empty() {
				containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
				hostPair, err := idMappings.ToHost(containerPair)
				if err != nil {
					return fmt.Errorf("mapping container filesystem owner 0,0 to host filesystem owners: %w", err)
				}
				hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID
			}
			if hdr.Typeflag == tar.TypeDir {
				if req.PutOptions.ChownDirs != nil {
					hdr.Uid, hdr.Gid = dirUID, dirGID
				}
			} else {
				if req.PutOptions.ChownFiles != nil {
					hdr.Uid, hdr.Gid = *fileUID, *fileGID
				}
			}
			// make sure the parent directory exists, including for tar.TypeXGlobalHeader entries
			// that we otherwise ignore, because that's what docker build does with them
			path := filepath.Join(targetDirectory, cleanerReldirectory(filepath.FromSlash(hdr.Name)))
			if err := ensureDirectoryUnderRoot(filepath.Dir(path)); err != nil {
				return err
			}
			// figure out what the permissions should be
			if req.PutOptions.StripSetuidBit && hdr.Mode&cISUID == cISUID {
				hdr.Mode &^= cISUID
			}
			if req.PutOptions.StripSetgidBit && hdr.Mode&cISGID == cISGID {
				hdr.Mode &^= cISGID
			}
			if req.PutOptions.StripStickyBit && hdr.Mode&cISVTX == cISVTX {
				hdr.Mode &^= cISVTX
			}
			if hdr.Typeflag == tar.TypeDir {
				if req.PutOptions.ChmodDirs != nil {
					hdr.Mode = int64(*req.PutOptions.ChmodDirs)
				}
			} else {
				if req.PutOptions.ChmodFiles != nil {
					hdr.Mode = int64(*req.PutOptions.ChmodFiles)
				}
			}
			// create the new item
			devMajor := uint32(hdr.Devmajor)
			devMinor := uint32(hdr.Devminor)
			mode := os.FileMode(hdr.Mode) & os.ModePerm
			switch hdr.Typeflag {
			// no type flag for sockets
			default:
				return fmt.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
			case tar.TypeReg:
				var written int64
				written, err = createFile(path, tr)
				// only check the length if there wasn't an error, which we'll
				// check along with errors for other types of entries
				if err == nil && written != hdr.Size {
					return fmt.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size)
				}
			case tar.TypeLink:
				var linkTarget string
				if _, ignoredTarget := ignoredItems[hdr.Linkname]; ignoredTarget {
					// hard link to an ignored item: skip this, too
					ignoredItems[nameBeforeRenaming] = struct{}{}
					goto nextHeader
				}
				if req.PutOptions.Rename != nil {
					hdr.Linkname = handleRename(req.PutOptions.Rename, hdr.Linkname)
				}
				if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), true, nil); err != nil {
					return fmt.Errorf("resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
				}
				if err = os.Link(linkTarget, path); err != nil && errors.Is(err, os.ErrExist) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.RemoveAll(path); err == nil {
						err = os.Link(linkTarget, path)
					}
				}
			case tar.TypeSymlink:
				// if req.PutOptions.Rename != nil {
				// todo: the general solution requires resolving to an absolute path, handling
				// renaming, and then possibly converting back to a relative symlink
				// }
				if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && errors.Is(err, os.ErrExist) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.RemoveAll(path); err == nil {
						err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path))
					}
				}
			case tar.TypeChar:
				if req.PutOptions.IgnoreDevices {
					ignoredItems[nameBeforeRenaming] = struct{}{}
					goto nextHeader
				}
				if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.RemoveAll(path); err == nil {
						err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor)))
					}
				}
			case tar.TypeBlock:
				if req.PutOptions.IgnoreDevices {
					ignoredItems[nameBeforeRenaming] = struct{}{}
					goto nextHeader
				}
				if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.RemoveAll(path); err == nil {
						err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor)))
					}
				}
			case tar.TypeDir:
				// FreeBSD can return EISDIR for "mkdir /":
				// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
				if err = os.Mkdir(path, 0700); err != nil && (errors.Is(err, os.ErrExist) || errors.Is(err, syscall.EISDIR)) {
					if st, stErr := os.Lstat(path); stErr == nil && !st.IsDir() {
						if req.PutOptions.NoOverwriteNonDirDir {
							break
						}
						if err = os.Remove(path); err == nil {
							err = os.Mkdir(path, 0700)
						}
					} else {
						err = stErr
					}
					// either we removed it and retried, or it was a directory,
					// in which case we want to just add the new stuff under it
				}
				// make a note of the directory's times. we
				// might create items under it, which will
				// cause the mtime to change after we correct
				// it, so we'll need to correct it again later
				directoriesAndTimes = append(directoriesAndTimes, directoryAndTimes{
					directory: path,
					atime:     hdr.AccessTime,
					mtime:     hdr.ModTime,
				})
				// set the mode here unconditionally, in case the directory is in
				// the archive more than once for whatever reason
				directoryModes[path] = mode
			case tar.TypeFifo:
				if err = mkfifo(path, 0600); err != nil && errors.Is(err, os.ErrExist) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.RemoveAll(path); err == nil {
						err = mkfifo(path, 0600)
					}
				}
			case tar.TypeXGlobalHeader:
				// Per archive/tar, PAX uses these to specify key=value information
				// applied to all subsequent entries. The one reported in #2717,
				// https://www.openssl.org/source/openssl-1.1.1g.tar.gz, includes a
				// comment=(40 byte hex string) at the start, possibly a digest.
				// Don't try to create whatever path was used for the header.
				goto nextHeader
			}
			// check for errors
			if err != nil {
				return fmt.Errorf("copier: put: error creating %q: %w", path, err)
			}
			// set ownership
			if err = lchown(path, hdr.Uid, hdr.Gid); err != nil {
				return fmt.Errorf("copier: put: error setting ownership of %q to %d:%d: %w", path, hdr.Uid, hdr.Gid, err)
			}
			// set permissions, except for symlinks, since we don't
			// have an lchmod, and directories, which we'll fix up
			// on our way out so that we don't get tripped up by
			// directories which we're not supposed to be able to
			// write to, but which we'll need to create content in
			if hdr.Typeflag != tar.TypeSymlink && hdr.Typeflag != tar.TypeDir {
				if err = os.Chmod(path, mode); err != nil {
					return fmt.Errorf("copier: put: error setting permissions on %q to 0%o: %w", path, mode, err)
				}
			}
			// set other bits that might have been reset by chown()
			if hdr.Typeflag != tar.TypeSymlink {
				if hdr.Mode&cISUID == cISUID {
					mode |= os.ModeSetuid
				}
				if hdr.Mode&cISGID == cISGID {
					mode |= os.ModeSetgid
				}
				if hdr.Mode&cISVTX == cISVTX {
					mode |= os.ModeSticky
				}
				if hdr.Typeflag == tar.TypeDir {
					// if/when we do the final setting of permissions on this
					// directory, make sure to incorporate these bits, too
					directoryModes[path] = mode
				}
				if err = os.Chmod(path, mode); err != nil {
					return fmt.Errorf("copier: put: setting additional permissions on %q to 0%o: %w", path, mode, err)
				}
			}
			// set xattrs, including some that might have been reset by chown()
			if !req.PutOptions.StripXattrs {
				if err = Lsetxattrs(path, hdr.Xattrs); err != nil { // nolint:staticcheck
					if !req.PutOptions.IgnoreXattrErrors {
						return fmt.Errorf("copier: put: error setting extended attributes on %q: %w", path, err)
					}
				}
			}
			// set time
			if hdr.AccessTime.IsZero() || hdr.AccessTime.Before(hdr.ModTime) {
				hdr.AccessTime = hdr.ModTime
			}
			if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil {
				return fmt.Errorf("setting access and modify timestamps on %q to %s and %s: %w", path, hdr.AccessTime, hdr.ModTime, err)
			}
			// set fflags if supported
			if err := archive.WriteFileFlagsFromTarHeader(path, hdr); err != nil {
				return fmt.Errorf("copier: put: error setting fflags on %q: %w", path, err)
			}
		nextHeader:
			hdr, err = tr.Next()
		}
		if err != io.EOF {
			return fmt.Errorf("reading tar stream: expected EOF: %w", err)
		}
		return nil
	}
	return &response{Error: "", Put: putResponse{}}, cb, nil
}
+
+func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
+ errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil
+ }
+ dirUID, dirGID := 0, 0
+ if req.MkdirOptions.ChownNew != nil {
+ dirUID, dirGID = req.MkdirOptions.ChownNew.UID, req.MkdirOptions.ChownNew.GID
+ }
+ dirMode := os.FileMode(0755)
+ if req.MkdirOptions.ChmodNew != nil {
+ dirMode = *req.MkdirOptions.ChmodNew
+ }
+ if idMappings != nil && !idMappings.Empty() {
+ containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID}
+ hostDirPair, err := idMappings.ToHost(containerDirPair)
+ if err != nil {
+ return errorResponse("copier: mkdir: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err)
+ }
+ dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
+ }
+
+ directory, err := resolvePath(req.Root, req.Directory, true, nil)
+ if err != nil {
+ return errorResponse("copier: mkdir: error resolving %q: %v", req.Directory, err)
+ }
+
+ rel, err := convertToRelSubdirectory(req.Root, directory)
+ if err != nil {
+ return errorResponse("copier: mkdir: error computing path of %q relative to %q: %v", directory, req.Root, err)
+ }
+
+ subdir := ""
+ for _, component := range strings.Split(rel, string(os.PathSeparator)) {
+ subdir = filepath.Join(subdir, component)
+ path := filepath.Join(req.Root, subdir)
+ if err := os.Mkdir(path, 0700); err == nil {
+ if err = chown(path, dirUID, dirGID); err != nil {
+ return errorResponse("copier: mkdir: error setting owner of %q to %d:%d: %v", path, dirUID, dirGID, err)
+ }
+ if err = chmod(path, dirMode); err != nil {
+ return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode)
+ }
+ } else {
+ // FreeBSD can return EISDIR for "mkdir /":
+ // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
+ if !errors.Is(err, os.ErrExist) && !errors.Is(err, syscall.EISDIR) {
+ return errorResponse("copier: mkdir: error checking directory %q: %v", path, err)
+ }
+ }
+ }
+
+ return &response{Error: "", Mkdir: mkdirResponse{}}, nil, nil
+}
+
+func copierHandlerRemove(req request) *response {
+ errorResponse := func(fmtspec string, args ...interface{}) *response {
+ return &response{Error: fmt.Sprintf(fmtspec, args...), Remove: removeResponse{}}
+ }
+ resolvedTarget, err := resolvePath(req.Root, req.Directory, false, nil)
+ if err != nil {
+ return errorResponse("copier: remove: %v", err)
+ }
+ if req.RemoveOptions.All {
+ err = os.RemoveAll(resolvedTarget)
+ } else {
+ err = os.Remove(resolvedTarget)
+ }
+ if err != nil {
+ return errorResponse("copier: remove %q: %v", req.Directory, err)
+ }
+ return &response{Error: "", Remove: removeResponse{}}
+}
diff --git a/copier/copier_linux_test.go b/copier/copier_linux_test.go
new file mode 100644
index 0000000..93a8a3a
--- /dev/null
+++ b/copier/copier_linux_test.go
@@ -0,0 +1,195 @@
+package copier
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/storage/pkg/mount"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/syndtr/gocapability/capability"
+ "golang.org/x/sys/unix"
+)
+
// init registers the child-process entry point that getWrapped uses to
// re-execute this test binary as a "get" command.
func init() {
	reexec.Register("get", getWrappedMain)
}
+
// getWrappedOptions carries the parameters of a Get() call across the
// process boundary to the re-executed child (see getWrapped and
// getWrappedMain); it is serialized as JSON in an environment variable.
type getWrappedOptions struct {
	// Root and Directory are passed through to Get unchanged.
	Root, Directory string
	// GetOptions are the options for the Get call.
	GetOptions GetOptions
	// Globs are the patterns to resolve in the child.
	Globs []string
	// DropCaps lists capabilities the child should drop before calling Get.
	DropCaps []capability.Cap
}
+
+func getWrapped(root string, directory string, getOptions GetOptions, globs []string, dropCaps []capability.Cap, bulkWriter io.Writer) error {
+ options := getWrappedOptions{
+ Root: root,
+ Directory: directory,
+ GetOptions: getOptions,
+ Globs: globs,
+ DropCaps: dropCaps,
+ }
+ encoded, err := json.Marshal(&options)
+ if err != nil {
+ return fmt.Errorf("marshalling options: %w", err)
+ }
+ cmd := reexec.Command("get")
+ cmd.Env = append(cmd.Env, "OPTIONS="+string(encoded))
+ cmd.Stdout = bulkWriter
+ stderrBuf := bytes.Buffer{}
+ cmd.Stderr = &stderrBuf
+ err = cmd.Run()
+ if stderrBuf.Len() > 0 {
+ if err != nil {
+ return fmt.Errorf("%v: %s", err, stderrBuf.String())
+ }
+ return fmt.Errorf("%s", stderrBuf.String())
+ }
+ return err
+}
+
// getWrappedMain is the entry point for the re-executed "get" child
// process: it decodes its options from the OPTIONS environment variable,
// drops any requested capabilities, and runs Get with its output going
// to stdout.  Failures are reported on stderr with a non-zero exit
// status.
func getWrappedMain() {
	var options getWrappedOptions
	if err := json.Unmarshal([]byte(os.Getenv("OPTIONS")), &options); err != nil {
		fmt.Fprintf(os.Stderr, "%v", err)
		os.Exit(1)
	}
	if len(options.DropCaps) > 0 {
		// read our current capability sets before modifying them
		caps, err := capability.NewPid2(0)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%v", err)
			os.Exit(1)
		}
		if err := caps.Load(); err != nil {
			fmt.Fprintf(os.Stderr, "%v", err)
			os.Exit(1)
		}
		// clear the requested capabilities from every capability set
		for _, capType := range []capability.CapType{
			capability.AMBIENT,
			capability.BOUNDING,
			capability.INHERITABLE,
			capability.PERMITTED,
			capability.EFFECTIVE,
		} {
			for _, cap := range options.DropCaps {
				if caps.Get(capType, cap) {
					caps.Unset(capType, cap)
				}
			}
			if err := caps.Apply(capType); err != nil {
				fmt.Fprintf(os.Stderr, "error dropping capability %+v: %v", options.DropCaps, err)
				os.Exit(1)
			}
		}
	}
	if err := Get(options.Root, options.Directory, options.GetOptions, options.Globs, os.Stdout); err != nil {
		fmt.Fprintf(os.Stderr, "%v", err)
		os.Exit(1)
	}
}
+
+func TestGetPermissionErrorNoChroot(t *testing.T) {
+ couldChroot := canChroot
+ canChroot = false
+ testGetPermissionError(t)
+ canChroot = couldChroot
+}
+
+func TestGetPermissionErrorChroot(t *testing.T) {
+ if uid != 0 {
+ t.Skipf("chroot() requires root privileges, skipping")
+ }
+ couldChroot := canChroot
+ canChroot = true
+ testGetPermissionError(t)
+ canChroot = couldChroot
+}
+
// testGetPermissionError verifies that Get either reports a permission
// error or, when IgnoreUnreadable is set, silently skips unreadable
// items.  It runs Get in a child process that has dropped
// CAP_DAC_OVERRIDE and CAP_DAC_READ_SEARCH so that mode-0000 items are
// actually unreadable even when the tests run as root.
func testGetPermissionError(t *testing.T) {
	dropCaps := []capability.Cap{capability.CAP_DAC_OVERRIDE, capability.CAP_DAC_READ_SEARCH}
	tmp := t.TempDir()
	// lay out a mix of readable and unreadable items
	err := os.Mkdir(filepath.Join(tmp, "unreadable-directory"), 0000)
	require.NoError(t, err, "error creating an unreadable directory")
	err = os.Mkdir(filepath.Join(tmp, "readable-directory"), 0755)
	require.NoError(t, err, "error creating a readable directory")
	err = os.Mkdir(filepath.Join(tmp, "readable-directory", "unreadable-subdirectory"), 0000)
	require.NoError(t, err, "error creating an unreadable subdirectory")
	err = os.WriteFile(filepath.Join(tmp, "unreadable-file"), []byte("hi, i'm a file that you can't read"), 0000)
	require.NoError(t, err, "error creating an unreadable file")
	err = os.WriteFile(filepath.Join(tmp, "readable-file"), []byte("hi, i'm also a file, and you can read me"), 0644)
	require.NoError(t, err, "error creating a readable file")
	err = os.WriteFile(filepath.Join(tmp, "readable-directory", "unreadable-file"), []byte("hi, i'm also a file that you can't read"), 0000)
	require.NoError(t, err, "error creating an unreadable file in a readable directory")
	for _, ignore := range []bool{false, true} {
		t.Run(fmt.Sprintf("ignore=%v", ignore), func(t *testing.T) {
			var buf bytes.Buffer
			err = getWrapped(tmp, tmp, GetOptions{IgnoreUnreadable: ignore}, []string{"."}, dropCaps, &buf)
			if ignore {
				// unreadable items should have been skipped without error,
				// leaving only the readable directory and the readable file
				assert.NoError(t, err, "expected no errors")
				tr := tar.NewReader(&buf)
				items := 0
				_, err := tr.Next()
				for err == nil {
					items++
					_, err = tr.Next()
				}
				assert.True(t, errors.Is(err, io.EOF), "expected EOF to finish read contents")
				assert.Equalf(t, 2, items, "expected two readable items, got %d", items)
			} else {
				// without IgnoreUnreadable the operation must fail with
				// a permission error
				assert.Error(t, err, "expected an error")
				assert.Truef(t, errorIsPermission(err), "expected the error (%v) to be a permission error", err)
			}
		})
	}
}
+
// TestGetNoCrossDevice verifies that Get with NoCrossDevice set records
// a directory that sits on a different device (a tmpfs mount) but skips
// that directory's contents.  Mounting requires root, so the test is
// skipped for unprivileged users.
func TestGetNoCrossDevice(t *testing.T) {
	if uid != 0 {
		t.Skip("test requires root privileges, skipping")
	}

	tmpdir := t.TempDir()

	// unshare the mount namespace so our tmpfs mount doesn't leak out
	err := unix.Unshare(unix.CLONE_NEWNS)
	require.NoError(t, err, "error creating new mount namespace")

	subdir := filepath.Join(tmpdir, "subdir")
	err = os.Mkdir(subdir, 0755)
	require.NoErrorf(t, err, "error creating %q", subdir)

	// mount a tmpfs over subdir so it lives on a different device
	err = mount.Mount("tmpfs", subdir, "tmpfs", "rw")
	require.NoErrorf(t, err, "error mounting tmpfs at %q", subdir)
	defer func() {
		err := mount.Unmount(subdir)
		assert.NoErrorf(t, err, "error unmounting %q", subdir)
	}()

	// this file is on the other device and should not appear in the archive
	skipped := filepath.Join(subdir, "skipped.txt")
	err = os.WriteFile(skipped, []byte("this file should have been skipped\n"), 0644)
	require.NoErrorf(t, err, "error writing file at %q", skipped)

	var buf bytes.Buffer
	err = Get(tmpdir, tmpdir, GetOptions{NoCrossDevice: true}, []string{"/"}, &buf) // grab contents of tmpdir
	require.NoErrorf(t, err, "error reading contents at %q", tmpdir)

	tr := tar.NewReader(&buf)
	th, err := tr.Next() // should be the "subdir" directory
	require.NoError(t, err, "error reading first entry archived")
	assert.Equal(t, "subdir", th.Name, `first entry in archive was not named "subdir"`)

	// the skipped.txt inside the mount must not have been archived
	th, err = tr.Next()
	assert.Error(t, err, "should not have gotten a second entry in archive")
	assert.True(t, errors.Is(err, io.EOF), "expected an EOF trying to read a second entry in archive")
	if err == nil {
		t.Logf("got unexpected entry for %q", th.Name)
	}
}
diff --git a/copier/copier_test.go b/copier/copier_test.go
new file mode 100644
index 0000000..dc5c52b
--- /dev/null
+++ b/copier/copier_test.go
@@ -0,0 +1,1873 @@
+package copier
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestMain is the package test entry point.  It first gives reexec a chance
+// to take over the process (the copier package re-executes itself to chroot),
+// and only runs the test suite when this is the primary invocation.  Verbose
+// mode also enables debug-level logging for easier failure diagnosis.
+func TestMain(m *testing.M) {
+	if reexec.Init() {
+		// This was a re-exec'd helper invocation, not a test run.
+		return
+	}
+	flag.Parse()
+	if testing.Verbose() {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	os.Exit(m.Run())
+}
+
+// makeContents returns a reader that produces `length` bytes of
+// deterministic filler content (a repeating "0123456789abcdef" pattern),
+// generated on the fly by a goroutine writing into a pipe.  The caller is
+// responsible for closing the returned reader.
+func makeContents(length int64) io.ReadCloser {
+	pipeReader, pipeWriter := io.Pipe()
+	buffered := bufio.NewWriter(pipeWriter)
+	go func() {
+		count := int64(0)
+		for count < length {
+			if _, err := buffered.Write([]byte{"0123456789abcdef"[count%16]}); err != nil {
+				// Propagate the write error to the reading side.
+				buffered.Flush()
+				pipeWriter.CloseWithError(err) // nolint:errcheck
+				return
+			}
+			count++
+		}
+		buffered.Flush()
+		pipeWriter.Close()
+	}()
+	return pipeReader
+}
+
+// makeArchiveSlice creates an archive from the set of headers and returns it
+// as an in-memory byte slice (no per-file contents beyond generated filler).
+// It panics on failure, which is acceptable here because it is only used to
+// build package-level test fixtures.
+func makeArchiveSlice(headers []tar.Header) []byte {
+	rc := makeArchive(headers, nil)
+	defer rc.Close()
+	buf := new(bytes.Buffer)
+	if _, err := io.Copy(buf, rc); err != nil {
+		panic("error creating in-memory archive")
+	}
+	return buf.Bytes()
+}
+
+// makeArchive creates a streaming tar archive from the set of headers.
+// Regular-file entries take their bodies from the optional `contents` map
+// (keyed by entry name); entries without explicit contents get generated
+// filler sized by the header.  Link entries are forced to Size 0 as the tar
+// format requires.  The caller must close the returned reader.
+func makeArchive(headers []tar.Header, contents map[string][]byte) io.ReadCloser {
+	if contents == nil {
+		contents = make(map[string][]byte)
+	}
+	pipeReader, pipeWriter := io.Pipe()
+	go func() {
+		var err error
+		buffered := bufio.NewWriter(pipeWriter)
+		tw := tar.NewWriter(buffered)
+		for _, header := range headers {
+			var fileContent []byte
+			switch header.Typeflag {
+			case tar.TypeLink, tar.TypeSymlink:
+				header.Size = 0
+			case tar.TypeReg:
+				fileContent = contents[header.Name]
+				if len(fileContent) != 0 {
+					// Explicit contents override the declared size.
+					header.Size = int64(len(fileContent))
+				}
+			}
+			if err = tw.WriteHeader(&header); err != nil {
+				break
+			}
+			if header.Typeflag == tar.TypeReg && header.Size > 0 {
+				var fileContents io.Reader
+				if len(fileContent) > 0 {
+					fileContents = bytes.NewReader(fileContent)
+				} else {
+					rc := makeContents(header.Size)
+					// NOTE(review): deferred inside the loop, so these
+					// closes accumulate until the goroutine returns; fine
+					// for test-sized archives, but worth knowing.
+					defer rc.Close()
+					fileContents = rc
+				}
+				if _, err = io.Copy(tw, fileContents); err != nil {
+					break
+				}
+			}
+		}
+		tw.Close()
+		buffered.Flush()
+		if err != nil {
+			// Surface the first error to the reading side of the pipe.
+			pipeWriter.CloseWithError(err) // nolint:errcheck
+		} else {
+			pipeWriter.Close()
+		}
+	}()
+	return pipeReader
+}
+
+// makeContextFromArchive creates a temporary directory (cleaned up
+// automatically via t.TempDir), extracts the archive into the named
+// subdirectory inside of it using Put(), and returns the temporary
+// directory's location.  Ownership in the archive (container UID/GID 0) is
+// mapped to the current user so extraction works without privileges.
+func makeContextFromArchive(t *testing.T, archive io.ReadCloser, subdir string) (string, error) {
+	tmp := t.TempDir()
+	uidMap := []idtools.IDMap{{HostID: os.Getuid(), ContainerID: 0, Size: 1}}
+	gidMap := []idtools.IDMap{{HostID: os.Getgid(), ContainerID: 0, Size: 1}}
+	err := Put(tmp, path.Join(tmp, subdir), PutOptions{UIDMap: uidMap, GIDMap: gidMap}, archive)
+	archive.Close()
+	if err != nil {
+		return "", err
+	}
+	// err is provably nil at this point; return an explicit nil rather than
+	// re-returning the checked variable.
+	return tmp, nil
+}
+
+// enumerateFiles walks a directory, returning the items it contains as a
+// slice of enumeratedFile records (name relative to the directory, permission
+// bits, symlink-ness, and UTC mtime).  The directory entry itself ("." and
+// the empty relative path) is excluded.
+func enumerateFiles(directory string) ([]enumeratedFile, error) {
+	var results []enumeratedFile
+	err := filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
+		if info == nil || err != nil {
+			return err
+		}
+		rel, err := filepath.Rel(directory, path)
+		if err != nil {
+			return err
+		}
+		if rel != "" && rel != "." {
+			results = append(results, enumeratedFile{
+				name:      rel,
+				mode:      info.Mode() & os.ModePerm,
+				isSymlink: info.Mode()&os.ModeSymlink == os.ModeSymlink,
+				date:      info.ModTime().UTC().String(),
+			})
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+// expectedError describes a failure that a Get() test is allowed to hit for a
+// particular item: whether it only occurs when reading from a subdirectory
+// context, the archive entry name it applies to, and the underlying errno.
+type expectedError struct {
+	inSubdir bool
+	name     string
+	err      error
+}
+
+// enumeratedFile is a comparable summary of one filesystem entry, used to
+// diff extracted trees against the archives that produced them.
+type enumeratedFile struct {
+	name      string
+	mode      os.FileMode
+	isSymlink bool
+	date      string
+}
+
+var (
+	// testDate is a fixed timestamp so extracted mtimes are predictable.
+	testDate = time.Unix(1485449953, 0)
+
+	// uid is the invoking user; root-only fixtures are skipped when nonzero.
+	uid = os.Getuid()
+
+	// testArchiveSlice is a small in-memory tarball, embedded as the body of
+	// "archive-a" entries so archive-expansion logic can be exercised.
+	testArchiveSlice = makeArchiveSlice([]tar.Header{
+		{Name: "item-0", Typeflag: tar.TypeReg, Size: 123, Mode: 0600, ModTime: testDate},
+		{Name: "item-1", Typeflag: tar.TypeReg, Size: 456, Mode: 0600, ModTime: testDate},
+		{Name: "item-2", Typeflag: tar.TypeReg, Size: 789, Mode: 0600, ModTime: testDate},
+	})
+
+	// testArchives are the shared fixtures for the Put/Get/Stat tests: each
+	// entry carries the headers (and optional file bodies) of an archive,
+	// the Get() failures it is expected to produce, and rename scenarios
+	// with their expected extraction results.
+	testArchives = []struct {
+		name              string
+		rootOnly          bool
+		headers           []tar.Header
+		contents          map[string][]byte
+		excludes          []string
+		expectedGetErrors []expectedError
+		subdirContents    map[string][]string
+		renames           []struct {
+			name     string
+			renames  map[string]string
+			expected []string
+		}
+	}{
+		{
+			name:     "regular",
+			rootOnly: false,
+			headers: []tar.Header{
+				{Name: "file-0", Typeflag: tar.TypeReg, Size: 123456789, Mode: 0600, ModTime: testDate},
+				{Name: "file-a", Typeflag: tar.TypeReg, Size: 23, Mode: 0600, ModTime: testDate},
+				{Name: "file-b", Typeflag: tar.TypeReg, Size: 23, Mode: 0600, ModTime: testDate},
+				{Name: "file-c", Typeflag: tar.TypeLink, Linkname: "file-a", Mode: 0600, ModTime: testDate},
+				{Name: "file-u", Typeflag: tar.TypeReg, Size: 23, Mode: cISUID | 0755, ModTime: testDate},
+				{Name: "file-g", Typeflag: tar.TypeReg, Size: 23, Mode: cISGID | 0755, ModTime: testDate},
+				{Name: "file-t", Typeflag: tar.TypeReg, Size: 23, Mode: cISVTX | 0755, ModTime: testDate},
+				{Name: "link-0", Typeflag: tar.TypeSymlink, Linkname: "../file-0", Size: 123456789, Mode: 0777, ModTime: testDate},
+				{Name: "link-a", Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0777, ModTime: testDate},
+				{Name: "link-b", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0777, ModTime: testDate},
+				{Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0600, ModTime: testDate},
+				{Name: "hlink-a", Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0600, ModTime: testDate},
+				{Name: "hlink-b", Typeflag: tar.TypeLink, Linkname: "../file-b", Size: 23, Mode: 0600, ModTime: testDate},
+				{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
+				{Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0660, ModTime: testDate},
+				{Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 34, Mode: 0660, ModTime: testDate},
+				{Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0777, ModTime: testDate},
+				{Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0777, ModTime: testDate},
+				{Name: "subdir-a/file-c", Typeflag: tar.TypeSymlink, Linkname: "/file-c", Size: 23, Mode: 0777, ModTime: testDate},
+				{Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
+				{Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0660, ModTime: testDate},
+				{Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0660, ModTime: testDate},
+				{Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
+				{Name: "subdir-c/file-n", Typeflag: tar.TypeReg, Size: 432, Mode: 0666, ModTime: testDate},
+				{Name: "subdir-c/file-o", Typeflag: tar.TypeReg, Size: 56, Mode: 0666, ModTime: testDate},
+				{Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
+				{Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0600, ModTime: testDate},
+				{Name: "subdir-d/hlink-a", Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0600, ModTime: testDate},
+				{Name: "subdir-d/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0600, ModTime: testDate},
+				{Name: "archive-a", Typeflag: tar.TypeReg, Size: 0, Mode: 0600, ModTime: testDate},
+				{Name: "subdir-e", Typeflag: tar.TypeDir, Mode: 0500, ModTime: testDate},
+				{Name: "subdir-e/file-p", Typeflag: tar.TypeReg, Size: 890, Mode: 0600, ModTime: testDate},
+			},
+			contents: map[string][]byte{
+				"archive-a": testArchiveSlice,
+			},
+			// Dangling symlinks (and links escaping the root) are expected
+			// to fail with ENOENT when dereferenced by Get().
+			expectedGetErrors: []expectedError{
+				{inSubdir: false, name: "link-0", err: syscall.ENOENT},
+				{inSubdir: false, name: "link-b", err: syscall.ENOENT},
+				{inSubdir: false, name: "subdir-a/file-b", err: syscall.ENOENT},
+				{inSubdir: true, name: "link-0", err: syscall.ENOENT},
+				{inSubdir: true, name: "link-b", err: syscall.ENOENT},
+				{inSubdir: true, name: "subdir-a/file-b", err: syscall.ENOENT},
+				{inSubdir: true, name: "subdir-a/file-c", err: syscall.ENOENT},
+			},
+			renames: []struct {
+				name     string
+				renames  map[string]string
+				expected []string
+			}{
+				{
+					name:    "no-match-dir",
+					renames: map[string]string{"subdir-z": "subdir-y"},
+					expected: []string{
+						"file-0",
+						"file-a",
+						"file-b",
+						"file-c",
+						"file-u",
+						"file-g",
+						"file-t",
+						"link-0",
+						"link-a",
+						"link-b",
+						"hlink-0",
+						"hlink-a",
+						"hlink-b",
+						"subdir-a",
+						"subdir-a/file-n",
+						"subdir-a/file-o",
+						"subdir-a/file-a",
+						"subdir-a/file-b",
+						"subdir-a/file-c",
+						"subdir-b",
+						"subdir-b/file-n",
+						"subdir-b/file-o",
+						"subdir-c",
+						"subdir-c/file-n",
+						"subdir-c/file-o",
+						"subdir-d",
+						"subdir-d/hlink-0",
+						"subdir-d/hlink-a",
+						"subdir-d/hlink-b",
+						"subdir-e",
+						"subdir-e/file-p",
+						"archive-a",
+					},
+				},
+				{
+					name:    "no-match-file",
+					renames: map[string]string{"file-n": "file-z"},
+					expected: []string{
+						"file-0",
+						"file-a",
+						"file-b",
+						"file-c",
+						"file-u",
+						"file-g",
+						"file-t",
+						"link-0",
+						"link-a",
+						"link-b",
+						"hlink-0",
+						"hlink-a",
+						"hlink-b",
+						"subdir-a",
+						"subdir-a/file-n",
+						"subdir-a/file-o",
+						"subdir-a/file-a",
+						"subdir-a/file-b",
+						"subdir-a/file-c",
+						"subdir-b",
+						"subdir-b/file-n",
+						"subdir-b/file-o",
+						"subdir-c",
+						"subdir-c/file-n",
+						"subdir-c/file-o",
+						"subdir-d",
+						"subdir-d/hlink-0",
+						"subdir-d/hlink-a",
+						"subdir-d/hlink-b",
+						"subdir-e",
+						"subdir-e/file-p",
+						"archive-a",
+					},
+				},
+				{
+					name:    "directory",
+					renames: map[string]string{"subdir-a": "subdir-z"},
+					expected: []string{
+						"file-0",
+						"file-a",
+						"file-b",
+						"file-c",
+						"file-u",
+						"file-g",
+						"file-t",
+						"link-0",
+						"link-a",
+						"link-b",
+						"hlink-0",
+						"hlink-a",
+						"hlink-b",
+						"subdir-z",
+						"subdir-z/file-n",
+						"subdir-z/file-o",
+						"subdir-z/file-a",
+						"subdir-z/file-b",
+						"subdir-z/file-c",
+						"subdir-b",
+						"subdir-b/file-n",
+						"subdir-b/file-o",
+						"subdir-c",
+						"subdir-c/file-n",
+						"subdir-c/file-o",
+						"subdir-d",
+						"subdir-d/hlink-0",
+						"subdir-d/hlink-a",
+						"subdir-d/hlink-b",
+						"subdir-e",
+						"subdir-e/file-p",
+						"archive-a",
+					},
+				},
+				{
+					name:    "file-in-directory",
+					renames: map[string]string{"subdir-a/file-n": "subdir-a/file-z"},
+					expected: []string{
+						"file-0",
+						"file-a",
+						"file-b",
+						"file-c",
+						"file-u",
+						"file-g",
+						"file-t",
+						"link-0",
+						"link-a",
+						"link-b",
+						"hlink-0",
+						"hlink-a",
+						"hlink-b",
+						"subdir-a",
+						"subdir-a/file-z",
+						"subdir-a/file-o",
+						"subdir-a/file-a",
+						"subdir-a/file-b",
+						"subdir-a/file-c",
+						"subdir-b",
+						"subdir-b/file-n",
+						"subdir-b/file-o",
+						"subdir-c",
+						"subdir-c/file-n",
+						"subdir-c/file-o",
+						"subdir-d",
+						"subdir-d/hlink-0",
+						"subdir-d/hlink-a",
+						"subdir-d/hlink-b",
+						"subdir-e",
+						"subdir-e/file-p",
+						"archive-a",
+					},
+				},
+			},
+		},
+		{
+			// Device nodes can only be created by root.
+			name:     "devices",
+			rootOnly: true,
+			headers: []tar.Header{
+				{Name: "char-dev", Typeflag: tar.TypeChar, Devmajor: 0, Devminor: 0, Mode: 0600, ModTime: testDate},
+				{Name: "blk-dev", Typeflag: tar.TypeBlock, Devmajor: 0, Devminor: 0, Mode: 0600, ModTime: testDate},
+			},
+		},
+	}
+)
+
+// TestPutNoChroot runs the Put() test suite with the chroot fast path
+// disabled, exercising the pure-Go traversal code.  canChroot is restored
+// afterwards so other tests see the platform default.
+func TestPutNoChroot(t *testing.T) {
+	couldChroot := canChroot
+	canChroot = false
+	testPut(t)
+	canChroot = couldChroot
+}
+
+// testPut exercises Put() against the shared fixture archives: extraction
+// into various top directories, entry renaming, directory/non-directory
+// overwrite policies, device-node handling, and setuid/setgid/sticky-bit
+// stripping.  Ownership is mapped to the current user so the suite can run
+// unprivileged (root-only fixtures are skipped when uid != 0).
+func testPut(t *testing.T) {
+	uidMap := []idtools.IDMap{{HostID: os.Getuid(), ContainerID: 0, Size: 1}}
+	gidMap := []idtools.IDMap{{HostID: os.Getgid(), ContainerID: 0, Size: 1}}
+
+	for i := range testArchives {
+		for _, topdir := range []string{"", ".", "top"} {
+			t.Run(fmt.Sprintf("archive=%s,topdir=%s", testArchives[i].name, topdir), func(t *testing.T) {
+				if uid != 0 && testArchives[i].rootOnly {
+					t.Skipf("test archive %q can only be tested with root privileges, skipping", testArchives[i].name)
+				}
+
+				dir, err := makeContextFromArchive(t, makeArchive(testArchives[i].headers, testArchives[i].contents), topdir)
+				require.NoErrorf(t, err, "error creating context from archive %q, topdir=%q", testArchives[i].name, topdir)
+
+				// enumerate what we expect to have created
+				expected := make([]enumeratedFile, 0, len(testArchives[i].headers)+1)
+				if topdir != "" && topdir != "." {
+					info, err := os.Stat(filepath.Join(dir, topdir))
+					require.NoErrorf(t, err, "error statting directory %q", filepath.Join(dir, topdir))
+					expected = append(expected, enumeratedFile{
+						name:      filepath.FromSlash(topdir),
+						mode:      info.Mode() & os.ModePerm,
+						isSymlink: info.Mode()&os.ModeSymlink == os.ModeSymlink,
+						date:      info.ModTime().UTC().String(),
+					})
+				}
+				for _, hdr := range testArchives[i].headers {
+					expected = append(expected, enumeratedFile{
+						name:      filepath.Join(filepath.FromSlash(topdir), filepath.FromSlash(hdr.Name)),
+						mode:      os.FileMode(hdr.Mode) & os.ModePerm,
+						isSymlink: hdr.Typeflag == tar.TypeSymlink,
+						date:      hdr.ModTime.UTC().String(),
+					})
+				}
+				sort.Slice(expected, func(i, j int) bool { return strings.Compare(expected[i].name, expected[j].name) < 0 })
+
+				// enumerate what we actually created
+				fileList, err := enumerateFiles(dir)
+				require.NoErrorf(t, err, "error walking context directory for archive %q, topdir=%q", testArchives[i].name, topdir)
+				sort.Slice(fileList, func(i, j int) bool { return strings.Compare(fileList[i].name, fileList[j].name) < 0 })
+
+				// make sure they're the same, modulo mode bits and symlink
+				// dates that the platform may not preserve
+				moddedEnumeratedFiles := func(enumerated []enumeratedFile) []enumeratedFile {
+					m := make([]enumeratedFile, 0, len(enumerated))
+					for i := range enumerated {
+						e := enumeratedFile{
+							name:      enumerated[i].name,
+							mode:      os.FileMode(int64(enumerated[i].mode) & testModeMask),
+							isSymlink: enumerated[i].isSymlink,
+							date:      enumerated[i].date,
+						}
+						if testIgnoreSymlinkDates && e.isSymlink {
+							e.date = ""
+						}
+						m = append(m, e)
+					}
+					return m
+				}
+				if !reflect.DeepEqual(expected, fileList) && reflect.DeepEqual(moddedEnumeratedFiles(expected), moddedEnumeratedFiles(fileList)) {
+					logrus.Warn("chmod() lost some bits and possibly timestamps on symlinks, otherwise we match the source archive")
+				} else {
+					require.Equal(t, expected, fileList, "list of files in context directory for archive %q under topdir %q should match the archived used to populate it", testArchives[i].name, topdir)
+				}
+			})
+		}
+
+		// Rename scenarios: extract with a rename map and compare the
+		// resulting tree against the expected renamed listing.
+		for _, renames := range testArchives[i].renames {
+			t.Run(fmt.Sprintf("archive=%s,rename=%s", testArchives[i].name, renames.name), func(t *testing.T) {
+				if uid != 0 && testArchives[i].rootOnly {
+					t.Skipf("test archive %q can only be tested with root privileges, skipping", testArchives[i].name)
+				}
+
+				tmp := t.TempDir()
+
+				archive := makeArchive(testArchives[i].headers, testArchives[i].contents)
+				err := Put(tmp, tmp, PutOptions{UIDMap: uidMap, GIDMap: gidMap, Rename: renames.renames}, archive)
+				require.NoErrorf(t, err, "error extracting archive %q to directory %q", testArchives[i].name, tmp)
+
+				var found []string
+				err = filepath.WalkDir(tmp, func(path string, d fs.DirEntry, err error) error {
+					if err != nil {
+						return err
+					}
+					rel, err := filepath.Rel(tmp, path)
+					if err != nil {
+						return err
+					}
+					if rel == "." {
+						return nil
+					}
+					found = append(found, rel)
+					return nil
+				})
+				require.NoErrorf(t, err, "error walking context directory for archive %q under %q", testArchives[i].name, tmp)
+				sort.Strings(found)
+
+				expected := renames.expected
+				sort.Strings(expected)
+				assert.Equal(t, expected, found, "renaming did not work as expected")
+			})
+		}
+	}
+
+	// Overwrite directory: replacing an extracted directory with each
+	// non-directory type must fail unless NoOverwriteDirNonDir is off.
+	for _, overwrite := range []bool{false, true} {
+		for _, typeFlag := range []byte{tar.TypeReg, tar.TypeLink, tar.TypeSymlink, tar.TypeChar, tar.TypeBlock, tar.TypeFifo} {
+			t.Run(fmt.Sprintf("overwrite (dir)=%v,type=%c", overwrite, typeFlag), func(t *testing.T) {
+				archive := makeArchiveSlice([]tar.Header{
+					{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0755, Linkname: "target", ModTime: testDate},
+					{Name: "target", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
+					{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0755, Linkname: "target", ModTime: testDate},
+					{Name: "target", Typeflag: tar.TypeReg, Size: 123, Mode: 0755, ModTime: testDate},
+					{Name: "test", Typeflag: tar.TypeDir, Size: 0, Mode: 0755, ModTime: testDate},
+					{Name: "test/content", Typeflag: tar.TypeReg, Size: 0, Mode: 0755, ModTime: testDate},
+					{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0755, Linkname: "target", ModTime: testDate},
+				})
+				tmp := t.TempDir()
+				err := Put(tmp, tmp, PutOptions{UIDMap: uidMap, GIDMap: gidMap, NoOverwriteDirNonDir: !overwrite}, bytes.NewReader(archive))
+				if overwrite {
+					// EPERM is tolerated: unprivileged users may not be able
+					// to create some of these types at all.
+					if !errors.Is(err, syscall.EPERM) {
+						assert.Nilf(t, err, "expected to overwrite directory with type %c: %v", typeFlag, err)
+					}
+				} else {
+					assert.Errorf(t, err, "expected an error trying to overwrite directory with type %c", typeFlag)
+				}
+			})
+		}
+	}
+
+	// Overwrite non-directory: the mirror-image policy check.
+	for _, overwrite := range []bool{false, true} {
+		for _, typeFlag := range []byte{tar.TypeReg, tar.TypeLink, tar.TypeSymlink, tar.TypeChar, tar.TypeBlock, tar.TypeFifo} {
+			t.Run(fmt.Sprintf("overwrite (non-dir)=%v,type=%c", overwrite, typeFlag), func(t *testing.T) {
+				archive := makeArchiveSlice([]tar.Header{
+					{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0755, Linkname: "target", ModTime: testDate},
+					{Name: "target", Typeflag: tar.TypeReg, Mode: 0755, ModTime: testDate},
+					{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0755, Linkname: "target", ModTime: testDate},
+					{Name: "target", Typeflag: tar.TypeReg, Size: 123, Mode: 0755, ModTime: testDate},
+					{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0755, Linkname: "target", ModTime: testDate},
+					{Name: "test", Typeflag: tar.TypeDir, Size: 0, Mode: 0755, ModTime: testDate},
+					{Name: "test/content", Typeflag: tar.TypeReg, Size: 0, Mode: 0755, ModTime: testDate},
+				})
+				tmp := t.TempDir()
+				err := Put(tmp, tmp, PutOptions{UIDMap: uidMap, GIDMap: gidMap, NoOverwriteNonDirDir: !overwrite}, bytes.NewReader(archive))
+				if overwrite {
+					if !errors.Is(err, syscall.EPERM) {
+						assert.Nilf(t, err, "expected to overwrite file with type %c: %v", typeFlag, err)
+					}
+				} else {
+					assert.Errorf(t, err, "expected an error trying to overwrite file of type %c", typeFlag)
+				}
+			})
+		}
+	}
+
+	// IgnoreDevices: device entries (and hard links to them) should be
+	// silently dropped when the option is set.
+	for _, ignoreDevices := range []bool{false, true} {
+		for _, typeFlag := range []byte{tar.TypeChar, tar.TypeBlock} {
+			t.Run(fmt.Sprintf("ignoreDevices=%v,type=%c", ignoreDevices, typeFlag), func(t *testing.T) {
+				if uid != 0 && !ignoreDevices {
+					t.Skip("can only test !IgnoreDevices with root privileges, skipping")
+				}
+				archive := makeArchiveSlice([]tar.Header{
+					{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0600, ModTime: testDate, Devmajor: 0, Devminor: 0},
+					{Name: "link", Typeflag: tar.TypeLink, Size: 0, Mode: 0600, ModTime: testDate, Linkname: "test"},
+					{Name: "unrelated", Typeflag: tar.TypeReg, Size: 0, Mode: 0600, ModTime: testDate},
+				})
+				tmp := t.TempDir()
+				err := Put(tmp, tmp, PutOptions{UIDMap: uidMap, GIDMap: gidMap, IgnoreDevices: ignoreDevices}, bytes.NewReader(archive))
+				require.Nilf(t, err, "expected to extract content with typeflag %c without an error: %v", typeFlag, err)
+				fileList, err := enumerateFiles(tmp)
+				require.Nilf(t, err, "unexpected error scanning the contents of extraction directory for typeflag %c: %v", typeFlag, err)
+				expectedItems := 3
+				if ignoreDevices {
+					expectedItems = 1
+				}
+				require.Equalf(t, expectedItems, len(fileList), "didn't extract as many things as expected for typeflag %c", typeFlag)
+			})
+		}
+	}
+
+	// Bit stripping: every combination of the three strip options against a
+	// file carrying setuid+setgid+sticky.
+	for _, stripSetuidBit := range []bool{false, true} {
+		for _, stripSetgidBit := range []bool{false, true} {
+			for _, stripStickyBit := range []bool{false, true} {
+				t.Run(fmt.Sprintf("stripSetuidBit=%v,stripSetgidBit=%v,stripStickyBit=%v", stripSetuidBit, stripSetgidBit, stripStickyBit), func(t *testing.T) {
+					mode := int64(0o700) | cISUID | cISGID | cISVTX
+					archive := makeArchiveSlice([]tar.Header{
+						{Name: "test", Typeflag: tar.TypeReg, Size: 0, Mode: mode, ModTime: testDate},
+					})
+					tmp := t.TempDir()
+					putOptions := PutOptions{
+						UIDMap:         uidMap,
+						GIDMap:         gidMap,
+						StripSetuidBit: stripSetuidBit,
+						StripSetgidBit: stripSetgidBit,
+						StripStickyBit: stripStickyBit,
+					}
+					err := Put(tmp, tmp, putOptions, bytes.NewReader(archive))
+					// Fixed: these two format strings previously lacked a %v
+					// verb for the trailing err argument.
+					require.Nilf(t, err, "unexpected error writing sample file: %v", err)
+					st, err := os.Stat(filepath.Join(tmp, "test"))
+					require.Nilf(t, err, "unexpected error checking permissions of file: %v", err)
+					assert.Equalf(t, stripSetuidBit, st.Mode()&os.ModeSetuid == 0, "setuid bit was not set/stripped correctly")
+					assert.Equalf(t, stripSetgidBit, st.Mode()&os.ModeSetgid == 0, "setgid bit was not set/stripped correctly")
+					assert.Equalf(t, stripStickyBit, st.Mode()&os.ModeSticky == 0, "sticky bit was not set/stripped correctly")
+				})
+			}
+		}
+	}
+}
+
+// isExpectedError reports whether err matches one of the failures the test
+// fixture declared as acceptable for the named item in the given context
+// (inSubdir).  Matching is by error-message substring because the fixture
+// errors are raw errnos wrapped by the library at various depths.
+func isExpectedError(err error, inSubdir bool, name string, expectedErrors []expectedError) bool {
+	// if we couldn't read that content, check if it's one of the expected failures
+	for _, expectedError := range expectedErrors {
+		if expectedError.inSubdir != inSubdir {
+			continue
+		}
+		if expectedError.name != name {
+			continue
+		}
+		if !strings.Contains(err.Error(), expectedError.err.Error()) {
+			// not expecting this specific error
+			continue
+		}
+		// it's an expected failure
+		return true
+	}
+	return false
+}
+
+// TestStatNoChroot runs the Stat() test suite with the chroot fast path
+// disabled; canChroot is restored afterwards.
+func TestStatNoChroot(t *testing.T) {
+	couldChroot := canChroot
+	canChroot = false
+	testStat(t)
+	canChroot = couldChroot
+}
+
+// testStat exercises Stat() over every fixture archive, with both absolute
+// and relative item names and several top directories.  Each non-glob name
+// must match exactly one item, and the reported type/size/link-target must
+// agree with the header the item was created from.
+func testStat(t *testing.T) {
+	for _, absolute := range []bool{false, true} {
+		for _, topdir := range []string{"", ".", "top"} {
+			for _, testArchive := range testArchives {
+				if uid != 0 && testArchive.rootOnly {
+					t.Logf("test archive %q can only be tested with root privileges, skipping", testArchive.name)
+					continue
+				}
+
+				dir, err := makeContextFromArchive(t, makeArchive(testArchive.headers, testArchive.contents), topdir)
+				require.NoErrorf(t, err, "error creating context from archive %q", testArchive.name)
+
+				root := dir
+
+				for _, testItem := range testArchive.headers {
+					name := filepath.FromSlash(testItem.Name)
+					if absolute {
+						name = filepath.Join(root, topdir, name)
+					}
+					t.Run(fmt.Sprintf("absolute=%t,topdir=%s,archive=%s,item=%s", absolute, topdir, testArchive.name, name), func(t *testing.T) {
+						// read stats about this item
+						var excludes []string
+						for _, exclude := range testArchive.excludes {
+							excludes = append(excludes, filepath.FromSlash(exclude))
+						}
+						options := StatOptions{
+							CheckForArchives: false,
+							Excludes:         excludes,
+						}
+						stats, err := Stat(root, topdir, options, []string{name})
+						require.NoErrorf(t, err, "error statting %q: %v", name, err)
+						for _, st := range stats {
+							// should not have gotten an error
+							require.Emptyf(t, st.Error, "expected no error from stat %q", st.Glob)
+							// no matching characters -> should have matched one item
+							require.NotEmptyf(t, st.Globbed, "expected at least one match on glob %q", st.Glob)
+							matches := 0
+							for _, glob := range st.Globbed {
+								matches++
+								require.Equal(t, st.Glob, glob, "expected entry for %q", st.Glob)
+								require.NotNil(t, st.Results[glob], "%q globbed %q, but there are no results for it", st.Glob, glob)
+								// cross-check the result against a direct lstat()
+								toStat := glob
+								if !absolute {
+									toStat = filepath.Join(root, topdir, name)
+								}
+								_, err = os.Lstat(toStat)
+								require.NoErrorf(t, err, "got error on lstat() of returned value %q(%q(%q)): %v", toStat, glob, name, err)
+								result := st.Results[glob]
+
+								switch testItem.Typeflag {
+								case tar.TypeReg:
+									// entries with explicit contents had their
+									// declared size overridden at creation time
+									if actualContent, ok := testArchive.contents[testItem.Name]; ok {
+										testItem.Size = int64(len(actualContent))
+									}
+									require.Equal(t, testItem.Size, result.Size, "unexpected size difference for %q", name)
+									require.True(t, result.IsRegular, "expected %q.IsRegular to be true", glob)
+									require.False(t, result.IsDir, "expected %q.IsDir to be false", glob)
+									require.False(t, result.IsSymlink, "expected %q.IsSymlink to be false", glob)
+								case tar.TypeDir:
+									require.False(t, result.IsRegular, "expected %q.IsRegular to be false", glob)
+									require.True(t, result.IsDir, "expected %q.IsDir to be true", glob)
+									require.False(t, result.IsSymlink, "expected %q.IsSymlink to be false", glob)
+								case tar.TypeSymlink:
+									require.True(t, result.IsSymlink, "%q is supposed to be a symbolic link, but is not", name)
+									require.Equal(t, filepath.FromSlash(testItem.Linkname), result.ImmediateTarget, "%q is supposed to point to %q, but points to %q", glob, testItem.Linkname, result.ImmediateTarget)
+								case tar.TypeBlock, tar.TypeChar:
+									require.False(t, result.IsRegular, "%q is a regular file, but is not supposed to be", name)
+									require.False(t, result.IsDir, "%q is a directory, but is not supposed to be", name)
+									require.False(t, result.IsSymlink, "%q is not supposed to be a symbolic link, but appears to be one", name)
+								}
+							}
+							require.Equal(t, 1, matches, "non-glob %q matched %d items, not exactly one", name, matches)
+						}
+					})
+				}
+			}
+		}
+	}
+}
+
+// TestGetSingleNoChroot runs the single-item Get() test suite with the
+// chroot fast path disabled; canChroot is restored afterwards.
+func TestGetSingleNoChroot(t *testing.T) {
+	couldChroot := canChroot
+	canChroot = false
+	testGetSingle(t)
+	canChroot = couldChroot
+}
+
+// testGetSingle asks Get() for one item at a time from every fixture
+// archive, with absolute and relative names and several top directories.
+// Each archive read runs in a goroutine feeding a pipe while the test reads
+// it back with a tar.Reader; fixture-declared failures (dangling symlinks,
+// etc.) are tolerated via isExpectedError.  For items carrying special mode
+// bits, every combination of the strip options is also verified.
+func testGetSingle(t *testing.T) {
+	for _, absolute := range []bool{false, true} {
+		for _, topdir := range []string{"", ".", "top"} {
+			for _, testArchive := range testArchives {
+				var excludes []string
+				for _, exclude := range testArchive.excludes {
+					excludes = append(excludes, filepath.FromSlash(exclude))
+				}
+
+				getOptions := GetOptions{
+					Excludes:       excludes,
+					ExpandArchives: false,
+				}
+
+				if uid != 0 && testArchive.rootOnly {
+					t.Logf("test archive %q can only be tested with root privileges, skipping", testArchive.name)
+					continue
+				}
+
+				dir, err := makeContextFromArchive(t, makeArchive(testArchive.headers, testArchive.contents), topdir)
+				require.NoErrorf(t, err, "error creating context from archive %q", testArchive.name)
+
+				root := dir
+
+				for _, testItem := range testArchive.headers {
+					name := filepath.FromSlash(testItem.Name)
+					if absolute {
+						name = filepath.Join(root, topdir, name)
+					}
+					t.Run(fmt.Sprintf("absolute=%t,topdir=%s,archive=%s,item=%s", absolute, topdir, testArchive.name, name), func(t *testing.T) {
+						// check if we can get this one item
+						err := Get(root, topdir, getOptions, []string{name}, io.Discard)
+						// if we couldn't read that content, check if it's one of the expected failures
+						if err != nil && isExpectedError(err, topdir != "" && topdir != ".", testItem.Name, testArchive.expectedGetErrors) {
+							return
+						}
+						require.NoErrorf(t, err, "error getting %q under %q", name, filepath.Join(root, topdir))
+						// we'll check subdirectories later
+						if testItem.Typeflag == tar.TypeDir {
+							return
+						}
+						// check what we get when we get this one item:
+						// Get() writes into one end of a pipe while we read
+						// the archive back from the other end
+						pipeReader, pipeWriter := io.Pipe()
+						var getErr error
+						var wg sync.WaitGroup
+						wg.Add(1)
+						go func() {
+							getErr = Get(root, topdir, getOptions, []string{name}, pipeWriter)
+							pipeWriter.Close()
+							wg.Done()
+						}()
+						tr := tar.NewReader(pipeReader)
+						hdr, err := tr.Next()
+						for err == nil {
+							assert.Equal(t, filepath.Base(name), filepath.FromSlash(hdr.Name), "expected item named %q, got %q", filepath.Base(name), filepath.FromSlash(hdr.Name))
+							hdr, err = tr.Next()
+						}
+						assert.Equal(t, io.EOF.Error(), err.Error(), "expected EOF at end of archive, got %q", err.Error())
+						// for items with special mode bits, verify every
+						// combination of the strip options as well
+						if !t.Failed() && testItem.Typeflag == tar.TypeReg && testItem.Mode&(cISUID|cISGID|cISVTX) != 0 {
+							for _, stripSetuidBit := range []bool{false, true} {
+								for _, stripSetgidBit := range []bool{false, true} {
+									for _, stripStickyBit := range []bool{false, true} {
+										t.Run(fmt.Sprintf("absolute=%t,topdir=%s,archive=%s,item=%s,strip_setuid=%t,strip_setgid=%t,strip_sticky=%t", absolute, topdir, testArchive.name, name, stripSetuidBit, stripSetgidBit, stripStickyBit), func(t *testing.T) {
+											var getErr error
+											var wg sync.WaitGroup
+											getOptions := getOptions
+											getOptions.StripSetuidBit = stripSetuidBit
+											getOptions.StripSetgidBit = stripSetgidBit
+											getOptions.StripStickyBit = stripStickyBit
+											pipeReader, pipeWriter := io.Pipe()
+											wg.Add(1)
+											go func() {
+												getErr = Get(root, topdir, getOptions, []string{name}, pipeWriter)
+												pipeWriter.Close()
+												wg.Done()
+											}()
+											tr := tar.NewReader(pipeReader)
+											hdr, err := tr.Next()
+											for err == nil {
+												expectedMode := testItem.Mode
+												modifier := ""
+												if stripSetuidBit {
+													expectedMode &^= cISUID
+													modifier += "(with setuid bit stripped) "
+												}
+												if stripSetgidBit {
+													expectedMode &^= cISGID
+													modifier += "(with setgid bit stripped) "
+												}
+												if stripStickyBit {
+													expectedMode &^= cISVTX
+													modifier += "(with sticky bit stripped) "
+												}
+												if expectedMode != hdr.Mode && expectedMode&testModeMask == hdr.Mode&testModeMask {
+													logrus.Warnf("chmod() lost some bits: expected 0%o, got 0%o", expectedMode, hdr.Mode)
+												} else {
+													assert.Equal(t, expectedMode, hdr.Mode, "expected item named %q %sto have mode 0%o, got 0%o", hdr.Name, modifier, expectedMode, hdr.Mode)
+												}
+												hdr, err = tr.Next()
+											}
+											assert.Equal(t, io.EOF.Error(), err.Error(), "expected EOF at end of archive, got %q", err.Error())
+											wg.Wait()
+											assert.NoErrorf(t, getErr, "unexpected error from Get(%q): %v", name, getErr)
+											pipeReader.Close()
+										})
+									}
+								}
+							}
+						}
+
+						wg.Wait()
+						assert.NoErrorf(t, getErr, "unexpected error from Get(%q): %v", name, getErr)
+						pipeReader.Close()
+					})
+				}
+			}
+		}
+	}
+}
+
+// TestGetMultipleNoChroot runs the multi-item/glob Get() test suite with the
+// chroot fast path disabled; canChroot is restored afterwards.
+func TestGetMultipleNoChroot(t *testing.T) {
+	couldChroot := canChroot
+	canChroot = false
+	testGetMultiple(t)
+	canChroot = couldChroot
+}
+
+func testGetMultiple(t *testing.T) {
+ type getTestArchiveCase struct {
+ name string
+ pattern string
+ exclude []string
+ items []string
+ expandArchives bool
+ stripSetuidBit bool
+ stripSetgidBit bool
+ stripStickyBit bool
+ stripXattrs bool
+ keepDirectoryNames bool
+ renames map[string]string
+ noDerefSymlinks bool
+ }
+ var getTestArchives = []struct {
+ name string
+ headers []tar.Header
+ contents map[string][]byte
+ cases []getTestArchiveCase
+ expectedGetErrors []expectedError
+ }{
+ {
+ name: "regular",
+ headers: []tar.Header{
+ {Name: "file-0", Typeflag: tar.TypeReg, Size: 123456789, Mode: 0600},
+ {Name: "file-a", Typeflag: tar.TypeReg, Size: 23, Mode: 0600},
+ {Name: "file-b", Typeflag: tar.TypeReg, Size: 23, Mode: 0600},
+ {Name: "link-a", Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0600},
+ {Name: "link-c", Typeflag: tar.TypeSymlink, Linkname: "subdir-c", Mode: 0700, ModTime: testDate},
+ {Name: "archive-a", Typeflag: tar.TypeReg, Size: 0, Mode: 0600},
+ {Name: "non-archive-a", Typeflag: tar.TypeReg, Size: 1199, Mode: 0600},
+ {Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0600},
+ {Name: "something-a", Typeflag: tar.TypeReg, Size: 34, Mode: 0600},
+ {Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0700},
+ {Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0660},
+ {Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0660},
+ {Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0600},
+ {Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0600},
+ {Name: "subdir-a/file-c", Typeflag: tar.TypeReg, Size: 56, Mode: 0600},
+ {Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0700},
+ {Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0660},
+ {Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 67, Mode: 0660},
+ {Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0700},
+ {Name: "subdir-c/file-p", Typeflag: tar.TypeReg, Size: 432, Mode: 0666},
+ {Name: "subdir-c/file-q", Typeflag: tar.TypeReg, Size: 78, Mode: 0666},
+ {Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0700},
+ {Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0600},
+ {Name: "subdir-e", Typeflag: tar.TypeDir, Mode: 0700},
+ {Name: "subdir-e/subdir-f", Typeflag: tar.TypeDir, Mode: 0700},
+ {Name: "subdir-e/subdir-f/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0600},
+ },
+ contents: map[string][]byte{
+ "archive-a": testArchiveSlice,
+ },
+ expectedGetErrors: []expectedError{
+ {inSubdir: true, name: ".", err: syscall.ENOENT},
+ {inSubdir: true, name: "/subdir-b/*", err: syscall.ENOENT},
+ {inSubdir: true, name: "../../subdir-b/*", err: syscall.ENOENT},
+ },
+ cases: []getTestArchiveCase{
+ {
+ name: "everything",
+ pattern: ".",
+ items: []string{
+ "file-0",
+ "file-a",
+ "file-b",
+ "link-a",
+ "link-c",
+ "hlink-0",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ "subdir-a",
+ "subdir-a/file-n",
+ "subdir-a/file-o",
+ "subdir-a/file-a",
+ "subdir-a/file-b",
+ "subdir-a/file-c",
+ "subdir-b",
+ "subdir-b/file-n",
+ "subdir-b/file-o",
+ "subdir-c",
+ "subdir-c/file-p",
+ "subdir-c/file-q",
+ "subdir-d",
+ "subdir-d/hlink-0",
+ "subdir-e",
+ "subdir-e/subdir-f",
+ "subdir-e/subdir-f/hlink-b",
+ },
+ },
+ {
+ name: "wildcard",
+ pattern: "*",
+ items: []string{
+ "file-0",
+ "file-a",
+ "file-b",
+ "link-a",
+ "hlink-0",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ "file-n", // from subdir-a
+ "file-o", // from subdir-a
+ "file-a", // from subdir-a
+ "file-b", // from subdir-a
+ "file-c", // from subdir-a
+ "file-n", // from subdir-b
+ "file-o", // from subdir-b
+ "file-p", // from subdir-c
+ "file-p", // from link-c -> subdir-c
+ "file-q", // from subdir-c
+ "file-q", // from link-c -> subdir-c
+ "hlink-0", // from subdir-d
+ "subdir-f", // from subdir-e
+ "subdir-f/hlink-b", // from subdir-e
+ },
+ },
+ {
+ name: "dot-with-wildcard-includes-and-excludes",
+ pattern: ".",
+ exclude: []string{"**/*-a", "!**/*-c"},
+ items: []string{
+ "file-0",
+ "file-b",
+ "link-c",
+ "hlink-0",
+ // "subdir-a/file-c", // strings.HasPrefix("**/*-c", "subdir-a/") is false
+ "subdir-b",
+ "subdir-b/file-n",
+ "subdir-b/file-o",
+ "subdir-c",
+ "subdir-c/file-p",
+ "subdir-c/file-q",
+ "subdir-d",
+ "subdir-d/hlink-0",
+ "subdir-e",
+ "subdir-e/subdir-f",
+ "subdir-e/subdir-f/hlink-b",
+ },
+ },
+ {
+ name: "everything-with-wildcard-includes-and-excludes",
+ pattern: "*",
+ exclude: []string{"**/*-a", "!**/*-c"},
+ items: []string{
+ "file-0",
+ "file-b",
+ "file-c",
+ "file-n",
+ "file-o",
+ "file-p", // from subdir-c
+ "file-p", // from link-c -> subdir-c
+ "file-q", // from subdir-c
+ "file-q", // from link-c -> subdir-c
+ "hlink-0",
+ "hlink-0",
+ "subdir-f",
+ "subdir-f/hlink-b",
+ },
+ },
+ {
+ name: "dot-with-dot-exclude",
+ pattern: ".",
+ exclude: []string{".", "!**/*-c"},
+ items: []string{
+ "file-0",
+ "file-a",
+ "file-b",
+ "link-a",
+ "link-c",
+ "hlink-0",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ "subdir-a",
+ "subdir-a/file-a",
+ "subdir-a/file-b",
+ "subdir-a/file-c",
+ "subdir-a/file-n",
+ "subdir-a/file-o",
+ "subdir-b",
+ "subdir-b/file-n",
+ "subdir-b/file-o",
+ "subdir-c",
+ "subdir-c/file-p",
+ "subdir-c/file-q",
+ "subdir-d",
+ "subdir-d/hlink-0",
+ "subdir-e",
+ "subdir-e/subdir-f",
+ "subdir-e/subdir-f/hlink-b",
+ },
+ },
+ {
+ name: "everything-with-dot-exclude",
+ pattern: "*",
+ exclude: []string{".", "!**/*-c"},
+ items: []string{
+ "file-0",
+ "file-a",
+ "file-a",
+ "file-b",
+ "file-b",
+ "file-c",
+ "file-n",
+ "file-n",
+ "file-o",
+ "file-o",
+ "file-p",
+ "file-p",
+ "file-q",
+ "file-q",
+ "hlink-0",
+ "hlink-0",
+ "link-a",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ "subdir-f",
+ "subdir-f/hlink-b",
+ },
+ },
+ {
+ name: "all-with-all-exclude",
+ pattern: "*",
+ exclude: []string{"*", "!**/*-c"},
+ items: []string{
+ "file-c",
+ "file-p",
+ "file-p",
+ "file-q",
+ "file-q",
+ },
+ },
+ {
+ name: "everything-with-all-exclude",
+ pattern: ".",
+ exclude: []string{"*", "!**/*-c"},
+ items: []string{
+ // "subdir-a/file-c", // strings.HasPrefix("**/*-c", "subdir-a/") is false
+ "link-c",
+ "subdir-c",
+ "subdir-c/file-p",
+ "subdir-c/file-q",
+ },
+ },
+ {
+ name: "file-wildcard",
+ pattern: "file-*",
+ items: []string{
+ "file-0",
+ "file-a",
+ "file-b",
+ },
+ },
+ {
+ name: "file-and-dir-wildcard",
+ pattern: "*-a",
+ items: []string{
+ "file-a",
+ "link-a",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ "file-n", // from subdir-a
+ "file-o", // from subdir-a
+ "file-a", // from subdir-a
+ "file-b", // from subdir-a
+ "file-c", // from subdir-a
+ },
+ },
+ {
+ name: "file-and-dir-wildcard-with-exclude",
+ pattern: "*-a",
+ exclude: []string{"subdir-a", "top/subdir-a"},
+ items: []string{
+ "file-a",
+ "link-a",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ },
+ },
+ {
+ name: "file-and-dir-wildcard-with-wildcard-exclude",
+ pattern: "*-a",
+ exclude: []string{"subdir*", "top/subdir*"},
+ items: []string{
+ "file-a",
+ "link-a",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ },
+ },
+ {
+ name: "file-and-dir-wildcard-with-deep-exclude",
+ pattern: "*-a",
+ exclude: []string{"**/subdir-a"},
+ items: []string{
+ "file-a",
+ "link-a",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ },
+ },
+ {
+ name: "file-and-dir-wildcard-with-wildcard-deep-exclude",
+ pattern: "*-a",
+ exclude: []string{"**/subdir*"},
+ items: []string{
+ "file-a",
+ "link-a",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ },
+ },
+ {
+ name: "file-and-dir-wildcard-with-deep-include",
+ pattern: "*-a",
+ exclude: []string{"**/subdir-a", "!**/file-c"},
+ items: []string{
+ "file-a",
+ "link-a",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ "file-c",
+ },
+ },
+ {
+ name: "file-and-dir-wildcard-with-wildcard-deep-include",
+ pattern: "*-a",
+ exclude: []string{"**/subdir*", "!**/file-c"},
+ items: []string{
+ "file-a",
+ "link-a",
+ "something-a",
+ "archive-a",
+ "non-archive-a",
+ "file-c",
+ },
+ },
+ {
+ name: "subdirectory",
+ pattern: "subdir-e",
+ items: []string{
+ "subdir-f",
+ "subdir-f/hlink-b",
+ },
+ },
+ {
+ name: "subdirectory-wildcard",
+ pattern: "*/subdir-*",
+ items: []string{
+ "hlink-b", // from subdir-e/subdir-f
+ },
+ },
+ {
+ name: "not-expanded-archive",
+ pattern: "*archive-a",
+ items: []string{
+ "archive-a",
+ "non-archive-a",
+ },
+ },
+ {
+ name: "expanded-archive",
+ pattern: "*archive-a",
+ expandArchives: true,
+ items: []string{
+ "non-archive-a",
+ "item-0",
+ "item-1",
+ "item-2",
+ },
+ },
+ {
+ name: "subdir-without-name",
+ pattern: "subdir-e",
+ items: []string{
+ "subdir-f",
+ "subdir-f/hlink-b",
+ },
+ },
+ {
+ name: "subdir-with-name",
+ pattern: "subdir-e",
+ keepDirectoryNames: true,
+ items: []string{
+ "subdir-e",
+ "subdir-e/subdir-f",
+ "subdir-e/subdir-f/hlink-b",
+ },
+ },
+ {
+ name: "root-wildcard",
+ pattern: "/subdir-b/*",
+ keepDirectoryNames: false,
+ items: []string{
+ "file-n",
+ "file-o",
+ },
+ },
+ {
+ name: "dotdot-wildcard",
+ pattern: "../../subdir-b/*",
+ keepDirectoryNames: false,
+ items: []string{
+ "file-n",
+ "file-o",
+ },
+ },
+ {
+ name: "wildcard-with-rename",
+ pattern: "*-a",
+ keepDirectoryNames: false,
+ renames: map[string]string{"file-a": "renamed"},
+ items: []string{
+ "renamed", // from file-a
+ "link-a",
+ "archive-a",
+ "non-archive-a",
+ "something-a",
+ "file-n", // from subdir-a
+ "file-o", // from subdir-a
+ "renamed", // from subdir-a/file-a -> file-a -> renamed
+ "file-b", // from subdir-a
+ "file-c", // from subdir-a
+ },
+ },
+ {
+ name: "wildcard-with-rename-keep",
+ pattern: "*-a",
+ keepDirectoryNames: true,
+ renames: map[string]string{"subdir-a": "subdir-b"},
+ items: []string{
+ "file-a",
+ "link-a",
+ "archive-a",
+ "non-archive-a",
+ "something-a",
+ "subdir-b",
+ "subdir-b/file-n",
+ "subdir-b/file-o",
+ "subdir-b/file-a",
+ "subdir-b/file-b",
+ "subdir-b/file-c",
+ },
+ },
+ {
+ name: "no-deref-symlinks-baseline",
+ pattern: "*-a",
+ noDerefSymlinks: true,
+ items: []string{
+ "file-a",
+ "link-a",
+ "archive-a",
+ "non-archive-a",
+ "something-a",
+ "file-n", // from subdir-a
+ "file-o", // from subdir-a
+ "file-a", // from subdir-a
+ "file-b", // from subdir-a
+ "file-c", // from subdir-a
+ },
+ },
+ {
+ name: "no-deref-symlinks-directory",
+ pattern: "link-c",
+ noDerefSymlinks: true,
+ items: []string{
+ "link-c",
+ },
+ },
+ {
+ name: "deref-symlinks-directory",
+ pattern: "link-c",
+ noDerefSymlinks: false,
+ items: []string{
+ "file-p", // from link-c -> subdir-c
+ "file-q", // from link-c -> subdir-c
+ },
+ },
+ },
+ },
+ }
+
+ for _, topdir := range []string{"", ".", "top"} {
+ for _, testArchive := range getTestArchives {
+ dir, err := makeContextFromArchive(t, makeArchive(testArchive.headers, testArchive.contents), topdir)
+ require.NoErrorf(t, err, "error creating context from archive %q", testArchive.name)
+
+ root := dir
+
+ cases := make(map[string]struct{})
+ for _, testCase := range testArchive.cases {
+ if _, ok := cases[testCase.name]; ok {
+ t.Fatalf("duplicate case %q", testCase.name)
+ }
+ cases[testCase.name] = struct{}{}
+ }
+
+ for _, testCase := range testArchive.cases {
+ var excludes []string
+ for _, exclude := range testCase.exclude {
+ excludes = append(excludes, filepath.FromSlash(exclude))
+ }
+
+ getOptions := GetOptions{
+ Excludes: excludes,
+ ExpandArchives: testCase.expandArchives,
+ StripSetuidBit: testCase.stripSetuidBit,
+ StripSetgidBit: testCase.stripSetgidBit,
+ StripStickyBit: testCase.stripStickyBit,
+ StripXattrs: testCase.stripXattrs,
+ KeepDirectoryNames: testCase.keepDirectoryNames,
+ Rename: testCase.renames,
+ NoDerefSymlinks: testCase.noDerefSymlinks,
+ }
+
+ t.Run(fmt.Sprintf("topdir=%s,archive=%s,case=%s,pattern=%s", topdir, testArchive.name, testCase.name, testCase.pattern), func(t *testing.T) {
+ // ensure that we can get stuff using this spec
+ err := Get(root, topdir, getOptions, []string{testCase.pattern}, io.Discard)
+ if err != nil && isExpectedError(err, topdir != "" && topdir != ".", testCase.pattern, testArchive.expectedGetErrors) {
+ return
+ }
+ require.NoErrorf(t, err, "error getting %q under %q", testCase.pattern, filepath.Join(root, topdir))
+ // see what we get when we get this pattern
+ pipeReader, pipeWriter := io.Pipe()
+ var getErr error
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ getErr = Get(root, topdir, getOptions, []string{testCase.pattern}, pipeWriter)
+ pipeWriter.Close()
+ wg.Done()
+ }()
+ tr := tar.NewReader(pipeReader)
+ hdr, err := tr.Next()
+ actualContents := []string{}
+ for err == nil {
+ actualContents = append(actualContents, filepath.FromSlash(hdr.Name))
+ hdr, err = tr.Next()
+ }
+ pipeReader.Close()
+ sort.Strings(actualContents)
+ // compare it to what we were supposed to get
+ expectedContents := make([]string, 0, len(testCase.items))
+ for _, item := range testCase.items {
+ expectedContents = append(expectedContents, filepath.FromSlash(item))
+ }
+ sort.Strings(expectedContents)
+ assert.Equal(t, io.EOF.Error(), err.Error(), "expected EOF at end of archive, got %q", err.Error())
+ wg.Wait()
+ assert.NoErrorf(t, getErr, "unexpected error from Get(%q)", testCase.pattern)
+ assert.Equal(t, expectedContents, actualContents, "Get(%q,excludes=%v) didn't produce the right set of items", testCase.pattern, excludes)
+ })
+ }
+ }
+ }
+}
+
+func TestEvalNoChroot(t *testing.T) {
+ couldChroot := canChroot
+ canChroot = false
+ testEval(t)
+ canChroot = couldChroot
+}
+
// testEval checks Eval's resolution of paths that traverse a symlink: for
// each vector it points $tmp/link at vector.linkTarget and verifies that
// evaluating vector.inputPath (relative to tmp) yields vector.evaluatedPath
// (also relative to tmp).  Whether chroot() is used is controlled by the
// caller through the package-level canChroot flag.
func testEval(t *testing.T) {
	tmp := t.TempDir()
	options := EvalOptions{}
	linkname := filepath.Join(tmp, "link")
	vectors := []struct {
		id, linkTarget, inputPath, evaluatedPath string
	}{
		// Absolute link targets and ".."-laden targets must all be
		// confined to tmp, so every variant resolves to target/foo or
		// target/subdirectory/foo.
		{"0a", "target", "link/foo", "target/foo"},
		{"1a", "/target", "link/foo", "target/foo"},
		{"2a", "../target", "link/foo", "target/foo"},
		{"3a", "/../target", "link/foo", "target/foo"},
		{"4a", "../../target", "link/foo", "target/foo"},
		{"5a", "target/subdirectory", "link/foo", "target/subdirectory/foo"},
		{"6a", "/target/subdirectory", "link/foo", "target/subdirectory/foo"},
		{"7a", "../target/subdirectory", "link/foo", "target/subdirectory/foo"},
		{"8a", "/../target/subdirectory", "link/foo", "target/subdirectory/foo"},
		{"9a", "../../target/subdirectory", "link/foo", "target/subdirectory/foo"},
		// inputPath is lexically cleaned to "foo" early, so callers
		// won't get values consistent with the kernel, but we use the
		// result for ADD and COPY, where docker build seems to have
		// the same limitation
		{"0b", "target", "link/../foo", "foo"},
		{"1b", "/target", "link/../foo", "foo"},
		{"2b", "../target", "link/../foo", "foo"},
		{"3b", "/../target", "link/../foo", "foo"},
		{"4b", "../../target", "link/../foo", "foo"},
		{"5b", "target/subdirectory", "link/../foo", "foo"},
		{"6b", "/target/subdirectory", "link/../foo", "foo"},
		{"7b", "../target/subdirectory", "link/../foo", "foo"},
		{"8b", "/../target/subdirectory", "link/../foo", "foo"},
		{"9b", "../../target/subdirectory", "link/../foo", "foo"},
	}
	for _, vector := range vectors {
		t.Run(fmt.Sprintf("id=%s", vector.id), func(t *testing.T) {
			// The link may be left over from the previous vector;
			// replace it so it points at this vector's target.
			err := os.Symlink(vector.linkTarget, linkname)
			if err != nil && errors.Is(err, os.ErrExist) {
				os.Remove(linkname)
				err = os.Symlink(vector.linkTarget, linkname)
			}
			require.NoErrorf(t, err, "error creating link from %q to %q", linkname, vector.linkTarget)
			evaluated, err := Eval(tmp, filepath.Join(tmp, vector.inputPath), options)
			require.NoErrorf(t, err, "error evaluating %q: %v", vector.inputPath, err)
			require.Equalf(t, filepath.Join(tmp, vector.evaluatedPath), evaluated, "evaluation of %q with %q pointing to %q failed", vector.inputPath, linkname, vector.linkTarget)
		})
	}
}
+
+func TestMkdirNoChroot(t *testing.T) {
+ couldChroot := canChroot
+ canChroot = false
+ testMkdir(t)
+ canChroot = couldChroot
+}
+
// testMkdir exercises Mkdir against a tree that contains dangling symlinks.
// Each test case names a path to create and the set of new paths (relative
// to the root) that should exist afterwards; the assertions compare a walk
// of the tree before and after the call.
func testMkdir(t *testing.T) {
	type testCase struct {
		name   string   // subtest name
		create string   // path handed to Mkdir
		expect []string // paths expected to be newly created, relative to the root
	}
	testArchives := []struct {
		name      string
		headers   []tar.Header // layout of the pre-populated tree
		testCases []testCase
	}{
		{
			name: "regular",
			headers: []tar.Header{
				{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
				{Name: "subdir-a/subdir-b", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
				{Name: "subdir-a/subdir-b/subdir-c", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
				// dangle1..dangle7 are symlinks whose targets don't exist,
				// with varying degrees of relative/absolute escape attempts.
				{Name: "subdir-a/subdir-b/dangle1", Typeflag: tar.TypeSymlink, Linkname: "dangle1-target", ModTime: testDate},
				{Name: "subdir-a/subdir-b/dangle2", Typeflag: tar.TypeSymlink, Linkname: "../dangle2-target", ModTime: testDate},
				{Name: "subdir-a/subdir-b/dangle3", Typeflag: tar.TypeSymlink, Linkname: "../../dangle3-target", ModTime: testDate},
				{Name: "subdir-a/subdir-b/dangle4", Typeflag: tar.TypeSymlink, Linkname: "../../../dangle4-target", ModTime: testDate},
				{Name: "subdir-a/subdir-b/dangle5", Typeflag: tar.TypeSymlink, Linkname: "../../../../dangle5-target", ModTime: testDate},
				{Name: "subdir-a/subdir-b/dangle6", Typeflag: tar.TypeSymlink, Linkname: "/dangle6-target", ModTime: testDate},
				{Name: "subdir-a/subdir-b/dangle7", Typeflag: tar.TypeSymlink, Linkname: "/../dangle7-target", ModTime: testDate},
			},
			testCases: []testCase{
				{
					name:   "basic",
					create: "subdir-d",
					expect: []string{"subdir-d"},
				},
				{
					name:   "subdir",
					create: "subdir-d/subdir-e/subdir-f",
					expect: []string{"subdir-d", "subdir-d/subdir-e", "subdir-d/subdir-e/subdir-f"},
				},
				// Creating a path that is itself a dangling link should
				// create the link's target, resolved inside the root.
				{
					name:   "dangling-link-itself",
					create: "subdir-a/subdir-b/dangle1",
					expect: []string{"subdir-a/subdir-b/dangle1-target"},
				},
				{
					name:   "dangling-link-as-intermediate-parent",
					create: "subdir-a/subdir-b/dangle2/final",
					expect: []string{"subdir-a/dangle2-target", "subdir-a/dangle2-target/final"},
				},
				{
					name:   "dangling-link-as-intermediate-grandparent",
					create: "subdir-a/subdir-b/dangle3/final",
					expect: []string{"dangle3-target", "dangle3-target/final"},
				},
				// "../.." past the root must be clamped at the root, not escape it.
				{
					name:   "dangling-link-as-intermediate-attempted-relative-breakout",
					create: "subdir-a/subdir-b/dangle4/final",
					expect: []string{"dangle4-target", "dangle4-target/final"},
				},
				{
					name:   "dangling-link-as-intermediate-attempted-relative-breakout-again",
					create: "subdir-a/subdir-b/dangle5/final",
					expect: []string{"dangle5-target", "dangle5-target/final"},
				},
				{
					name:   "dangling-link-itself-absolute",
					create: "subdir-a/subdir-b/dangle6",
					expect: []string{"dangle6-target"},
				},
				{
					name:   "dangling-link-as-intermediate-absolute",
					create: "subdir-a/subdir-b/dangle6/final",
					expect: []string{"dangle6-target", "dangle6-target/final"},
				},
				{
					name:   "dangling-link-as-intermediate-absolute-relative-breakout",
					create: "subdir-a/subdir-b/dangle7/final",
					expect: []string{"dangle7-target", "dangle7-target/final"},
				},
				// Paths that try to climb out of the root land at the root.
				{
					name:   "parent-parent-final",
					create: "../../final",
					expect: []string{"final"},
				},
				{
					name:   "root-parent-final",
					create: "/../final",
					expect: []string{"final"},
				},
				{
					name:   "root-parent-intermediate-parent-final",
					create: "/../intermediate/../final",
					expect: []string{"final"},
				},
			},
		},
	}
	for i := range testArchives {
		t.Run(testArchives[i].name, func(t *testing.T) {
			for _, testCase := range testArchives[i].testCases {
				t.Run(testCase.name, func(t *testing.T) {
					// Build a fresh tree for every case so earlier
					// creations can't leak into later expectations.
					dir, err := makeContextFromArchive(t, makeArchive(testArchives[i].headers, nil), "")
					require.NoErrorf(t, err, "error creating context from archive %q, topdir=%q", testArchives[i].name, "")
					root := dir
					options := MkdirOptions{ChownNew: &idtools.IDPair{UID: os.Getuid(), GID: os.Getgid()}}
					var beforeNames, afterNames []string
					// Catalog the tree before calling Mkdir.
					err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
						if err != nil {
							return err
						}
						rel, err := filepath.Rel(dir, path)
						if err != nil {
							return err
						}
						beforeNames = append(beforeNames, rel)
						return nil
					})
					require.NoErrorf(t, err, "error walking directory to catalog pre-Mkdir contents: %v", err)
					err = Mkdir(root, testCase.create, options)
					require.NoErrorf(t, err, "error creating directory %q under %q with Mkdir: %v", testCase.create, root, err)
					// Catalog it again afterwards.
					err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
						if err != nil {
							return err
						}
						rel, err := filepath.Rel(dir, path)
						if err != nil {
							return err
						}
						afterNames = append(afterNames, rel)
						return nil
					})
					require.NoErrorf(t, err, "error walking directory to catalog post-Mkdir contents: %v", err)
					// The after-walk should be exactly before + expected new paths.
					expected := append([]string{}, beforeNames...)
					for _, expect := range testCase.expect {
						expected = append(expected, filepath.FromSlash(expect))
					}
					sort.Strings(expected)
					sort.Strings(afterNames)
					assert.Equal(t, expected, afterNames, "expected different paths")
				})
			}
		})
	}
}
+
+func TestCleanerSubdirectory(t *testing.T) {
+ testCases := [][2]string{
+ {".", "."},
+ {"..", "."},
+ {"/", "."},
+ {"directory/subdirectory/..", "directory"},
+ {"directory/../..", "."},
+ {"../../directory", "directory"},
+ {"../directory/subdirectory", "directory/subdirectory"},
+ {"/directory/../..", "."},
+ {"/directory/../../directory", "directory"},
+ }
+ for _, testCase := range testCases {
+ t.Run(testCase[0], func(t *testing.T) {
+ cleaner := cleanerReldirectory(filepath.FromSlash(testCase[0]))
+ assert.Equal(t, testCase[1], filepath.ToSlash(cleaner), "expected to get %q, got %q", testCase[1], cleaner)
+ })
+ }
+}
+
+func TestHandleRename(t *testing.T) {
+ renames := map[string]string{
+ "a": "b",
+ "c": "d",
+ "a/1": "a/2",
+ }
+ testCases := [][2]string{
+ {"a", "b"},
+ {"a/1", "a/2"},
+ {"a/1/2", "a/2/2"},
+ {"a/1/2/3", "a/2/2/3"},
+ {"a/2/3/4", "b/2/3/4"},
+ {"a/2/3", "b/2/3"},
+ {"a/2", "b/2"},
+ {"c/2", "d/2"},
+ }
+ for i, testCase := range testCases {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ renamed := handleRename(renames, testCase[0])
+ assert.Equal(t, testCase[1], renamed, "expected to get %q, got %q", testCase[1], renamed)
+ })
+ }
+}
+
+func TestRemoveNoChroot(t *testing.T) {
+ couldChroot := canChroot
+ canChroot = false
+ testRemove(t)
+ canChroot = couldChroot
+}
+
// testRemove exercises Remove against a tree containing symlinks that point
// back up the tree ("parent") and at the root ("root").  Each case names a
// path to remove (possibly reached through a symlink), whether recursive
// removal ("all") is requested, whether the call should fail, and exactly
// which paths should be gone afterwards; the assertions compare a walk of
// the tree before and after the call.
func testRemove(t *testing.T) {
	type testCase struct {
		name    string   // subtest name
		remove  string   // path handed to Remove
		all     bool     // remove recursively
		fail    bool     // expect Remove to return an error
		removed []string // paths expected to disappear, relative to the root
	}
	testArchives := []struct {
		name      string
		headers   []tar.Header // layout of the pre-populated tree
		testCases []testCase
	}{
		{
			name: "regular",
			headers: []tar.Header{
				{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
				{Name: "subdir-a/file-a", Typeflag: tar.TypeReg, Mode: 0755, ModTime: testDate},
				{Name: "subdir-a/file-b", Typeflag: tar.TypeReg, Mode: 0755, ModTime: testDate},
				{Name: "subdir-a/subdir-b", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
				{Name: "subdir-a/subdir-b/subdir-c", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
				// "parent" points one level up, "link-b" at a file two
				// levels up, and "root" at the top of the tree.
				{Name: "subdir-a/subdir-b/subdir-c/parent", Typeflag: tar.TypeSymlink, Linkname: "..", ModTime: testDate},
				{Name: "subdir-a/subdir-b/subdir-c/link-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", ModTime: testDate},
				{Name: "subdir-a/subdir-b/subdir-c/root", Typeflag: tar.TypeSymlink, Linkname: "/", ModTime: testDate},
				{Name: "subdir-a/subdir-d", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
				{Name: "subdir-a/subdir-e", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
				{Name: "subdir-a/subdir-e/subdir-f", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
			},
			testCases: []testCase{
				{
					name:    "file",
					remove:  "subdir-a/file-a",
					removed: []string{"subdir-a/file-a"},
				},
				{
					name:    "file-all",
					remove:  "subdir-a/file-a",
					all:     true,
					removed: []string{"subdir-a/file-a"},
				},
				// Removing a non-empty directory without "all" must fail.
				{
					name:   "subdir",
					remove: "subdir-a/subdir-b",
					all:    false,
					fail:   true,
				},
				// Recursive removal takes the directory's contents,
				// including the symlinks, without following them.
				{
					name:   "subdir-all",
					remove: "subdir-a/subdir-b/subdir-c",
					all:    true,
					removed: []string{
						"subdir-a/subdir-b/subdir-c",
						"subdir-a/subdir-b/subdir-c/parent",
						"subdir-a/subdir-b/subdir-c/link-b",
						"subdir-a/subdir-b/subdir-c/root",
					},
				},
				// Removing a symlink removes the link itself, not its target.
				{
					name:    "file-link",
					remove:  "subdir-a/subdir-b/subdir-c/link-b",
					removed: []string{"subdir-a/subdir-b/subdir-c/link-b"},
				},
				{
					name:    "file-link-all",
					remove:  "subdir-a/subdir-b/subdir-c/link-b",
					all:     true,
					removed: []string{"subdir-a/subdir-b/subdir-c/link-b"},
				},
				// Intermediate symlinks in the path are resolved first.
				{
					name:    "file-link-indirect",
					remove:  "subdir-a/subdir-b/subdir-c/parent/subdir-c/link-b",
					removed: []string{"subdir-a/subdir-b/subdir-c/link-b"},
				},
				{
					name:    "file-link-indirect-all",
					remove:  "subdir-a/subdir-b/subdir-c/parent/subdir-c/link-b",
					all:     true,
					removed: []string{"subdir-a/subdir-b/subdir-c/link-b"},
				},
				{
					name:    "dir-link",
					remove:  "subdir-a/subdir-b/subdir-c/root",
					removed: []string{"subdir-a/subdir-b/subdir-c/root"},
				},
				{
					name:    "dir-link-all",
					remove:  "subdir-a/subdir-b/subdir-c/root",
					all:     true,
					removed: []string{"subdir-a/subdir-b/subdir-c/root"},
				},
				// Reaching an empty directory through the "root" link
				// removes the real directory.
				{
					name:    "dir-through-link",
					remove:  "subdir-a/subdir-b/subdir-c/root/subdir-a/subdir-d",
					removed: []string{"subdir-a/subdir-d"},
				},
				{
					name:    "dir-through-link-all",
					remove:  "subdir-a/subdir-b/subdir-c/root/subdir-a/subdir-d",
					all:     true,
					removed: []string{"subdir-a/subdir-d"},
				},
				// ...but a non-empty directory still needs "all".
				{
					name:   "tree-through-link",
					remove: "subdir-a/subdir-b/subdir-c/root/subdir-a/subdir-e",
					all:    false,
					fail:   true,
				},
				{
					name:    "tree-through-link-all",
					remove:  "subdir-a/subdir-b/subdir-c/root/subdir-a/subdir-e",
					all:     true,
					removed: []string{"subdir-a/subdir-e", "subdir-a/subdir-e/subdir-f"},
				},
			},
		},
	}
	for i := range testArchives {
		t.Run(testArchives[i].name, func(t *testing.T) {
			for _, testCase := range testArchives[i].testCases {
				t.Run(testCase.name, func(t *testing.T) {
					// Build a fresh tree for every case so removals
					// can't affect later expectations.
					dir, err := makeContextFromArchive(t, makeArchive(testArchives[i].headers, nil), "")
					require.NoErrorf(t, err, "error creating context from archive %q, topdir=%q", testArchives[i].name, "")
					root := dir
					options := RemoveOptions{All: testCase.all}
					// Catalog the tree before calling Remove.
					beforeNames := make(map[string]struct{})
					err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
						if err != nil {
							return err
						}
						rel, err := filepath.Rel(dir, path)
						if err != nil {
							return err
						}
						beforeNames[rel] = struct{}{}
						return nil
					})
					require.NoErrorf(t, err, "error walking directory to catalog pre-Remove contents: %v", err)
					err = Remove(root, testCase.remove, options)
					if testCase.fail {
						require.Errorf(t, err, "did not expect to succeed removing item %q under %q with Remove", testCase.remove, root)
						return
					}
					require.NoErrorf(t, err, "error removing item %q under %q with Remove: %v", testCase.remove, root, err)
					// Catalog it again afterwards.
					afterNames := make(map[string]struct{})
					err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
						if err != nil {
							return err
						}
						rel, err := filepath.Rel(dir, path)
						if err != nil {
							return err
						}
						afterNames[rel] = struct{}{}
						return nil
					})
					require.NoErrorf(t, err, "error walking directory to catalog post-Remove contents: %v", err)
					// The set difference (before - after) is what got removed.
					var removed []string
					for beforeName := range beforeNames {
						if _, stillPresent := afterNames[beforeName]; !stillPresent {
							removed = append(removed, beforeName)
						}
					}
					var expected []string
					for _, expect := range testCase.removed {
						expected = append(expected, filepath.FromSlash(expect))
					}
					sort.Strings(expected)
					sort.Strings(removed)
					assert.Equal(t, expected, removed, "expected different paths to be missing")
				})
			}
		})
	}
}
diff --git a/copier/copier_unix_test.go b/copier/copier_unix_test.go
new file mode 100644
index 0000000..fb9888b
--- /dev/null
+++ b/copier/copier_unix_test.go
@@ -0,0 +1,77 @@
+//go:build !windows
+// +build !windows
+
+package copier
+
+import (
+ "testing"
+)
+
+func TestPutChroot(t *testing.T) {
+ if uid != 0 {
+ t.Skip("chroot() requires root privileges, skipping")
+ }
+ couldChroot := canChroot
+ canChroot = true
+ testPut(t)
+ canChroot = couldChroot
+}
+
+func TestStatChroot(t *testing.T) {
+ if uid != 0 {
+ t.Skip("chroot() requires root privileges, skipping")
+ }
+ couldChroot := canChroot
+ canChroot = true
+ testStat(t)
+ canChroot = couldChroot
+}
+
+func TestGetSingleChroot(t *testing.T) {
+ if uid != 0 {
+ t.Skip("chroot() requires root privileges, skipping")
+ }
+ couldChroot := canChroot
+ canChroot = true
+ testGetSingle(t)
+ canChroot = couldChroot
+}
+
+func TestGetMultipleChroot(t *testing.T) {
+ if uid != 0 {
+ t.Skip("chroot() requires root privileges, skipping")
+ }
+ couldChroot := canChroot
+ canChroot = true
+ testGetMultiple(t)
+ canChroot = couldChroot
+}
+
+func TestEvalChroot(t *testing.T) {
+ if uid != 0 {
+ t.Skip("chroot() requires root privileges, skipping")
+ }
+ couldChroot := canChroot
+ canChroot = true
+ testEval(t)
+ canChroot = couldChroot
+}
+
+func TestMkdirChroot(t *testing.T) {
+ if uid != 0 {
+ t.Skip("chroot() requires root privileges, skipping")
+ }
+ couldChroot := canChroot
+ canChroot = true
+ testMkdir(t)
+ canChroot = couldChroot
+}
+
+func TestRemoveChroot(t *testing.T) {
+ if uid != 0 {
+ t.Skip("chroot() requires root privileges, skipping")
+ }
+ couldChroot := canChroot
+ canChroot = true
+ testRemove(t)
+ canChroot = couldChroot
+}
diff --git a/copier/hardlink_not_uint64.go b/copier/hardlink_not_uint64.go
new file mode 100644
index 0000000..062f489
--- /dev/null
+++ b/copier/hardlink_not_uint64.go
@@ -0,0 +1,15 @@
+//go:build darwin || (linux && mips) || (linux && mipsle) || (linux && mips64) || (linux && mips64le)
+// +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le
+
+package copier
+
+import (
+ "syscall"
+)
+
+func makeHardlinkDeviceAndInode(st *syscall.Stat_t) hardlinkDeviceAndInode {
+ return hardlinkDeviceAndInode{
+ device: uint64(st.Dev),
+ inode: uint64(st.Ino),
+ }
+}
diff --git a/copier/hardlink_uint64.go b/copier/hardlink_uint64.go
new file mode 100644
index 0000000..e739495
--- /dev/null
+++ b/copier/hardlink_uint64.go
@@ -0,0 +1,15 @@
+//go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd
+// +build linux,!mips,!mipsle,!mips64,!mips64le freebsd
+
+package copier
+
+import (
+ "syscall"
+)
+
+func makeHardlinkDeviceAndInode(st *syscall.Stat_t) hardlinkDeviceAndInode {
+ return hardlinkDeviceAndInode{
+ device: st.Dev,
+ inode: st.Ino,
+ }
+}
diff --git a/copier/hardlink_unix.go b/copier/hardlink_unix.go
new file mode 100644
index 0000000..fdc84db
--- /dev/null
+++ b/copier/hardlink_unix.go
@@ -0,0 +1,32 @@
+//go:build linux || darwin || freebsd
+// +build linux darwin freebsd
+
+package copier
+
+import (
+ "os"
+ "sync"
+ "syscall"
+)
+
// hardlinkDeviceAndInode is a comparable key identifying a file by the
// (device, inode) pair it occupies; two names with the same key are hard
// links to the same content.
type hardlinkDeviceAndInode struct {
	device, inode uint64
}

// hardlinkChecker records the first name seen for each (device, inode) pair
// so that later links to the same file can be emitted as hard links rather
// than copies.  The sync.Map makes Check/Add safe for concurrent use.
type hardlinkChecker struct {
	hardlinks sync.Map
}
+
+func (h *hardlinkChecker) Check(fi os.FileInfo) string {
+ if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
+ if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" {
+ return name.(string)
+ }
+ }
+ return ""
+}
+func (h *hardlinkChecker) Add(fi os.FileInfo, name string) {
+ if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
+ h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name)
+ }
+}
diff --git a/copier/hardlink_windows.go b/copier/hardlink_windows.go
new file mode 100644
index 0000000..ec71cce
--- /dev/null
+++ b/copier/hardlink_windows.go
@@ -0,0 +1,17 @@
+//go:build !linux && !darwin && !freebsd
+// +build !linux,!darwin,!freebsd
+
+package copier
+
+import (
+ "os"
+)
+
// hardlinkChecker is a stateless stub for platforms where we don't track
// hard links by device and inode number.
type hardlinkChecker struct{}

// Check always reports that fi's content has not been seen before.
func (h *hardlinkChecker) Check(fi os.FileInfo) string {
	return ""
}

// Add is a no-op on this platform.
func (h *hardlinkChecker) Add(fi os.FileInfo, name string) {
}
diff --git a/copier/mknod_int.go b/copier/mknod_int.go
new file mode 100644
index 0000000..b9e9f6f
--- /dev/null
+++ b/copier/mknod_int.go
@@ -0,0 +1,12 @@
+//go:build !windows && !freebsd
+// +build !windows,!freebsd
+
+package copier
+
+import (
+ "golang.org/x/sys/unix"
+)
+
// mknod creates a filesystem node (device node or FIFO) at path.  On these
// platforms unix.Mknod takes the device number as a plain int, so no
// conversion is needed.
func mknod(path string, mode uint32, dev int) error {
	return unix.Mknod(path, mode, dev)
}
diff --git a/copier/mknod_uint64.go b/copier/mknod_uint64.go
new file mode 100644
index 0000000..ccddf36
--- /dev/null
+++ b/copier/mknod_uint64.go
@@ -0,0 +1,12 @@
+//go:build freebsd
+// +build freebsd
+
+package copier
+
+import (
+ "golang.org/x/sys/unix"
+)
+
// mknod creates a filesystem node (device node or FIFO) at path.  FreeBSD's
// unix.Mknod takes a 64-bit device number, so widen dev before the call.
func mknod(path string, mode uint32, dev int) error {
	return unix.Mknod(path, mode, uint64(dev))
}
diff --git a/copier/syscall_unix.go b/copier/syscall_unix.go
new file mode 100644
index 0000000..99b2ee7
--- /dev/null
+++ b/copier/syscall_unix.go
@@ -0,0 +1,92 @@
+//go:build !windows
+// +build !windows
+
+package copier
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
// canChroot is true when we expect chroot() to be permitted, i.e. when we're
// running as root.  Tests toggle it to force either code path.
var canChroot = os.Getuid() == 0

// chroot attempts to chroot() into root, returning true if it actually did
// so.  When canChroot is false it quietly does nothing and returns
// (false, nil).
func chroot(root string) (bool, error) {
	if canChroot {
		// Move into the directory first so the working directory ends up
		// inside the new root.
		if err := os.Chdir(root); err != nil {
			return false, fmt.Errorf("changing to intended-new-root directory %q: %w", root, err)
		}
		if err := unix.Chroot(root); err != nil {
			return false, fmt.Errorf("chrooting to directory %q: %w", root, err)
		}
		// Re-anchor the working directory at the new "/".
		if err := os.Chdir(string(os.PathSeparator)); err != nil {
			return false, fmt.Errorf("changing to just-became-root directory %q: %w", root, err)
		}
		return true, nil
	}
	return false, nil
}
+
// chrMode combines permission bits with the character-device type bit, in
// the form expected by mknod().
func chrMode(mode os.FileMode) uint32 {
	return uint32(unix.S_IFCHR | mode)
}

// blkMode combines permission bits with the block-device type bit, in the
// form expected by mknod().
func blkMode(mode os.FileMode) uint32 {
	return uint32(unix.S_IFBLK | mode)
}

// mkdev packs a major/minor pair into the kernel's device-number encoding.
func mkdev(major, minor uint32) uint64 {
	return unix.Mkdev(major, minor)
}
+
// mkfifo creates a named pipe at path with the given mode bits.
func mkfifo(path string, mode uint32) error {
	return unix.Mkfifo(path, mode)
}

// chmod sets permission bits on path, following symlinks.
func chmod(path string, mode os.FileMode) error {
	return os.Chmod(path, mode)
}

// chown sets ownership of path, following symlinks.
func chown(path string, uid, gid int) error {
	return os.Chown(path, uid, gid)
}

// lchown sets ownership of path itself, without following a final symlink.
func lchown(path string, uid, gid int) error {
	return os.Lchown(path, uid, gid)
}
+
+func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
+ if atime.IsZero() || mtime.IsZero() {
+ now := time.Now()
+ if atime.IsZero() {
+ atime = now
+ }
+ if mtime.IsZero() {
+ mtime = now
+ }
+ }
+ return unix.Lutimes(path, []unix.Timeval{unix.NsecToTimeval(atime.UnixNano()), unix.NsecToTimeval(mtime.UnixNano())})
+}
+
+// sameDevice returns true unless we're sure that they're not on the same device
+func sameDevice(a, b os.FileInfo) bool {
+ aSys := a.Sys()
+ bSys := b.Sys()
+ if aSys == nil || bSys == nil {
+ return true
+ }
+ au, aok := aSys.(*syscall.Stat_t)
+ bu, bok := bSys.(*syscall.Stat_t)
+ if !aok || !bok {
+ return true
+ }
+ return au.Dev == bu.Dev
+}
+
const (
	// testModeMask selects the mode bits that tests compare on this
	// platform: the full permission bits, since Unix preserves them.
	testModeMask = int64(os.ModePerm)
	// testIgnoreSymlinkDates is false: symlink timestamps can be set and
	// checked here (presumably consumed by the tests — see *_test.go).
	testIgnoreSymlinkDates = false
)
diff --git a/copier/syscall_windows.go b/copier/syscall_windows.go
new file mode 100644
index 0000000..3a88d2d
--- /dev/null
+++ b/copier/syscall_windows.go
@@ -0,0 +1,88 @@
+// +build windows
+
+package copier
+
+import (
+ "errors"
+ "os"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+var canChroot = false
+
+func chroot(path string) (bool, error) {
+ return false, nil
+}
+
+func chrMode(mode os.FileMode) uint32 {
+ return windows.S_IFCHR | uint32(mode)
+}
+
+func blkMode(mode os.FileMode) uint32 {
+ return windows.S_IFBLK | uint32(mode)
+}
+
+func mkdev(major, minor uint32) uint64 {
+ return 0
+}
+
+func mkfifo(path string, mode uint32) error {
+ return syscall.ENOSYS
+}
+
+func mknod(path string, mode uint32, dev int) error {
+ return syscall.ENOSYS
+}
+
+func chmod(path string, mode os.FileMode) error {
+ err := os.Chmod(path, mode)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func chown(path string, uid, gid int) error {
+ err := os.Chown(path, uid, gid)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func lchown(path string, uid, gid int) error {
+ err := os.Lchown(path, uid, gid)
+ if err != nil && errors.Is(err, syscall.EWINDOWS) {
+ return nil
+ }
+ return err
+}
+
+func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
+ if isSymlink {
+ return nil
+ }
+ if atime.IsZero() || mtime.IsZero() {
+ now := time.Now()
+ if atime.IsZero() {
+ atime = now
+ }
+ if mtime.IsZero() {
+ mtime = now
+ }
+ }
+ return windows.UtimesNano(path, []windows.Timespec{windows.NsecToTimespec(atime.UnixNano()), windows.NsecToTimespec(mtime.UnixNano())})
+}
+
+// sameDevice returns true since we can't be sure that they're not on the same device
+func sameDevice(a, b os.FileInfo) bool {
+ return true
+}
+
+const (
+ testModeMask = int64(0600)
+ testIgnoreSymlinkDates = true
+)
diff --git a/copier/xattrs.go b/copier/xattrs.go
new file mode 100644
index 0000000..f5b2e73
--- /dev/null
+++ b/copier/xattrs.go
@@ -0,0 +1,101 @@
+//go:build linux || netbsd || freebsd || darwin
+// +build linux netbsd freebsd darwin
+
+package copier
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ xattrsSupported = true
+)
+
+var (
+ relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others
+ initialXattrListSize = 64 * 1024
+ initialXattrValueSize = 64 * 1024
+)
+
+// isRelevantXattr checks if "attribute" matches one of the attribute patterns
+// listed in the "relevantAttributes" list.
+func isRelevantXattr(attribute string) bool {
+ for _, relevant := range relevantAttributes {
+ matched, err := filepath.Match(relevant, attribute)
+ if err != nil || !matched {
+ continue
+ }
+ return true
+ }
+ return false
+}
+
+// Lgetxattrs returns a map of the relevant extended attributes set on the given file.
+func Lgetxattrs(path string) (map[string]string, error) {
+ maxSize := 64 * 1024 * 1024
+ listSize := initialXattrListSize
+ var list []byte
+ for listSize < maxSize {
+ list = make([]byte, listSize)
+ size, err := unix.Llistxattr(path, list)
+ if err != nil {
+ if errors.Is(err, syscall.ERANGE) {
+ listSize *= 2
+ continue
+ }
+ if errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.ENOSYS) {
+ // treat these errors listing xattrs as equivalent to "no xattrs"
+ list = list[:0]
+ break
+ }
+ return nil, fmt.Errorf("listing extended attributes of %q: %w", path, err)
+ }
+ list = list[:size]
+ break
+ }
+ if listSize >= maxSize {
+ return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path)
+ }
+ m := make(map[string]string)
+ for _, attribute := range strings.Split(string(list), string('\000')) {
+ if isRelevantXattr(attribute) {
+ attributeSize := initialXattrValueSize
+ var attributeValue []byte
+ for attributeSize < maxSize {
+ attributeValue = make([]byte, attributeSize)
+ size, err := unix.Lgetxattr(path, attribute, attributeValue)
+ if err != nil {
+ if errors.Is(err, syscall.ERANGE) {
+ attributeSize *= 2
+ continue
+ }
+ return nil, fmt.Errorf("getting value of extended attribute %q on %q: %w", attribute, path, err)
+ }
+ m[attribute] = string(attributeValue[:size])
+ break
+ }
+ if attributeSize >= maxSize {
+ return nil, fmt.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path)
+ }
+ }
+ }
+ return m, nil
+}
+
+// Lsetxattrs sets the relevant members of the specified extended attributes on the given file.
+func Lsetxattrs(path string, xattrs map[string]string) error {
+ for attribute, value := range xattrs {
+ if isRelevantXattr(attribute) {
+ if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil {
+ return fmt.Errorf("setting value of extended attribute %q on %q: %w", attribute, path, err)
+ }
+ }
+ }
+ return nil
+}
diff --git a/copier/xattrs_test.go b/copier/xattrs_test.go
new file mode 100644
index 0000000..7f3e280
--- /dev/null
+++ b/copier/xattrs_test.go
@@ -0,0 +1,57 @@
+package copier
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ // exercise the ERANGE-handling logic
+ initialXattrListSize = 1
+ initialXattrValueSize = 1
+}
+
+func TestXattrs(t *testing.T) {
+ if !xattrsSupported {
+ t.Skipf("xattrs are not supported on this platform, skipping")
+ }
+ testValues := map[string]string{
+ "user.a": "attribute value a",
+ "user.b": "attribute value b",
+ }
+ tmp := t.TempDir()
+ for attribute, value := range testValues {
+ t.Run(fmt.Sprintf("attribute=%s", attribute), func(t *testing.T) {
+ f, err := os.CreateTemp(tmp, "copier-xattr-test-")
+ if !assert.Nil(t, err, "error creating test file: %v", err) {
+ t.FailNow()
+ }
+ defer os.Remove(f.Name())
+
+ err = Lsetxattrs(f.Name(), map[string]string{attribute: value})
+ if errors.Is(err, syscall.ENOTSUP) {
+ t.Skipf("extended attributes not supported on %q, skipping", tmp)
+ }
+ if !assert.Nil(t, err, "error setting attribute on file: %v", err) {
+ t.FailNow()
+ }
+
+ xattrs, err := Lgetxattrs(f.Name())
+ if !assert.Nil(t, err, "error reading attributes of file: %v", err) {
+ t.FailNow()
+ }
+ xvalue, ok := xattrs[attribute]
+ if !assert.True(t, ok, "did not read back attribute %q for file", attribute) {
+ t.FailNow()
+ }
+ if !assert.Equal(t, value, xvalue, "read back different value for attribute %q", attribute) {
+ t.FailNow()
+ }
+ })
+ }
+}
diff --git a/copier/xattrs_unsupported.go b/copier/xattrs_unsupported.go
new file mode 100644
index 0000000..750d842
--- /dev/null
+++ b/copier/xattrs_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!netbsd,!freebsd,!darwin
+
+package copier
+
+const (
+ xattrsSupported = false
+)
+
+func Lgetxattrs(path string) (map[string]string, error) {
+ return nil, nil
+}
+
+func Lsetxattrs(path string, xattrs map[string]string) error {
+ return nil
+}
diff --git a/define/build.go b/define/build.go
new file mode 100644
index 0000000..95c9b91
--- /dev/null
+++ b/define/build.go
@@ -0,0 +1,336 @@
+package define
+
+import (
+ "io"
+ "time"
+
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage/pkg/archive"
+ "golang.org/x/sync/semaphore"
+)
+
+// AdditionalBuildContext contains verbose details about a parsed build context from --build-context
+type AdditionalBuildContext struct {
+ // Value is the URL of an external tar archive.
+ IsURL bool
+ // Value is the name of an image which may or may not have already been pulled.
+ IsImage bool
+ // Value holds a URL, an image name, or an absolute filesystem path.
+ Value string
+ // Absolute filesystem path to downloaded and exported build context
+ // from external tar archive. This will be populated only if following
+ // buildcontext is created from IsURL and was downloaded before in any
+ // of the RUN step.
+ DownloadedCache string
+}
+
+// CommonBuildOptions are resources that can be defined by flags for both buildah from and build
+type CommonBuildOptions struct {
+ // AddHost is the list of hostnames to add to the build container's /etc/hosts.
+ AddHost []string
+ // OmitHistory tells the builder to ignore the history of build layers and
+ // base while preparing image-spec, setting this to true will ensure no history
+ // is added to the image-spec. (default false)
+ OmitHistory bool
+ // CgroupParent is the path to cgroups under which the cgroup for the container will be created.
+ CgroupParent string
+ // CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
+ CPUPeriod uint64
+ // CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
+ CPUQuota int64
+	// CPUShares (relative weight)
+ CPUShares uint64
+ // CPUSetCPUs in which to allow execution (0-3, 0,1)
+ CPUSetCPUs string
+ // CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+ CPUSetMems string
+ // HTTPProxy determines whether *_proxy env vars from the build host are passed into the container.
+ HTTPProxy bool
+ // IdentityLabel if set ensures that default `io.buildah.version` label is not applied to build image.
+ IdentityLabel types.OptionalBool
+ // Memory is the upper limit (in bytes) on how much memory running containers can use.
+ Memory int64
+ // DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf
+ DNSSearch []string
+ // DNSServers is the list of DNS servers to add to the build container's /etc/resolv.conf
+ DNSServers []string
+	// DNSOptions is the list of DNS options to add to the build container's /etc/resolv.conf
+ DNSOptions []string
+	// LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
+ // Recognized field names are "role", "type", and "level".
+ LabelOpts []string
+ // MemorySwap limits the amount of memory and swap together.
+ MemorySwap int64
+ // NoHostname tells the builder not to create /etc/hostname content when running
+ // containers.
+ NoHostname bool
+ // NoHosts tells the builder not to create /etc/hosts content when running
+ // containers.
+ NoHosts bool
+ // NoNewPrivileges removes the ability for the container to gain privileges
+ NoNewPrivileges bool
+ // OmitTimestamp forces epoch 0 as created timestamp to allow for
+ // deterministic, content-addressable builds.
+ OmitTimestamp bool
+ // SeccompProfilePath is the pathname of a seccomp profile.
+ SeccompProfilePath string
+ // ApparmorProfile is the name of an apparmor profile.
+ ApparmorProfile string
+ // ShmSize is the "size" value to use when mounting an shmfs on the container's /dev/shm directory.
+ ShmSize string
+ // Ulimit specifies resource limit options, in the form type:softlimit[:hardlimit].
+ // These types are recognized:
+ // "core": maximum core dump size (ulimit -c)
+ // "cpu": maximum CPU time (ulimit -t)
+ // "data": maximum size of a process's data segment (ulimit -d)
+ // "fsize": maximum size of new files (ulimit -f)
+ // "locks": maximum number of file locks (ulimit -x)
+ // "memlock": maximum amount of locked memory (ulimit -l)
+ // "msgqueue": maximum amount of data in message queues (ulimit -q)
+ // "nice": niceness adjustment (nice -n, ulimit -e)
+ // "nofile": maximum number of open files (ulimit -n)
+ // "nproc": maximum number of processes (ulimit -u)
+ // "rss": maximum size of a process's (ulimit -m)
+ // "rtprio": maximum real-time scheduling priority (ulimit -r)
+ // "rttime": maximum amount of real-time execution between blocking syscalls
+ // "sigpending": maximum number of pending signals (ulimit -i)
+ // "stack": maximum stack size (ulimit -s)
+ Ulimit []string
+ // Volumes to bind mount into the container
+ Volumes []string
+ // Secrets are the available secrets to use in a build. Each item in the
+ // slice takes the form "id=foo,src=bar", where both "id" and "src" are
+ // required, in that order, and "bar" is the name of a file.
+ Secrets []string
+ // SSHSources is the available ssh agent connections to forward in the build
+ SSHSources []string
+ // OCIHooksDir is the location of OCI hooks for the build containers
+ OCIHooksDir []string
+}
+
+// BuildOptions can be used to alter how an image is built.
+type BuildOptions struct {
+	// ContainerSuffix is the name to suffix containers with
+ ContainerSuffix string
+ // ContextDirectory is the default source location for COPY and ADD
+ // commands.
+ ContextDirectory string
+ // PullPolicy controls whether or not we pull images. It should be one
+ // of PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+ PullPolicy PullPolicy
+ // Registry is a value which is prepended to the image's name, if it
+ // needs to be pulled and the image name alone can not be resolved to a
+ // reference to a source image. No separator is implicitly added.
+ Registry string
+ // IgnoreUnrecognizedInstructions tells us to just log instructions we
+ // don't recognize, and try to keep going.
+ IgnoreUnrecognizedInstructions bool
+ // Manifest Name to which the image will be added.
+ Manifest string
+ // Quiet tells us whether or not to announce steps as we go through them.
+ Quiet bool
+ // Isolation controls how Run() runs things.
+ Isolation Isolation
+ // Runtime is the name of the command to run for RUN instructions when
+ // Isolation is either IsolationDefault or IsolationOCI. It should
+ // accept the same arguments and flags that runc does.
+ Runtime string
+ // RuntimeArgs adds global arguments for the runtime.
+ RuntimeArgs []string
+ // TransientMounts is a list of mounts that won't be kept in the image.
+ TransientMounts []string
+ // CacheFrom specifies any remote repository which can be treated as
+ // potential cache source.
+ CacheFrom []reference.Named
+ // CacheTo specifies any remote repository which can be treated as
+ // potential cache destination.
+ CacheTo []reference.Named
+ // CacheTTL specifies duration, if specified using `--cache-ttl` then
+ // cache intermediate images under this duration will be considered as
+ // valid cache sources and images outside this duration will be ignored.
+ CacheTTL time.Duration
+ // Compression specifies the type of compression which is applied to
+ // layer blobs. The default is to not use compression, but
+ // archive.Gzip is recommended.
+ Compression archive.Compression
+ // Arguments which can be interpolated into Dockerfiles
+ Args map[string]string
+ // Map of external additional build contexts
+ AdditionalBuildContexts map[string]*AdditionalBuildContext
+ // Name of the image to write to.
+ Output string
+ // BuildOutput specifies if any custom build output is selected for following build.
+ // It allows end user to export recently built rootfs into a directory or tar.
+ // See the documentation of 'buildah build --output' for the details of the format.
+ BuildOutput string
+ // ConfidentialWorkload controls whether or not, and if so, how, we produce an
+ // image that's meant to be run using krun as a VM instead of a conventional
+ // process-type container.
+ ConfidentialWorkload ConfidentialWorkloadOptions
+ // Additional tags to add to the image that we write, if we know of a
+ // way to add them.
+ AdditionalTags []string
+ // Logfile specifies if log output is redirected to an external file
+ // instead of stdout, stderr.
+ LogFile string
+ // LogByPlatform tells imagebuildah to split log to different log files
+ // for each platform if logging to external file was selected.
+ LogSplitByPlatform bool
+ // Log is a callback that will print a progress message. If no value
+ // is supplied, the message will be sent to Err (or os.Stderr, if Err
+ // is nil) by default.
+ Log func(format string, args ...interface{})
+ // In is connected to stdin for RUN instructions.
+ In io.Reader
+ // Out is a place where non-error log messages are sent.
+ Out io.Writer
+ // Err is a place where error log messages should be sent.
+ Err io.Writer
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // SkipUnusedStages allows users to skip stages in a multi-stage builds
+ // which do not contribute anything to the target stage. Expected default
+ // value is true.
+ SkipUnusedStages types.OptionalBool
+ // ReportWriter is an io.Writer which will be used to report the
+ // progress of the (possible) pulling of the source image and the
+ // writing of the new image.
+ ReportWriter io.Writer
+ // OutputFormat is the format of the output image's manifest and
+ // configuration data.
+ // Accepted values are buildah.OCIv1ImageManifest and buildah.Dockerv2ImageManifest.
+ OutputFormat string
+ // SystemContext holds parameters used for authentication.
+ SystemContext *types.SystemContext
+ // NamespaceOptions controls how we set up namespaces processes that we
+ // might need when handling RUN instructions.
+ NamespaceOptions []NamespaceOption
+ // ConfigureNetwork controls whether or not network interfaces and
+ // routing are configured for a new network namespace (i.e., when not
+ // joining another's namespace and not just using the host's
+ // namespace), effectively deciding whether or not the process has a
+ // usable network.
+ ConfigureNetwork NetworkConfigurationPolicy
+ // CNIPluginPath is the location of CNI plugin helpers, if they should be
+ // run from a location other than the default location.
+ CNIPluginPath string
+ // CNIConfigDir is the location of CNI configuration files, if the files in
+ // the default configuration directory shouldn't be used.
+ CNIConfigDir string
+
+ // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
+ NetworkInterface nettypes.ContainerNetwork `json:"-"`
+
+ // ID mapping options to use if we're setting up our own user namespace
+ // when handling RUN instructions.
+ IDMappingOptions *IDMappingOptions
+ // AddCapabilities is a list of capabilities to add to the default set when
+ // handling RUN instructions.
+ AddCapabilities []string
+ // DropCapabilities is a list of capabilities to remove from the default set
+ // when handling RUN instructions. If a capability appears in both lists, it
+ // will be dropped.
+ DropCapabilities []string
+ // CommonBuildOpts is *required*.
+ CommonBuildOpts *CommonBuildOptions
+ // CPPFlags are additional arguments to pass to the C Preprocessor (cpp).
+ CPPFlags []string
+ // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
+ DefaultMountsFilePath string
+ // IIDFile tells the builder to write the image ID to the specified file
+ IIDFile string
+ // Squash tells the builder to produce an image with a single layer
+ // instead of with possibly more than one layer.
+ Squash bool
+ // Labels metadata for an image
+ Labels []string
+ // LayerLabels metadata for an intermediate image
+ LayerLabels []string
+ // Annotation metadata for an image
+ Annotations []string
+ // OnBuild commands to be run by images based on this image
+ OnBuild []string
+ // Layers tells the builder to create a cache of images for each step in the Dockerfile
+ Layers bool
+ // NoCache tells the builder to build the image from scratch without checking for a cache.
+ // It creates a new set of cached images for the build.
+ NoCache bool
+ // RemoveIntermediateCtrs tells the builder whether to remove intermediate containers used
+ // during the build process. Default is true.
+ RemoveIntermediateCtrs bool
+ // ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
+ // the build was unsuccessful.
+ ForceRmIntermediateCtrs bool
+ // BlobDirectory is a directory which we'll use for caching layer blobs.
+ BlobDirectory string
+ // Target the targeted FROM in the Dockerfile to build.
+ Target string
+ // Devices are the additional devices to add to the containers.
+ Devices []string
+ // SignBy is the fingerprint of a GPG key to use for signing images.
+ SignBy string
+ // Architecture specifies the target architecture of the image to be built.
+ Architecture string
+ // Timestamp sets the created timestamp to the specified time, allowing
+ // for deterministic, content-addressable builds.
+ Timestamp *time.Time
+	// OS specifies the operating system of the image to be built.
+ OS string
+ // MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one
+ // image from or to an external registry if the first attempt fails.
+ MaxPullPushRetries int
+ // PullPushRetryDelay is how long to wait before retrying a pull or push attempt.
+ PullPushRetryDelay time.Duration
+ // OciDecryptConfig contains the config that can be used to decrypt an image if it is
+ // encrypted if non-nil. If nil, it does not attempt to decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
+ // Jobs is the number of stages to run in parallel. If not specified it defaults to 1.
+ // Ignored if a JobSemaphore is provided.
+ Jobs *int
+ // JobSemaphore, for when you want Jobs to be shared with more than just this build.
+ JobSemaphore *semaphore.Weighted
+ // LogRusage logs resource usage for each step.
+ LogRusage bool
+ // File to which the Rusage logs will be saved to instead of stdout
+ RusageLogFile string
+ // Excludes is a list of excludes to be used instead of the .dockerignore file.
+ Excludes []string
+ // IgnoreFile is a name of the .containerignore file
+ IgnoreFile string
+ // From is the image name to use to replace the value specified in the first
+ // FROM instruction in the Containerfile
+ From string
+ // GroupAdd is a list of groups to add to the primary process within
+ // the container. 'keep-groups' allows container processes to use
+ // supplementary groups.
+ GroupAdd []string
+ // Platforms is the list of parsed OS/Arch/Variant triples that we want
+ // to build the image for. If this slice has items in it, the OS and
+ // Architecture fields above are ignored.
+ Platforms []struct{ OS, Arch, Variant string }
+ // AllPlatforms tells the builder to set the list of target platforms
+ // to match the set of platforms for which all of the build's base
+ // images are available. If this field is set, Platforms is ignored.
+ AllPlatforms bool
+ // UnsetEnvs is a list of environments to not add to final image.
+ UnsetEnvs []string
+ // UnsetLabels is a list of labels to not add to final image from base image.
+ UnsetLabels []string
+ // Envs is a list of environment variables to set in the final image.
+ Envs []string
+ // OSFeatures specifies operating system features the image requires.
+ // It is typically only set when the OS is "windows".
+ OSFeatures []string
+ // OSVersion specifies the exact operating system version the image
+ // requires. It is typically only set when the OS is "windows". Any
+ // value set in a base image will be preserved, so this does not
+ // frequently need to be set.
+ OSVersion string
+}
diff --git a/define/isolation.go b/define/isolation.go
new file mode 100644
index 0000000..53bea85
--- /dev/null
+++ b/define/isolation.go
@@ -0,0 +1,32 @@
+package define
+
+import (
+ "fmt"
+)
+
+type Isolation int
+
+const (
+ // IsolationDefault is whatever we think will work best.
+ IsolationDefault Isolation = iota
+ // IsolationOCI is a proper OCI runtime.
+ IsolationOCI
+ // IsolationChroot is a more chroot-like environment: less isolation,
+ // but with fewer requirements.
+ IsolationChroot
+ // IsolationOCIRootless is a proper OCI runtime in rootless mode.
+ IsolationOCIRootless
+)
+
+// String converts a Isolation into a string.
+func (i Isolation) String() string {
+ switch i {
+ case IsolationDefault, IsolationOCI:
+ return "oci"
+ case IsolationChroot:
+ return "chroot"
+ case IsolationOCIRootless:
+ return "rootless"
+ }
+ return fmt.Sprintf("unrecognized isolation type %d", i)
+}
diff --git a/define/mount_freebsd.go b/define/mount_freebsd.go
new file mode 100644
index 0000000..ae5ccc5
--- /dev/null
+++ b/define/mount_freebsd.go
@@ -0,0 +1,17 @@
+//go:build freebsd
+// +build freebsd
+
+package define
+
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind = "nullfs"
+
+ // TempDir is the default for storing temporary files
+ TempDir = "/var/tmp"
+)
+
+var (
+	// Mount options for bind
+ BindOptions = []string{}
+)
diff --git a/define/mount_linux.go b/define/mount_linux.go
new file mode 100644
index 0000000..9d59cb6
--- /dev/null
+++ b/define/mount_linux.go
@@ -0,0 +1,17 @@
+//go:build linux
+// +build linux
+
+package define
+
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind = "bind"
+
+ // TempDir is the default for storing temporary files
+ TempDir = "/dev/shm"
+)
+
+var (
+	// Mount options for bind
+ BindOptions = []string{"bind"}
+)
diff --git a/define/mount_unsupported.go b/define/mount_unsupported.go
new file mode 100644
index 0000000..fe09bfc
--- /dev/null
+++ b/define/mount_unsupported.go
@@ -0,0 +1,17 @@
+//go:build darwin || windows
+// +build darwin windows
+
+package define
+
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind = "bind"
+
+ // TempDir is the default for storing temporary files
+ TempDir = "/var/tmp"
+)
+
+var (
+	// Mount options for bind
+ BindOptions = []string{""}
+)
diff --git a/define/namespace.go b/define/namespace.go
new file mode 100644
index 0000000..d0247fe
--- /dev/null
+++ b/define/namespace.go
@@ -0,0 +1,87 @@
+package define
+
+import (
+ "fmt"
+)
+
+// NamespaceOption controls how we set up a namespace when launching processes.
+type NamespaceOption struct {
+ // Name specifies the type of namespace, typically matching one of the
+ // ...Namespace constants defined in
+ // github.com/opencontainers/runtime-spec/specs-go.
+ Name string
+ // Host is used to force our processes to use the host's namespace of
+ // this type.
+ Host bool
+ // Path is the path of the namespace to attach our process to, if Host
+ // is not set. If Host is not set and Path is also empty, a new
+ // namespace will be created for the process that we're starting.
+ // If Name is specs.NetworkNamespace, if Path doesn't look like an
+ // absolute path, it is treated as a comma-separated list of CNI
+ // configuration names which will be selected from among all of the CNI
+ // network configurations which we find.
+ Path string
+}
+
+// NamespaceOptions provides some helper methods for a slice of NamespaceOption
+// structs.
+type NamespaceOptions []NamespaceOption
+
+// Find the configuration for the namespace of the given type. If there are
+// duplicates, find the _last_ one of the type, since we assume it was appended
+// more recently.
+func (n *NamespaceOptions) Find(namespace string) *NamespaceOption {
+ for i := range *n {
+ j := len(*n) - 1 - i
+ if (*n)[j].Name == namespace {
+ return &((*n)[j])
+ }
+ }
+ return nil
+}
+
+// AddOrReplace either adds or replaces the configuration for a given namespace.
+func (n *NamespaceOptions) AddOrReplace(options ...NamespaceOption) {
+nextOption:
+ for _, option := range options {
+ for i := range *n {
+ j := len(*n) - 1 - i
+ if (*n)[j].Name == option.Name {
+ (*n)[j] = option
+ continue nextOption
+ }
+ }
+ *n = append(*n, option)
+ }
+}
+
+// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled,
+// or NetworkEnabled.
+type NetworkConfigurationPolicy int
+
+const (
+ // NetworkDefault is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that the default behavior should be used.
+ NetworkDefault NetworkConfigurationPolicy = iota
+ // NetworkDisabled is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that network interfaces should NOT be configured for
+ // newly-created network namespaces.
+ NetworkDisabled
+ // NetworkEnabled is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that network interfaces should be configured for
+ // newly-created network namespaces.
+ NetworkEnabled
+)
+
+// String formats a NetworkConfigurationPolicy as a string.
+func (p NetworkConfigurationPolicy) String() string {
+ switch p {
+ case NetworkDefault:
+ return "NetworkDefault"
+ case NetworkDisabled:
+ return "NetworkDisabled"
+ case NetworkEnabled:
+ return "NetworkEnabled"
+ }
+ return fmt.Sprintf("unknown NetworkConfigurationPolicy %d", p)
+}
diff --git a/define/pull.go b/define/pull.go
new file mode 100644
index 0000000..00787bd
--- /dev/null
+++ b/define/pull.go
@@ -0,0 +1,50 @@
+package define
+
+import (
+ "fmt"
+)
+
+// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+type PullPolicy int
+
+const (
+ // PullIfMissing is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should be pulled from a
+ // registry if a local copy of it is not already present.
+ PullIfMissing PullPolicy = iota
+ // PullAlways is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that a fresh, possibly updated, copy of the image
+ // should be pulled from a registry before the build proceeds.
+ PullAlways
+ // PullIfNewer is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should only be pulled
+ // from a registry if a local copy is not already present or if a
+	// newer version of the image is present on the repository.
+ PullIfNewer
+ // PullNever is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that the source image should not be pulled from a
+ // registry.
+ PullNever
+)
+
+// String converts a PullPolicy into a string.
+func (p PullPolicy) String() string {
+ switch p {
+ case PullIfMissing:
+ return "missing"
+ case PullAlways:
+ return "always"
+ case PullIfNewer:
+ return "ifnewer"
+ case PullNever:
+ return "never"
+ }
+ return fmt.Sprintf("unrecognized policy %d", p)
+}
+
+var PolicyMap = map[string]PullPolicy{
+ "missing": PullIfMissing,
+ "always": PullAlways,
+ "never": PullNever,
+ "ifnewer": PullIfNewer,
+}
diff --git a/define/pull_test.go b/define/pull_test.go
new file mode 100644
index 0000000..fbaf831
--- /dev/null
+++ b/define/pull_test.go
@@ -0,0 +1,13 @@
+package define
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPullPolicy(t *testing.T) {
+ for name, val := range PolicyMap {
+ assert.Equal(t, name, val.String())
+ }
+}
diff --git a/define/types.go b/define/types.go
new file mode 100644
index 0000000..5d0032a
--- /dev/null
+++ b/define/types.go
@@ -0,0 +1,311 @@
+package define
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ urlpkg "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/types"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+	// Package is the name of this package, used in help output and to
+	// identify working containers.
+	Package = "buildah"
+	// Version for the Package. Also used by .packit.sh for Packit builds.
+	Version = "1.33.5"
+
+	// DefaultRuntime is the fallback OCI runtime used if containers.conf
+	// cannot be consulted.
+	DefaultRuntime = "runc"
+
+	// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
+	// suitable for specifying as a value of the PreferredManifestType
+	// member of a CommitOptions structure. It is also the default.
+	OCIv1ImageManifest = v1.MediaTypeImageManifest
+	// Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
+	// manifest, suitable for specifying as a value of the
+	// PreferredManifestType member of a CommitOptions structure.
+	Dockerv2ImageManifest = manifest.DockerV2Schema2MediaType
+
+	// OCI used to define the "oci" image format
+	OCI = "oci"
+	// DOCKER used to define the "docker" image format
+	DOCKER = "docker"
+
+	// SEV is a known trusted execution environment type: AMD-SEV (secure encrypted virtualization using encrypted state, requires epyc 1000 "naples")
+	SEV TeeType = "sev"
+	// SNP is a known trusted execution environment type: AMD-SNP (SEV secure nested pages) (requires epyc 3000 "milan")
+	SNP TeeType = "snp"
+)
+
+// TeeType is a supported trusted execution environment type; the known
+// values are the SEV and SNP constants declared above.
+type TeeType string
+
+var (
+	// Deprecated: DefaultCapabilities values should be retrieved from
+	// github.com/containers/common/pkg/config
+	DefaultCapabilities = []string{
+		"CAP_AUDIT_WRITE",
+		"CAP_CHOWN",
+		"CAP_DAC_OVERRIDE",
+		"CAP_FOWNER",
+		"CAP_FSETID",
+		"CAP_KILL",
+		"CAP_MKNOD",
+		"CAP_NET_BIND_SERVICE",
+		"CAP_SETFCAP",
+		"CAP_SETGID",
+		"CAP_SETPCAP",
+		"CAP_SETUID",
+		"CAP_SYS_CHROOT",
+	}
+	// Deprecated: DefaultNetworkSysctl values should be retrieved from
+	// github.com/containers/common/pkg/config
+	DefaultNetworkSysctl = map[string]string{
+		"net.ipv4.ping_group_range": "0 0",
+	}
+
+	// Gzip, Bzip2, Xz, Zstd, and Uncompressed re-export the compression
+	// constants from containers/storage's archive package.
+	Gzip         = archive.Gzip
+	Bzip2        = archive.Bzip2
+	Xz           = archive.Xz
+	Zstd         = archive.Zstd
+	Uncompressed = archive.Uncompressed
+)
+
+// IDMappingOptions controls how we set up UID/GID mapping when we set up a
+// user namespace.  UIDMap/GIDMap use the runtime-spec LinuxIDMapping type,
+// and AutoUserNsOpts comes from containers/storage.
+type IDMappingOptions struct {
+	HostUIDMapping bool // presumably selects the host's UID mapping over UIDMap — TODO confirm against callers
+	HostGIDMapping bool // presumably selects the host's GID mapping over GIDMap — TODO confirm against callers
+	UIDMap         []specs.LinuxIDMapping
+	GIDMap         []specs.LinuxIDMapping
+	AutoUserNs     bool
+	AutoUserNsOpts types.AutoUserNsOptions
+}
+
+// Secret is a secret source that can be used in a RUN.  The interpretation
+// of Source depends on SourceType (not visible in this file — verify against
+// the code that consumes Secret values).
+type Secret struct {
+	ID         string
+	Source     string
+	SourceType string
+}
+
+// BuildOutputOption contains the outcome of parsing the value of a build --output flag.
+type BuildOutputOption struct {
+	Path     string // Only valid if !IsStdout
+	IsDir    bool
+	IsStdout bool
+}
+
+// ConfidentialWorkloadOptions encapsulates options which control whether or not
+// we output an image whose rootfs contains a LUKS-compatibly-encrypted disk image
+// instead of the usual rootfs contents.
+type ConfidentialWorkloadOptions struct {
+	Convert                  bool
+	AttestationURL           string
+	CPUs                     int
+	Memory                   int
+	TempDir                  string
+	TeeType                  TeeType // one of the TeeType constants (SEV, SNP)
+	IgnoreAttestationErrors  bool
+	WorkloadID               string
+	DiskEncryptionPassphrase string
+	Slop                     string
+	FirmwareLibrary          string
+}
+
+// TempDirForURL checks if the passed-in string looks like a URL or -. If it is,
+// TempDirForURL creates a temporary directory, arranges for its contents to be
+// the contents of that URL, and returns the temporary directory's path, along
+// with the name of a subdirectory which should be used as the build context
+// (which may be empty or "."). Removal of the temporary directory is the
+// responsibility of the caller. If the string doesn't look like a URL,
+// TempDirForURL returns empty strings and a nil error.
+func TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) {
+	if !strings.HasPrefix(url, "http://") &&
+		!strings.HasPrefix(url, "https://") &&
+		!strings.HasPrefix(url, "git://") &&
+		!strings.HasPrefix(url, "github.com/") &&
+		url != "-" {
+		return "", "", nil
+	}
+	name, err = os.MkdirTemp(dir, prefix)
+	if err != nil {
+		return "", "", fmt.Errorf("creating temporary directory for %q: %w", url, err)
+	}
+	urlParsed, err := urlpkg.Parse(url)
+	if err != nil {
+		return "", "", fmt.Errorf("parsing url %q: %w", url, err)
+	}
+	if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") {
+		combinedOutput, gitSubDir, err := cloneToDirectory(url, name)
+		if err != nil {
+			if err2 := os.RemoveAll(name); err2 != nil {
+				logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+			}
+			return "", "", fmt.Errorf("cloning %q to %q:\n%s: %w", url, name, string(combinedOutput), err)
+		}
+		return name, gitSubDir, nil
+	}
+	if strings.HasPrefix(url, "github.com/") {
+		ghurl := url
+		// Note: this assumes the repository's default branch is "master".
+		url = fmt.Sprintf("https://%s/archive/master.tar.gz", ghurl)
+		logrus.Debugf("resolving url %q to %q", ghurl, url)
+		subdir = path.Base(ghurl) + "-master"
+	}
+	if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
+		err = downloadToDirectory(url, name)
+		if err != nil {
+			if err2 := os.RemoveAll(name); err2 != nil {
+				logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+			}
+			return "", subdir, err
+		}
+		return name, subdir, nil
+	}
+	if url == "-" {
+		err = stdinToDirectory(name)
+		if err != nil {
+			if err2 := os.RemoveAll(name); err2 != nil {
+				logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+			}
+			return "", subdir, err
+		}
+		logrus.Debugf("Build context is at %q", name)
+		return name, subdir, nil
+	}
+	logrus.Debugf("don't know how to retrieve %q", url)
+	// NOTE(review): unlike the cleanup paths above this uses os.Remove, not
+	// os.RemoveAll; the directory is still empty here, so plain Remove works,
+	// but this branch should be unreachable given the prefix check on entry.
+	if err2 := os.Remove(name); err2 != nil {
+		logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+	}
+	return "", "", errors.New("unreachable code reached")
+}
+
+// parseGitBuildContext parses a git build context into its repository URL,
+// subdirectory, and branch/commit parts.  It accepts the format
+// `repourl.git[#[branch-or-commit][:subdir]]` and returns, in order:
+// repository, subdirectory (may be ""), branch-or-commit (may be "").
+func parseGitBuildContext(url string) (string, string, string) {
+	gitSubdir := ""
+	gitBranch := ""
+	gitBranchPart := strings.Split(url, "#")
+	if len(gitBranchPart) > 1 {
+		// check if string contains path to a subdir
+		gitSubDirPart := strings.Split(gitBranchPart[1], ":")
+		if len(gitSubDirPart) > 1 {
+			gitSubdir = gitSubDirPart[1]
+		}
+		gitBranch = gitSubDirPart[0]
+	}
+	return gitBranchPart[0], gitSubdir, gitBranch
+}
+
+// cloneToDirectory shallow-clones the repository named by the git build
+// context url into dir, checking out the requested branch/commit (or the
+// remote's default HEAD when none is given).  It returns the combined output
+// of the last git command that ran, the subdirectory named in the context,
+// and any error.
+func cloneToDirectory(url, dir string) ([]byte, string, error) {
+	var cmd *exec.Cmd
+	gitRepo, gitSubdir, gitBranch := parseGitBuildContext(url)
+	// init repo
+	cmd = exec.Command("git", "init", dir)
+	combinedOutput, err := cmd.CombinedOutput()
+	if err != nil {
+		return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git init`: %w", err)
+	}
+	// add origin
+	cmd = exec.Command("git", "remote", "add", "origin", gitRepo)
+	cmd.Dir = dir
+	combinedOutput, err = cmd.CombinedOutput()
+	if err != nil {
+		return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git remote add`: %w", err)
+	}
+	// fetch required branch or commit and perform checkout
+	// Always default to `HEAD` if nothing specified
+	fetch := "HEAD"
+	if gitBranch != "" {
+		fetch = gitBranch
+	}
+	logrus.Debugf("fetching repo %q and branch (or commit ID) %q to %q", gitRepo, fetch, dir)
+	cmd = exec.Command("git", "fetch", "--depth=1", "origin", "--", fetch)
+	cmd.Dir = dir
+	combinedOutput, err = cmd.CombinedOutput()
+	if err != nil {
+		return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git fetch`: %w", err)
+	}
+	if fetch == "HEAD" {
+		// We fetched default branch therefore
+		// we don't have any valid `branch` or
+		// `commit` name hence checkout detached
+		// `FETCH_HEAD`
+		fetch = "FETCH_HEAD"
+	}
+	cmd = exec.Command("git", "checkout", fetch)
+	cmd.Dir = dir
+	combinedOutput, err = cmd.CombinedOutput()
+	if err != nil {
+		return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git checkout`: %w", err)
+	}
+	return combinedOutput, gitSubdir, nil
+}
+
+// downloadToDirectory fetches url and untars the response body into dir.  If
+// untarring fails, the URL is fetched a second time and the raw body is
+// written to dir/Dockerfile instead.
+func downloadToDirectory(url, dir string) error {
+	logrus.Debugf("extracting %q to %q", url, dir)
+	resp, err := http.Get(url)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
+		return fmt.Errorf("invalid response status %d", resp.StatusCode)
+	}
+	// NOTE(review): ContentLength is -1 when the length is unknown, so this
+	// only rejects responses that explicitly declare a zero length.
+	if resp.ContentLength == 0 {
+		return fmt.Errorf("no contents in %q", url)
+	}
+	if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {
+		// Not an archive: re-fetch (the first body was consumed by Untar).
+		resp1, err := http.Get(url)
+		if err != nil {
+			return err
+		}
+		defer resp1.Body.Close()
+		body, err := io.ReadAll(resp1.Body)
+		if err != nil {
+			return err
+		}
+		dockerfile := filepath.Join(dir, "Dockerfile")
+		// Assume this is a Dockerfile
+		if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil {
+			return fmt.Errorf("failed to write %q to %q: %w", url, dockerfile, err)
+		}
+	}
+	return nil
+}
+
+// stdinToDirectory reads all of stdin and untars it into dir; if the data is
+// not an archive, it is written to dir/Dockerfile instead.
+func stdinToDirectory(dir string) error {
+	logrus.Debugf("extracting stdin to %q", dir)
+	r := bufio.NewReader(os.Stdin)
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return fmt.Errorf("failed to read from stdin: %w", err)
+	}
+	reader := bytes.NewReader(b)
+	if err := chrootarchive.Untar(reader, dir, nil); err != nil {
+		dockerfile := filepath.Join(dir, "Dockerfile")
+		// Assume this is a Dockerfile
+		if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil {
+			return fmt.Errorf("failed to write bytes to %q: %w", dockerfile, err)
+		}
+	}
+	return nil
+}
diff --git a/define/types_test.go b/define/types_test.go
new file mode 100644
index 0000000..4bde42a
--- /dev/null
+++ b/define/types_test.go
@@ -0,0 +1,29 @@
+package define
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+// TestParseGitBuildContext exercises repo/branch/subdir splitting.
+// NOTE(review): testify's assert.Equal signature is (t, expected, actual);
+// the calls below pass actual first.  Comparison is symmetric, so only the
+// failure messages are affected.
+func TestParseGitBuildContext(t *testing.T) {
+	// Tests with only repo
+	repo, subdir, branch := parseGitBuildContext("https://github.com/containers/repo.git")
+	assert.Equal(t, repo, "https://github.com/containers/repo.git")
+	assert.Equal(t, subdir, "")
+	assert.Equal(t, branch, "")
+	// Tests url with branch
+	repo, subdir, branch = parseGitBuildContext("https://github.com/containers/repo.git#main")
+	assert.Equal(t, repo, "https://github.com/containers/repo.git")
+	assert.Equal(t, subdir, "")
+	assert.Equal(t, branch, "main")
+	// Tests url with no branch and subdir
+	repo, subdir, branch = parseGitBuildContext("https://github.com/containers/repo.git#:mydir")
+	assert.Equal(t, repo, "https://github.com/containers/repo.git")
+	assert.Equal(t, subdir, "mydir")
+	assert.Equal(t, branch, "")
+	// Tests url with branch and subdir
+	repo, subdir, branch = parseGitBuildContext("https://github.com/containers/repo.git#main:mydir")
+	assert.Equal(t, repo, "https://github.com/containers/repo.git")
+	assert.Equal(t, subdir, "mydir")
+	assert.Equal(t, branch, "main")
+}
diff --git a/define/types_unix.go b/define/types_unix.go
new file mode 100644
index 0000000..c57e29d
--- /dev/null
+++ b/define/types_unix.go
@@ -0,0 +1,18 @@
+//go:build darwin || linux
+// +build darwin linux
+
+package define
+
+import (
+	"github.com/opencontainers/runc/libcontainer/devices"
+)
+
+// BuildahDevice is a wrapper around devices.Device
+// with additional support for renaming a device
+// using bind-mount in rootless environments.
+type BuildahDevice struct {
+	devices.Device
+	Source      string
+	Destination string
+}
+
+// ContainerDevices is the list of devices to make available in a container.
+type ContainerDevices = []BuildahDevice
diff --git a/define/types_unsupported.go b/define/types_unsupported.go
new file mode 100644
index 0000000..64e26d3
--- /dev/null
+++ b/define/types_unsupported.go
@@ -0,0 +1,6 @@
+//go:build !linux && !darwin
+// +build !linux,!darwin
+
+package define
+
+// ContainerDevices is currently not implemented.
+type ContainerDevices = []struct{}
diff --git a/delete.go b/delete.go
new file mode 100644
index 0000000..7adb7c5
--- /dev/null
+++ b/delete.go
@@ -0,0 +1,15 @@
+package buildah
+
+import "fmt"
+
+// Delete removes the working container. The buildah.Builder object should not
+// be used after this method is called: on success the MountPoint, Container,
+// and ContainerID fields are cleared.
+func (b *Builder) Delete() error {
+	if err := b.store.DeleteContainer(b.ContainerID); err != nil {
+		return fmt.Errorf("deleting build container %q: %w", b.ContainerID, err)
+	}
+	b.MountPoint = ""
+	b.Container = ""
+	b.ContainerID = ""
+	return nil
+}
diff --git a/demos/README.md b/demos/README.md
new file mode 100644
index 0000000..b3cb40c
--- /dev/null
+++ b/demos/README.md
@@ -0,0 +1,74 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Buildah Demos
+
+The purpose of these demonstrations is twofold:
+
+1. To help automate some of the tutorial material so that Buildah newcomers can walk through some of the concepts.
+2. For Buildah enthusiasts and practitioners to use for demos at educational presentations - college classes, Meetups etc.
+
+It is assumed that you have installed Buildah and Podman on your machine.
+
+ $ sudo yum -y install podman buildah
+
+For the Docker compatibility demo you will also need to install Docker.
+
+ $ sudo yum -y install docker
+
+Replace `yum` with `dnf` if required.
+
+## Building from scratch demo
+
+filename: [`buildah-scratch-demo.sh`](buildah-scratch-demo.sh)
+
+This demo builds a container image from scratch. The container is going to inject a bash shell script and therefore requires the installation of coreutils and bash.
+
+Please make sure you have installed Buildah and Podman. Also this demo uses Quay.io to push the image to that registry when it is completed. If you are not logged in then it will fail at that step and finish. If you wish to login to Quay.io before running the demo, then it will push to your repository successfully.
+
+```bash
+# Rootful session
+$ sudo buildah login quay.io
+#
+# or
+#
+# Rootless session
+$ buildah login quay.io
+```
+
+There are several variables you will want to set that are listed at the top of the script. The name for the container image, your Quay.io username, your name, and the Fedora release number:
+
+ demoimg=myshdemo
+ quayuser=UserNameHere
+ myname=YourNameHere
+ distrorelease=30
+ pkgmgr=dnf # switch to yum if using yum
+
+## Buildah and Docker compatibility demo
+
+filename: [`docker-compatibility-demo.sh`](docker-compatibility-demo.sh)
+
+This demo builds an nginx container image using Buildah. It modifies the homepage and commits the image. The container is tested using `podman run` and then stopped. The Docker daemon is then started and the image is pushed to the Docker repository. The container is started using `docker run` and tested.
+
+There are several variables you will want to set that are listed at the top of the script. The name for the container image, your Quay.io username, your name, and the Fedora release number:
+
+ demoimg=dockercompatibilitydemo
+ quayuser=UsernameHere
+ myname=YourNameHere
+ distro=fedora
+ distrorelease=30
+ pkgmgr=dnf # switch to yum if using yum
+
+## Buildah build using Docker demo
+
+filename: [`buildah-bud-demo.sh`](buildah-bud-demo.sh)
+
+This demo builds an nginx container image using Buildah. Buildah's build-using-docker, or `bud`, option provides a mechanism for using existing Dockerfiles to build the container image. This image is the same as the image in the Docker compatibility demo (at time of creating this README). The container is tested using `podman run` and then stopped. The Docker daemon is then started and the image is pushed to the Docker repository. The container is started using `docker run` and tested.
+
+There are several variables you will want to set that are listed at the top of the script. The name for the container image, your Quay.io username, your name, and the Fedora release number:
+
+ demoimg=buildahbuddemo
+ quayuser=UsernameHere
+ myname=YourNameHere
+ distro=fedora
+ distrorelease=30
+ pkgmgr=dnf # switch to yum if using yum
diff --git a/demos/buildah-bud-demo.sh b/demos/buildah-bud-demo.sh
new file mode 100755
index 0000000..6ca5383
--- /dev/null
+++ b/demos/buildah-bud-demo.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+# buildah-bud-demo.sh
+# author : ipbabble
+# Assumptions install buildah, podman & docker
+# Do NOT start the docker daemon
+# Set some of the variables below
+
+demoimg=buildahbuddemo
+quayuser=ipbabble
+myname="William Henry"
+distro=fedora
+distrorelease=30
+pkgmgr=dnf # switch to yum if using yum
+
+#Setting up some colors for helping read the demo output
+red=$(tput setaf 1)
+green=$(tput setaf 2)
+yellow=$(tput setaf 3)
+blue=$(tput setaf 4)
+cyan=$(tput setaf 6)
+reset=$(tput sgr0)
+
+echo -e "Using ${green}GREEN${reset} to introduce Buildah steps"
+echo -e "Using ${yellow}YELLOW${reset} to introduce code"
+echo -e "Using ${blue}BLUE${reset} to introduce Podman steps"
+echo -e "Using ${cyan}CYAN${reset} to introduce bash commands"
+echo -e "Using ${red}RED${reset} to introduce Docker commands"
+
+echo -e "Building an image called ${demoimg}"
+read -p "${green}Start of the script${reset}"
+
+set -x
+DOCKERFILE=./Dockerfile
+/bin/cat <<EOM >$DOCKERFILE
+FROM docker://docker.io/fedora:latest
+MAINTAINER ${myname}
+
+
+RUN dnf -y update; dnf -y clean all
+RUN dnf -y install nginx --setopt install_weak_deps=false; dnf -y clean all
+RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+RUN echo "nginx on Fedora" > /usr/share/nginx/html/index.html
+
+EXPOSE 80
+
+CMD [ "/usr/sbin/nginx" ]
+EOM
+read -p "${cyan}Display the Dockerfile:${reset}"
+cat $DOCKERFILE
+read -p "${green}Create a new container image from Dockerfile${reset}"
+buildah bud -t $demoimg .
+read -p "${green}List the images we have.${reset}"
+buildah images
+read -p "${green}Inspect the container image meta data${yellow}"
+buildah inspect --type image $demoimg
+read -p "${blue}Run the container using Podman.${reset}"
+containernum=$(podman run -d -p 80:80 $demoimg)
+read -p "${cyan}Check that nginx is up and running with our new page${reset}"
+curl localhost
+read -p "${blue}Stop the container and rm it${reset}"
+podman ps
+podman stop $containernum
+podman rm $containernum
+read -p "${cyan}Check that nginx is down${reset}"
+curl localhost
+read -p "${cyan}Start the Docker daemon. Using restart in case it is already started${reset}"
+systemctl restart docker
+read -p "${red}List the Docker images in the repository - should be empty${reset}"
+docker images
+read -p "${blue}Push the image to the local Docker repository using docker-daemon${reset}"
+podman push $demoimg docker-daemon:$quayuser/${demoimg}:latest
+read -p "${red}List the Docker images in the repository${reset}"
+docker images
+read -p "${red}Start the container from the new Docker repo image${reset}"
+dockercontainer=$(docker run -d -p 80:80 $quayuser/$demoimg)
+read -p "${cyan}Check that nginx is up and running with our new page${reset}"
+curl localhost
+read -p "${red}Stop the container and remove it and the image${reset}"
+docker stop $dockercontainer
+docker rm $dockercontainer
+# The image was pushed to the Docker daemon tagged $quayuser/$demoimg:latest,
+# so that is the name it must be removed under (plain $demoimg does not exist
+# in the Docker image store).
+docker rmi $quayuser/$demoimg
+read -p "${cyan}Stop Docker${reset}"
+systemctl stop docker
+echo -e "${red}We are done!${reset}"
diff --git a/demos/buildah-scratch-demo.sh b/demos/buildah-scratch-demo.sh
new file mode 100755
index 0000000..315ff13
--- /dev/null
+++ b/demos/buildah-scratch-demo.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+
+# author : ipbabble
+# Assumptions install buildah and podman
+# login to Quay.io using buildah if you want to see the image push
+# otherwise it will just fail the last step and no biggy.
+# buildah login quay.io
+# Set some of the variables below
+
+#################
+# is_rootless # Check if we run as normal user
+#################
+function is_rootless() {
+    [ "$(id -u)" -ne 0 ]
+}
+
+## Steps in this demo use pkg-managers like dnf and yum which
+## must be invoked as root. Similarly `buildah mount` only work
+## as root. The `buildah unshare` command switches your user
+## session to root within the user namespace.
+if is_rootless; then
+    buildah unshare $0
+    exit
+fi
+
+demoimg=myshdemo
+quayuser=ipbabble
+myname=WilliamHenry
+distrorelease=30
+pkgmgr=dnf # switch to yum if using yum
+
+#Setting up some colors for helping read the demo output
+bold=$(tput bold)
+red=$(tput setaf 1)
+green=$(tput setaf 2)
+yellow=$(tput setaf 3)
+blue=$(tput setaf 4)
+cyan=$(tput setaf 6)
+reset=$(tput sgr0)
+
+echo -e "Using ${green}GREEN${reset} to introduce Buildah steps"
+echo -e "Using ${yellow}YELLOW${reset} to introduce code"
+echo -e "Using ${blue}BLUE${reset} to introduce Podman steps"
+echo -e "Using ${cyan}CYAN${reset} to introduce bash commands"
+echo -e "Building an image called ${demoimg}"
+read -p "${green}Start of the script${reset}"
+
+set -x
+read -p "${green}Create a new container on disk from scratch${reset}"
+newcontainer=$(buildah from scratch)
+read -p "${green}Mount the root directory of the new scratch container${reset}"
+scratchmnt=$(buildah mount $newcontainer)
+read -p "${cyan}Lets see what is in scratchmnt${reset}"
+ls $scratchmnt
+echo -e "${red}Note that the root of the scratch container is EMPTY!${reset}"
+read -p "${cyan}Time to install some basic bash capabilities: coreutils and bash packages${reset}"
+if [ "$pkgmgr" == "dnf" ]; then
+    # NOTE(review): dnf's documented option is --releasever (as used in the
+    # yum branch below); confirm that the abbreviated --release is accepted
+    # by the targeted dnf version.
+    $pkgmgr install --installroot $scratchmnt --release ${distrorelease} bash coreutils --setopt install_weak_deps=false -y
+elif [ "$pkgmgr" == "yum" ]; then
+    $pkgmgr install --installroot $scratchmnt --releasever ${distrorelease} bash coreutils -y
+else
+    echo -e "${red}[Error] Unknown package manager ${pkgmgr}${reset}"
+fi
+
+read -p "${cyan}Clean up the packages${reset}"
+$pkgmgr clean --installroot $scratchmnt all
+read -p "${green}Run the shell and see what is inside. When your done, type ${red}exit${green} and return.${reset}"
+buildah run $newcontainer bash
+read -p "${cyan}Let's look at the program${yellow}"
+FILE=./runecho.sh
+/bin/cat <<EOM >$FILE
+#!/usr/bin/env bash
+for i in {1..9};
+do
+    echo "This is a new cloud native container using Buildah [" \$i "]"
+done
+EOM
+chmod +x $FILE
+cat $FILE
+read -p "${green}Copy program into the container and run ls to see it is there${reset}"
+buildah copy $newcontainer $FILE /usr/bin
+ls -al $scratchmnt/usr/bin/*.sh
+read -p "${green}Run the container using Buildah${reset}"
+buildah run $newcontainer /usr/bin/runecho.sh
+read -p "${green}Make the container run the program by default when container is run${reset}"
+buildah config --entrypoint /usr/bin/runecho.sh $newcontainer
+read -p "${green}Set some config information for the container image${reset}"
+buildah config --author "${myname}" --created-by "${quayuser}" --label name=${demoimg} $newcontainer
+read -p "${green}Inspect the meta data${yellow}"
+buildah inspect $newcontainer
+read -p "${green}Unmount the container and commit to an image called ${demoimg}.${reset}"
+buildah unmount $newcontainer
+buildah commit $newcontainer $demoimg
+read -p "${green}List the images we have.${reset}"
+buildah images
+read -p "${blue}Run the container using Podman.${reset}"
+podman run -t $demoimg
+read -p "${green}Make sure you are already logged into your account on Quay.io. Or use Quay creds.${reset}"
+buildah push $demoimg docker://quay.io/$quayuser/$demoimg
+echo -e "${red}We are done!${reset}"
diff --git a/demos/buildah_multi_stage.sh b/demos/buildah_multi_stage.sh
new file mode 100755
index 0000000..43af14b
--- /dev/null
+++ b/demos/buildah_multi_stage.sh
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+
+# author : tsweeney (based on ipbabble's other demos)
+# Based on Alex Ellis blog (https://blog.alexellis.io/mutli-stage-docker-builds - note multi is misspelled)
+# Assumptions install buildah and podman
+# Set some of the variables below
+
+
+#################
+# is_rootless # Check if we run as normal user
+#################
+function is_rootless() {
+    [ "$(id -u)" -ne 0 ]
+}
+
+## The `buildah mount` only work as root so use
+## `buildah unshare` command which switches your
+## user session to root within the user namespace.
+if is_rootless; then
+    buildah unshare $0
+    exit
+fi
+
+demoimg=mymultidemo
+quayuser=myquayuser
+myname=MyName
+distrorelease=30
+pkgmgr=dnf # switch to yum if using yum
+
+#Setting up some colors for helping read the demo output
+bold=$(tput bold)
+red=$(tput setaf 1)
+green=$(tput setaf 2)
+yellow=$(tput setaf 3)
+blue=$(tput setaf 4)
+cyan=$(tput setaf 6)
+reset=$(tput sgr0)
+
+echo -e "Using ${green}GREEN${reset} to introduce Buildah steps"
+echo -e "Using ${yellow}YELLOW${reset} to introduce code"
+echo -e "Using ${blue}BLUE${reset} to introduce Podman steps"
+echo -e "Using ${cyan}CYAN${reset} to introduce bash commands"
+echo -e "Building an image called ${demoimg}"
+read -p "${green}Start of the script${reset}"
+
+set -x
+read -p "${yellow}Create Dockerfile.multi${reset}"
+FILE=./Dockerfile.multi
+/bin/cat <<EOM >$FILE
+FROM golang:1.7.3 as builder
+WORKDIR /go/src/github.com/alexellis/href-counter/
+RUN go get -d -v golang.org/x/net/html
+COPY app.go .
+RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o app .
+FROM alpine:latest
+RUN apk --no-cache add ca-certificates
+WORKDIR /root/
+COPY --from=builder /go/src/github.com/alexellis/href-counter/app .
+CMD ["./app"]
+EOM
+# NOTE(review): chmod +x is unnecessary here — Dockerfile.multi is read, not executed.
+chmod +x $FILE
+read -p "${yellow}Let's look at our Dockerfile.multi${reset}"
+cat ./Dockerfile.multi
+read -p "${yellow}Pull app.go from GitHub${reset}"
+curl https://raw.githubusercontent.com/alexellis/href-counter/master/app.go > app.go
+read -p "${green}Create a new image on disk from Dockerfile.multi${reset}"
+# NOTE(review): `buildah bud` produces an image, not a container; $newcontainer
+# captures the command's stdout (build log/image ID) and is never used below.
+newcontainer=$(buildah bud -t multifromfile:latest -f ./Dockerfile.multi .)
+read -p "${blue}Run the multifromfile container${reset}"
+podman run --network=host -e url=https://www.alexellis.io/ multifromfile:latest
+podman run --network=host -e url=https://www.alexellis.io/ multifromfile:latest
+read -p "${green}Let's check the size of the images${reset}"
+buildah images
+read -p "${green}Let's clear out our containers${reset}"
+buildah rm -a
+
+read -p "${green}Let's build the container with Buildah, first GoLang${reset}"
+buildcntr=$(buildah from golang:1.7.3)
+read -p "${green}Let's mount the container getting the root directory${reset}"
+buildmnt=$(buildah mount $buildcntr)
+read -p "${green}Let's get x/net/html into the container${reset}"
+buildah run $buildcntr go get -d -v golang.org/x/net/html
+read -p "${yellow}Copy app.go into the container${reset}"
+cp app.go $buildmnt/go
+read -p "${green}Build app.go inside the container${reset}"
+buildah run $buildcntr /bin/sh -c "CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o app ."
+
+read -p "${green}Build new image to run application in production${reset}"
+rtcntr=$(buildah from alpine:latest)
+read -p "${green}Mount the new images root fs${reset}"
+rtmnt=$(buildah mount $rtcntr)
+read -p "${green}Install required packages${reset}"
+buildah run $rtcntr apk --no-cache add ca-certificates
+read -p "${yellow}Copy the app from the previous container${reset}"
+cp $buildmnt/go/app $rtmnt
+read -p "${yellow}Set the CMD for the container${reset}"
+buildah config --cmd ./app $rtcntr
+read -p "${yellow}Unmount and commit the rtimg${reset}"
+buildah unmount $rtcntr
+buildah commit $rtcntr multifrombuildah:latest
+
+read -p "${blue}Run the multifrombuildah container${reset}"
+podman run --network=host -e url=https://www.alexellis.io/ multifrombuildah:latest
+podman run --network=host -e url=https://www.alexellis.io/ multifrombuildah:latest
+
+read -p "${green}Let's check the size of the images${reset}"
+buildah images
+read -p "${green}Let's clear out our containers${reset}"
+buildah rm -a
+read -p "${green}Let's clear out our images${reset}"
+buildah rmi -a -f
+read -p "${green}Let's remove app.go and Dockerfile.multi${reset}"
+rm ./app.go ./Dockerfile.multi
+
+echo -e "${red}We are done!${reset}"
diff --git a/demos/docker-compatibility-demo.sh b/demos/docker-compatibility-demo.sh
new file mode 100755
index 0000000..4e6a726
--- /dev/null
+++ b/demos/docker-compatibility-demo.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+
+# docker-compatibility-demo.sh
+# author : ipbabble
+# Assumptions install buildah, podman & docker
+# Do NOT start the docker daemon
+# Set some of the variables below
+
+demoimg=dockercompatibilitydemo
+quayuser=ipbabble
+myname="William Henry"
+distro=fedora
+distrorelease=30
+pkgmgr=dnf # switch to yum if using yum
+
+#Setting up some colors for helping read the demo output
+bold=$(tput bold)
+red=$(tput setaf 1)
+green=$(tput setaf 2)
+yellow=$(tput setaf 3)
+blue=$(tput setaf 4)
+cyan=$(tput setaf 6)
+reset=$(tput sgr0)
+
+echo -e "Using ${green}GREEN${reset} to introduce Buildah steps"
+echo -e "Using ${yellow}YELLOW${reset} to introduce code"
+echo -e "Using ${blue}BLUE${reset} to introduce Podman steps"
+echo -e "Using ${cyan}CYAN${reset} to introduce bash commands"
+echo -e "Using ${red}RED${reset} to introduce Docker commands"
+
+echo -e "Building an image called ${demoimg}"
+read -p "${green}Start of the script${reset}"
+
+set -x
+read -p "${green}Create a new container on disk from ${distro}${reset}"
+newcontainer=$(buildah from ${distro})
+read -p "${green}Update packages and clean all ${reset}"
+buildah run $newcontainer -- ${pkgmgr} -y update && ${pkgmgr} -y clean all
+read -p "${green}Install nginx${reset}"
+buildah run $newcontainer -- ${pkgmgr} -y install nginx && ${pkgmgr} -y clean all
+read -p "${green}Make some nginx config and home page changes ${reset}"
+buildah run $newcontainer bash -c 'echo "daemon off;" >> /etc/nginx/nginx.conf'
+buildah run $newcontainer bash -c 'echo "nginx on OCI Fedora image, built using Buildah" > /usr/share/nginx/html/index.html'
+read -p "${green}Use buildah config to expose the port and set the entrypoint${reset}"
+buildah config --port 80 --entrypoint /usr/sbin/nginx $newcontainer
+read -p "${green}Set other meta data using buildah config${reset}"
+buildah config --created-by "${quayuser}" $newcontainer
+buildah config --author "${myname}" --label name=$demoimg $newcontainer
+read -p "${green}Inspect the container image meta data${yellow}"
+buildah inspect $newcontainer
+read -p "${green}Commit the container to an OCI image called ${demoimg}.${reset}"
+buildah commit $newcontainer $demoimg
+read -p "${green}List the images we have.${reset}"
+buildah images
+read -p "${blue}Run the container using Podman.${reset}"
+containernum=$(podman run -d -p 80:80 $demoimg)
+read -p "${cyan}Check that nginx is up and running with our new page${reset}"
+curl localhost
+read -p "${blue}Stop the container and rm it${reset}"
+podman ps
+podman stop $containernum
+podman rm $containernum
+read -p "${cyan}Check that nginx is down${reset}"
+curl localhost
+read -p "${cyan}Start the Docker daemon. Using restart in case it is already started${reset}"
+systemctl restart docker
+read -p "${red}List the Docker images in the repository - should be empty${reset}"
+docker images
+read -p "${blue}Push the image to the local Docker repository using docker-daemon${reset}"
+podman push $demoimg docker-daemon:$quayuser/dockercompatibilitydemo:latest
+read -p "${red}List the Docker images in the repository${reset}"
+docker images
+read -p "${red}Start the container from the new Docker repo image${reset}"
+dockercontainer=$(docker run -d -p 80:80 $quayuser/$demoimg)
+read -p "${cyan}Check that nginx is up and running with our new page${reset}"
+curl localhost
+read -p "${red}Stop the container and rm it${reset}"
+docker stop $dockercontainer
+docker rm $dockercontainer
+docker rmi $demoimg
+read -p "${cyan}Stop Docker${reset}"
+systemctl stop docker
+echo -e "${red}We are done!${reset}"
diff --git a/developmentplan.md b/developmentplan.md
new file mode 100644
index 0000000..7d82c2e
--- /dev/null
+++ b/developmentplan.md
@@ -0,0 +1,13 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Development Plan
+
+## Development goals for Buildah
+
+ * Integration into Kubernetes and potentially other tools. The biggest requirement for this is to be able to run Buildah within a standard Linux container without SYS_ADMIN privileges. This would allow Buildah to run non-privileged containers inside of Kubernetes, so you could distribute your container workloads.
+
+ * Integration with user namespaces: Podman has this already, and the goal is to get `buildah build` and `buildah run` to run their containers in a user namespace to give the builder better security isolation from the host.
+
+ * The `buildah build` command's goal is to have feature parity with other OCI image and container build systems.
+
+ * Addressing issues from the community as reported in the [Issues](https://github.com/containers/buildah/issues) page.
diff --git a/digester.go b/digester.go
new file mode 100644
index 0000000..0ed8fa4
--- /dev/null
+++ b/digester.go
@@ -0,0 +1,269 @@
+package buildah
+
+import (
+ "archive/tar"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "sync"
+ "time"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
// digester is an io.WriteCloser that computes a digest over everything
// written to it.  ContentType reports the kind of content being digested
// ("", "file", or "dir" in this package), and Digest returns the digest
// computed so far.
type digester interface {
	io.WriteCloser
	ContentType() string
	Digest() digest.Digest
}
+
+// A simple digester just digests its content as-is.
+type simpleDigester struct {
+ digester digest.Digester
+ hasher hash.Hash
+ contentType string
+}
+
+func newSimpleDigester(contentType string) digester {
+ finalDigester := digest.Canonical.Digester()
+ return &simpleDigester{
+ digester: finalDigester,
+ hasher: finalDigester.Hash(),
+ contentType: contentType,
+ }
+}
+
+func (s *simpleDigester) ContentType() string {
+ return s.contentType
+}
+
+func (s *simpleDigester) Write(p []byte) (int, error) {
+ return s.hasher.Write(p)
+}
+
+func (s *simpleDigester) Close() error {
+ return nil
+}
+
+func (s *simpleDigester) Digest() digest.Digest {
+ return s.digester.Digest()
+}
+
// A tarFilterer passes a tarball through to an io.WriteCloser, potentially
// modifying headers as it goes.
type tarFilterer struct {
	wg         sync.WaitGroup // tracks the background goroutine started by newTarFilterer
	pipeWriter *io.PipeWriter // write side of the pipe that feeds the goroutine
	closedLock sync.Mutex     // guards closed
	closed     bool           // set once Close has been called
	err        error          // error recorded by the goroutine; read only after wg.Wait
}
+
// Write passes p through the pipe to the background filtering goroutine.
func (t *tarFilterer) Write(p []byte) (int, error) {
	return t.pipeWriter.Write(p)
}
+
+func (t *tarFilterer) Close() error {
+ t.closedLock.Lock()
+ if t.closed {
+ t.closedLock.Unlock()
+ return errors.New("tar filter is already closed")
+ }
+ t.closed = true
+ t.closedLock.Unlock()
+ err := t.pipeWriter.Close()
+ t.wg.Wait()
+ if err != nil {
+ return fmt.Errorf("closing filter pipe: %w", err)
+ }
+ return t.err
+}
+
// newTarFilterer passes one or more tar archives through to an io.WriteCloser
// as a single archive, potentially calling filter to modify headers and
// contents as it goes.
//
// Note: if "filter" indicates that a given item should be skipped, there is no
// guarantee that there will not be a subsequent item of type TypeLink, which
// is a hard link, which points to the skipped item as the link target.
func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader)) io.WriteCloser {
	pipeReader, pipeWriter := io.Pipe()
	tarWriter := tar.NewWriter(writeCloser)
	filterer := &tarFilterer{
		pipeWriter: pipeWriter,
	}
	filterer.wg.Add(1)
	go func() {
		filterer.closedLock.Lock()
		closed := filterer.closed
		filterer.closedLock.Unlock()
		// Each pass through this outer loop consumes one complete tar
		// archive from the pipe; callers may write several archives
		// back-to-back before calling Close.
		for !closed {
			tarReader := tar.NewReader(pipeReader)
			hdr, err := tarReader.Next()
			for err == nil {
				var skip, replaceContents bool
				var replacementContents io.Reader
				if filter != nil {
					skip, replaceContents, replacementContents = filter(hdr)
				}
				if !skip {
					err = tarWriter.WriteHeader(hdr)
					if err != nil {
						err = fmt.Errorf("filtering tar header for %q: %w", hdr.Name, err)
						break
					}
					if hdr.Size != 0 {
						var n int64
						var copyErr error
						if replaceContents {
							// The filter supplied substitute contents; hdr.Size is
							// expected to already reflect their length.
							n, copyErr = io.CopyN(tarWriter, replacementContents, hdr.Size)
						} else {
							n, copyErr = io.Copy(tarWriter, tarReader)
						}
						if copyErr != nil {
							err = fmt.Errorf("copying content for %q: %w", hdr.Name, copyErr)
							break
						}
						if n != hdr.Size {
							err = fmt.Errorf("filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
							break
						}
					}
				}
				// Skipped entries fall through here: Next() discards any
				// unread contents of the current entry.
				hdr, err = tarReader.Next()
			}
			if err != io.EOF {
				filterer.err = fmt.Errorf("reading tar archive: %w", err)
				break
			}
			// Clean EOF: the current archive ended.  Check whether Close was
			// called before trying to read another archive from the pipe.
			filterer.closedLock.Lock()
			closed = filterer.closed
			filterer.closedLock.Unlock()
		}
		pipeReader.Close()
		tarWriter.Close()
		writeCloser.Close()
		filterer.wg.Done()
	}()
	return filterer
}
+
// A tar digester digests an archive, modifying the headers it digests by
// calling a specified function to potentially modify the header that it's
// about to write.
type tarDigester struct {
	isOpen      bool           // true until Close has been called
	nested      digester       // receives the filtered archive bytes and computes the digest
	tarFilterer io.WriteCloser // rewrites headers (clearing timestamps) before they reach nested
}
+
+func modifyTarHeaderForDigesting(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader) {
+ zeroTime := time.Time{}
+ hdr.ModTime = zeroTime
+ hdr.AccessTime = zeroTime
+ hdr.ChangeTime = zeroTime
+ return false, false, nil
+}
+
+func newTarDigester(contentType string) digester {
+ nested := newSimpleDigester(contentType)
+ digester := &tarDigester{
+ isOpen: true,
+ nested: nested,
+ tarFilterer: newTarFilterer(nested, modifyTarHeaderForDigesting),
+ }
+ return digester
+}
+
+func (t *tarDigester) ContentType() string {
+ return t.nested.ContentType()
+}
+
+func (t *tarDigester) Digest() digest.Digest {
+ return t.nested.Digest()
+}
+
+func (t *tarDigester) Write(p []byte) (int, error) {
+ return t.tarFilterer.Write(p)
+}
+
+func (t *tarDigester) Close() error {
+ if t.isOpen {
+ t.isOpen = false
+ return t.tarFilterer.Close()
+ }
+ return nil
+}
+
// CompositeDigester can compute a digest over multiple items.
type CompositeDigester struct {
	digesters []digester // one entry per item started since the last Restart
	closer    io.Closer  // the currently-open tar digester, if any
}
+
// closeOpenDigester closes an open sub-digester, if we have one.
// NOTE(review): any error from Close is discarded here; the sub-digester's
// digest remains readable regardless.
func (c *CompositeDigester) closeOpenDigester() {
	if c.closer != nil {
		c.closer.Close()
		c.closer = nil
	}
}

// Restart clears all state, so that the composite digester can start over.
func (c *CompositeDigester) Restart() {
	c.closeOpenDigester()
	c.digesters = nil
}
+
+// Start starts recording the digest for a new item ("", "file", or "dir").
+// The caller should call Hash() immediately after to retrieve the new
+// io.WriteCloser.
+func (c *CompositeDigester) Start(contentType string) {
+ c.closeOpenDigester()
+ switch contentType {
+ case "":
+ c.digesters = append(c.digesters, newSimpleDigester(""))
+ case "file", "dir":
+ digester := newTarDigester(contentType)
+ c.closer = digester
+ c.digesters = append(c.digesters, digester)
+ default:
+ panic(fmt.Sprintf(`unrecognized content type: expected "", "file", or "dir", got %q`, contentType))
+ }
+}
+
+// Hash returns the hasher for the current item.
+func (c *CompositeDigester) Hash() io.WriteCloser {
+ num := len(c.digesters)
+ if num == 0 {
+ return nil
+ }
+ return c.digesters[num-1]
+}
+
+// Digest returns the content type and a composite digest over everything
+// that's been digested.
+func (c *CompositeDigester) Digest() (string, digest.Digest) {
+ c.closeOpenDigester()
+ num := len(c.digesters)
+ switch num {
+ case 0:
+ return "", ""
+ case 1:
+ return c.digesters[0].ContentType(), c.digesters[0].Digest()
+ default:
+ content := ""
+ for i, digester := range c.digesters {
+ if i > 0 {
+ content += ","
+ }
+ contentType := digester.ContentType()
+ if contentType != "" {
+ contentType += ":"
+ }
+ content += contentType + digester.Digest().Encoded()
+ }
+ return "multi", digest.Canonical.FromString(content)
+ }
+}
diff --git a/digester_test.go b/digester_test.go
new file mode 100644
index 0000000..58dc009
--- /dev/null
+++ b/digester_test.go
@@ -0,0 +1,306 @@
+package buildah
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ digest "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/require"
+)
+
+func (c *CompositeDigester) isOpen() bool {
+ for _, digester := range c.digesters {
+ if tarDigester, ok := digester.(*tarDigester); ok {
+ if tarDigester.isOpen {
+ return true
+ }
+ }
+ }
+ return false
+}
+
// TestCompositeDigester exercises CompositeDigester over single- and
// multi-item sequences of raw ("") and archive ("file"/"dir") content, both
// with and without an extra tar-filtering layer, and checks the reported
// digest type, that the digest reflects the data written, and that every
// sub-digester ends up closed.
func TestCompositeDigester(t *testing.T) {
	tests := []struct {
		name       string
		itemTypes  []string
		resultType string
	}{
		{
			name:       "download",
			itemTypes:  []string{""},
			resultType: "",
		},
		{
			name:       "file",
			itemTypes:  []string{"file"},
			resultType: "file",
		},
		{
			name:       "dir",
			itemTypes:  []string{"dir"},
			resultType: "dir",
		},
		{
			name:       "multiple-1",
			itemTypes:  []string{"file", "dir"},
			resultType: "multi",
		},
		{
			name:       "multiple-2",
			itemTypes:  []string{"dir", "file"},
			resultType: "multi",
		},
		{
			name:       "multiple-3",
			itemTypes:  []string{"", "dir"},
			resultType: "multi",
		},
		{
			name:       "multiple-4",
			itemTypes:  []string{"", "file"},
			resultType: "multi",
		},
		{
			name:       "multiple-5",
			itemTypes:  []string{"dir", ""},
			resultType: "multi",
		},
		{
			name:       "multiple-6",
			itemTypes:  []string{"file", ""},
			resultType: "multi",
		},
	}
	var digester CompositeDigester
	var i int
	var buf bytes.Buffer
	zero := time.Unix(0, 0)
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			for _, filtered := range []bool{false, true} {
				desc := "unfiltered"
				if filtered {
					desc = "filter"
				}
				t.Run(desc, func(t *testing.T) {
					if i > 0 {
						// restart only after it's been used some, to make sure it's not necessary otherwise
						digester.Restart()
					}
					i++
					size := int64(i * 32) // items for this archive will be bigger than the last one
					for _, itemType := range test.itemTypes {
						// top up the shared buffer to exactly "size" bytes of deterministic content
						for int64(buf.Len()) < size {
							err := buf.WriteByte(byte(buf.Len() % 256))
							require.Nil(t, err, "error padding content buffer: %v", err)
						}
						// feed it content that it will treat either as raw data ("") or expect to
						// look like a tarball ("file"/"dir")
						digester.Start(itemType)
						hasher := digester.Hash() // returns an io.WriteCloser
						require.NotNil(t, hasher, "digester returned a null hasher?")
						if itemType == "" {
							// write something that isn't an archive
							n, err := io.Copy(hasher, &buf)
							require.Nil(t, err, "error writing tar content to digester: %v", err)
							require.Equal(t, size, n, "short write writing tar content to digester")
							continue
						}
						// write an archive
						var written bytes.Buffer // a copy of the archive we're generating and digesting
						hasher = &struct {
							io.Writer
							io.Closer
						}{
							Writer: io.MultiWriter(hasher, &written), // splice into the writer
							Closer: hasher,
						}
						if filtered {
							// wrap the WriteCloser in another WriteCloser
							hasher = newTarFilterer(hasher, func(hdr *tar.Header) (bool, bool, io.Reader) {
								hdr.ModTime = zero
								return false, false, nil
							})
							require.NotNil(t, hasher, "newTarFilterer returned a null WriteCloser?")
						}
						// write this item as an archive
						tw := tar.NewWriter(hasher)
						hdr := &tar.Header{
							Name:     "content",
							Size:     size,
							Mode:     0640,
							ModTime:  time.Now(),
							Typeflag: tar.TypeReg,
						}
						err := tw.WriteHeader(hdr)
						require.Nil(t, err, "error writing tar header to digester: %v", err)
						n, err := io.Copy(tw, &buf)
						require.Nil(t, err, "error writing tar content to digester: %v", err)
						require.Equal(t, size, n, "short write writing tar content to digester")
						err = tw.Flush()
						require.Nil(t, err, "error flushing tar content to digester: %v", err)
						err = tw.Close()
						require.Nil(t, err, "error closing tar archive being written digester: %v", err)
						if filtered {
							// the ContentDigester can close its own if we don't explicitly ask it to,
							// but if we wrapped it in a filter, we have to close the filter to clean
							// up the filter, so we can't skip it to exercise that logic; we have to
							// leave that for the corresponding unfiltered case to try
							hasher.Close()
						}
						// now read the archive back
						tr := tar.NewReader(&written)
						require.NotNil(t, tr, "unable to read byte buffer?")
						hdr, err = tr.Next()
						for err == nil {
							var n int64
							if filtered {
								// the filter should have set the modtime to unix 0
								require.Equal(t, zero, hdr.ModTime, "timestamp for entry should have been zero")
							} else {
								// the filter should have left modtime to "roughly now"
								require.NotEqual(t, zero, hdr.ModTime, "timestamp for entry should not have been zero")
							}
							n, err = io.Copy(io.Discard, tr)
							require.Nil(t, err, "error reading tar content from buffer: %v", err)
							require.Equal(t, hdr.Size, n, "short read reading tar content")
							hdr, err = tr.Next()
						}
						require.Equal(t, io.EOF, err, "finished reading archive with %v, not EOF", err)
					}
					// check the composite digest type matches expectations and the value is not just the
					// digest of zero-length data, which is absolutely not what we wrote
					digestType, digestValue := digester.Digest()
					require.Equal(t, test.resultType, digestType, "expected to get a %q digest back for %v, got %q", test.resultType, test.itemTypes, digestType)
					require.NotEqual(t, digest.Canonical.FromBytes([]byte{}), digestValue, "digester wasn't fed any data")
					require.False(t, digester.isOpen(), "expected digester to have been closed with this usage pattern")
				})
			}
		})
	}
}
+
// TestTarFilterer feeds archives through newTarFilterer with several filters
// (nil, pass-through, skip-one-entry, replace-contents) and verifies the
// entries that come out the other side, including the case where multiple
// concatenated archives are written and must read back as a single archive.
func TestTarFilterer(t *testing.T) {
	tests := []struct {
		name          string
		input, output map[string]string
		breakAfter    int
		filter        func(*tar.Header) (bool, bool, io.Reader)
	}{
		{
			name: "none",
			input: map[string]string{
				"file a": "content a",
				"file b": "content b",
			},
			output: map[string]string{
				"file a": "content a",
				"file b": "content b",
			},
			filter: nil,
		},
		{
			name: "plain",
			input: map[string]string{
				"file a": "content a",
				"file b": "content b",
			},
			output: map[string]string{
				"file a": "content a",
				"file b": "content b",
			},
			filter: func(*tar.Header) (bool, bool, io.Reader) { return false, false, nil },
		},
		{
			name: "skip",
			input: map[string]string{
				"file a": "content a",
				"file b": "content b",
			},
			output: map[string]string{
				"file a": "content a",
			},
			filter: func(hdr *tar.Header) (bool, bool, io.Reader) { return hdr.Name == "file b", false, nil },
		},
		{
			name: "replace",
			input: map[string]string{
				"file a": "content a",
				"file b": "content b",
				"file c": "content c",
			},
			output: map[string]string{
				"file a": "content a",
				"file b": "content b+c",
				"file c": "content c",
			},
			breakAfter: 2,
			filter: func(hdr *tar.Header) (bool, bool, io.Reader) {
				if hdr.Name == "file b" {
					content := "content b+c"
					hdr.Size = int64(len(content))
					return false, true, strings.NewReader(content)
				}
				return false, false, nil
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// build the input archive(s) in a buffer
			var buffer bytes.Buffer
			tw := tar.NewWriter(&buffer)
			files := 0
			for filename, contents := range test.input {
				hdr := tar.Header{
					Name:     filename,
					Size:     int64(len(contents)),
					Typeflag: tar.TypeReg,
				}
				err := tw.WriteHeader(&hdr)
				require.Nil(t, err, "unexpected error from TarWriter.WriteHeader")
				n, err := io.CopyN(tw, strings.NewReader(contents), int64(len(contents)))
				require.Nil(t, err, "unexpected error copying to tar writer")
				require.Equal(t, int64(len(contents)), n, "unexpected write length")
				files++
				if test.breakAfter != 0 && files%test.breakAfter == 0 {
					// this test may have us writing multiple archives to the buffer
					// they should still read back as a single archive
					tw.Close()
					tw = tar.NewWriter(&buffer)
				}
			}
			tw.Close()
			// read the filtered result from the other end of a pipe
			output := make(map[string]string)
			pipeReader, pipeWriter := io.Pipe()
			var wg sync.WaitGroup
			wg.Add(1)
			go func() {
				tr := tar.NewReader(pipeReader)
				hdr, err := tr.Next()
				for err == nil {
					var buffer bytes.Buffer
					var n int64
					n, err = io.Copy(&buffer, tr)
					require.Nil(t, err, "unexpected error copying from tar reader")
					require.Equal(t, hdr.Size, n, "unexpected read length")
					output[hdr.Name] = buffer.String()
					hdr, err = tr.Next()
				}
				require.Equal(t, io.EOF, err, "unexpected error ended our tarstream read")
				pipeReader.Close()
				wg.Done()
			}()
			filterer := newTarFilterer(pipeWriter, test.filter)
			_, err := io.Copy(filterer, &buffer)
			require.Nil(t, err, "unexpected error copying archive through filter to reader")
			filterer.Close()
			wg.Wait()
			require.Equal(t, test.output, output, "got unexpected results")
		})
	}
}
diff --git a/docker/AUTHORS b/docker/AUTHORS
new file mode 100644
index 0000000..b2cd9ec
--- /dev/null
+++ b/docker/AUTHORS
@@ -0,0 +1,1788 @@
+# This file lists all individuals having contributed content to the repository.
+# For how it is generated, see `hack/generate-authors.sh`.
+
+Aanand Prasad <aanand.prasad@gmail.com>
+Aaron Davidson <aaron@databricks.com>
+Aaron Feng <aaron.feng@gmail.com>
+Aaron Huslage <huslage@gmail.com>
+Aaron Lehmann <aaron.lehmann@docker.com>
+Aaron Welch <welch@packet.net>
+Aaron.L.Xu <likexu@harmonycloud.cn>
+Abel Muiño <amuino@gmail.com>
+Abhijeet Kasurde <akasurde@redhat.com>
+Abhinav Ajgaonkar <abhinav316@gmail.com>
+Abhishek Chanda <abhishek.becs@gmail.com>
+Abin Shahab <ashahab@altiscale.com>
+Adam Avilla <aavilla@yp.com>
+Adam Eijdenberg <adam.eijdenberg@gmail.com>
+Adam Kunk <adam.kunk@tiaa-cref.org>
+Adam Miller <admiller@redhat.com>
+Adam Mills <adam@armills.info>
+Adam Singer <financeCoding@gmail.com>
+Adam Walz <adam@adamwalz.net>
+Addam Hardy <addam.hardy@gmail.com>
+Aditi Rajagopal <arajagopal@us.ibm.com>
+Aditya <aditya@netroy.in>
+Adolfo Ochagavía <aochagavia92@gmail.com>
+Adria Casas <adriacasas88@gmail.com>
+Adrian Moisey <adrian@changeover.za.net>
+Adrian Mouat <adrian.mouat@gmail.com>
+Adrian Oprea <adrian@codesi.nz>
+Adrien Folie <folie.adrien@gmail.com>
+Adrien Gallouët <adrien@gallouet.fr>
+Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Ahmet Alp Balkan <ahmetb@microsoft.com>
+Aidan Feldman <aidan.feldman@gmail.com>
+Aidan Hobson Sayers <aidanhs@cantab.net>
+AJ Bowen <aj@gandi.net>
+Ajey Charantimath <ajey.charantimath@gmail.com>
+ajneu <ajneu@users.noreply.github.com>
+Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
+Akira Koyasu <mail@akirakoyasu.net>
+Akshay Karle <akshay.a.karle@gmail.com>
+Al Tobey <al@ooyala.com>
+alambike <alambike@gmail.com>
+Alan Scherger <flyinprogrammer@gmail.com>
+Alan Thompson <cloojure@gmail.com>
+Albert Callarisa <shark234@gmail.com>
+Albert Zhang <zhgwenming@gmail.com>
+Aleksa Sarai <asarai@suse.de>
+Aleksandrs Fadins <aleks@s-ko.net>
+Alena Prokharchyk <alena@rancher.com>
+Alessandro Boch <aboch@docker.com>
+Alessio Biancalana <dottorblaster@gmail.com>
+Alex Chan <alex@alexwlchan.net>
+Alex Chen <alexchenunix@gmail.com>
+Alex Coventry <alx@empirical.com>
+Alex Crawford <alex.crawford@coreos.com>
+Alex Ellis <alexellis2@gmail.com>
+Alex Gaynor <alex.gaynor@gmail.com>
+Alex Olshansky <i@creagenics.com>
+Alex Samorukov <samm@os2.kiev.ua>
+Alex Warhawk <ax.warhawk@gmail.com>
+Alexander Artemenko <svetlyak.40wt@gmail.com>
+Alexander Boyd <alex@opengroove.org>
+Alexander Larsson <alexl@redhat.com>
+Alexander Morozov <lk4d4@docker.com>
+Alexander Shopov <ash@kambanaria.org>
+Alexandre Beslic <alexandre.beslic@gmail.com>
+Alexandre González <agonzalezro@gmail.com>
+Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com>
+Alexey Guskov <lexag@mail.ru>
+Alexey Kotlyarov <alexey@infoxchange.net.au>
+Alexey Shamrin <shamrin@gmail.com>
+Alexis THOMAS <fr.alexisthomas@gmail.com>
+Alfred Landrum <alfred.landrum@docker.com>
+Ali Dehghani <ali.dehghani.g@gmail.com>
+Alicia Lauerman <alicia@eta.im>
+Alihan Demir <alihan_6153@hotmail.com>
+Allen Madsen <blatyo@gmail.com>
+Allen Sun <allen.sun@daocloud.io>
+almoehi <almoehi@users.noreply.github.com>
+Alvaro Saurin <alvaro.saurin@gmail.com>
+Alvin Richards <alvin.richards@docker.com>
+amangoel <amangoel@gmail.com>
+Amen Belayneh <amenbelayneh@gmail.com>
+Amir Goldstein <amir73il@aquasec.com>
+Amit Bakshi <ambakshi@gmail.com>
+Amit Krishnan <amit.krishnan@oracle.com>
+Amit Shukla <amit.shukla@docker.com>
+Amy Lindburg <amy.lindburg@docker.com>
+Anand Patil <anand.prabhakar.patil@gmail.com>
+AnandkumarPatel <anandkumarpatel@gmail.com>
+Anatoly Borodin <anatoly.borodin@gmail.com>
+Anchal Agrawal <aagrawa4@illinois.edu>
+Anders Janmyr <anders@janmyr.com>
+Andre Dublin <81dublin@gmail.com>
+Andre Granovsky <robotciti@live.com>
+Andrea Luzzardi <aluzzardi@gmail.com>
+Andrea Turli <andrea.turli@gmail.com>
+Andreas Köhler <andi5.py@gmx.net>
+Andreas Savvides <andreas@editd.com>
+Andreas Tiefenthaler <at@an-ti.eu>
+Andrei Gherzan <andrei@resin.io>
+Andrew C. Bodine <acbodine@us.ibm.com>
+Andrew Clay Shafer <andrewcshafer@gmail.com>
+Andrew Duckworth <grillopress@gmail.com>
+Andrew France <andrew@avito.co.uk>
+Andrew Gerrand <adg@golang.org>
+Andrew Guenther <guenther.andrew.j@gmail.com>
+Andrew Hsu <andrewhsu@docker.com>
+Andrew Kuklewicz <kookster@gmail.com>
+Andrew Macgregor <andrew.macgregor@agworld.com.au>
+Andrew Macpherson <hopscotch23@gmail.com>
+Andrew Martin <sublimino@gmail.com>
+Andrew McDonnell <bugs@andrewmcdonnell.net>
+Andrew Munsell <andrew@wizardapps.net>
+Andrew Po <absourd.noise@gmail.com>
+Andrew Weiss <andrew.weiss@outlook.com>
+Andrew Williams <williams.andrew@gmail.com>
+Andrews Medina <andrewsmedina@gmail.com>
+Andrey Petrov <andrey.petrov@shazow.net>
+Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
+André Martins <aanm90@gmail.com>
+andy <ztao@tibco-support.com>
+Andy Chambers <anchambers@paypal.com>
+andy diller <dillera@gmail.com>
+Andy Goldstein <agoldste@redhat.com>
+Andy Kipp <andy@rstudio.com>
+Andy Rothfusz <github@developersupport.net>
+Andy Smith <github@anarkystic.com>
+Andy Wilson <wilson.andrew.j+github@gmail.com>
+Anes Hasicic <anes.hasicic@gmail.com>
+Anil Belur <askb23@gmail.com>
+Anil Madhavapeddy <anil@recoil.org>
+Ankush Agarwal <ankushagarwal11@gmail.com>
+Anonmily <michelle@michelleliu.io>
+Anran Qiao <anran.qiao@daocloud.io>
+Anthon van der Neut <anthon@mnt.org>
+Anthony Baire <Anthony.Baire@irisa.fr>
+Anthony Bishopric <git@anthonybishopric.com>
+Anthony Dahanne <anthony.dahanne@gmail.com>
+Anthony Sottile <asottile@umich.edu>
+Anton Löfgren <anton.lofgren@gmail.com>
+Anton Nikitin <anton.k.nikitin@gmail.com>
+Anton Polonskiy <anton.polonskiy@gmail.com>
+Anton Tiurin <noxiouz@yandex.ru>
+Antonio Murdaca <antonio.murdaca@gmail.com>
+Antonis Kalipetis <akalipetis@gmail.com>
+Antony Messerli <amesserl@rackspace.com>
+Anuj Bahuguna <anujbahuguna.dev@gmail.com>
+Anusha Ragunathan <anusha.ragunathan@docker.com>
+apocas <petermdias@gmail.com>
+Arash Deshmeh <adeshmeh@ca.ibm.com>
+ArikaChen <eaglesora@gmail.com>
+Arnaud Lefebvre <a.lefebvre@outlook.fr>
+Arnaud Porterie <arnaud.porterie@docker.com>
+Arthur Barr <arthur.barr@uk.ibm.com>
+Arthur Gautier <baloo@gandi.net>
+Artur Meyster <arthurfbi@yahoo.com>
+Arun Gupta <arun.gupta@gmail.com>
+Asbjørn Enge <asbjorn@hanafjedle.net>
+averagehuman <averagehuman@users.noreply.github.com>
+Avi Das <andas222@gmail.com>
+Avi Miller <avi.miller@oracle.com>
+Avi Vaid <avaid1996@gmail.com>
+ayoshitake <airandfingers@gmail.com>
+Azat Khuyiyakhmetov <shadow_uz@mail.ru>
+Bardia Keyoumarsi <bkeyouma@ucsc.edu>
+Barnaby Gray <barnaby@pickle.me.uk>
+Barry Allard <barry.allard@gmail.com>
+Bartłomiej Piotrowski <b@bpiotrowski.pl>
+Bastiaan Bakker <bbakker@xebia.com>
+bdevloed <boris.de.vloed@gmail.com>
+Ben Bonnefoy <frenchben@docker.com>
+Ben Firshman <ben@firshman.co.uk>
+Ben Golub <ben.golub@dotcloud.com>
+Ben Hall <ben@benhall.me.uk>
+Ben Sargent <ben@brokendigits.com>
+Ben Severson <BenSeverson@users.noreply.github.com>
+Ben Toews <mastahyeti@gmail.com>
+Ben Wiklund <ben@daisyowl.com>
+Benjamin Atkin <ben@benatkin.com>
+Benoit Chesneau <bchesneau@gmail.com>
+Bernerd Schaefer <bj.schaefer@gmail.com>
+Bert Goethals <bert@bertg.be>
+Bharath Thiruveedula <bharath_ves@hotmail.com>
+Bhiraj Butala <abhiraj.butala@gmail.com>
+Bhumika Bayani <bhumikabayani@gmail.com>
+Bilal Amarni <bilal.amarni@gmail.com>
+Bill W <SydOps@users.noreply.github.com>
+bin liu <liubin0329@users.noreply.github.com>
+Bingshen Wang <bingshen.wbs@alibaba-inc.com>
+Blake Geno <blakegeno@gmail.com>
+Boaz Shuster <ripcurld.github@gmail.com>
+bobby abbott <ttobbaybbob@gmail.com>
+Boshi Lian <farmer1992@gmail.com>
+boucher <rboucher@gmail.com>
+Bouke Haarsma <bouke@webatoom.nl>
+Boyd Hemphill <boyd@feedmagnet.com>
+boynux <boynux@gmail.com>
+Bradley Cicenas <bradley.cicenas@gmail.com>
+Bradley Wright <brad@intranation.com>
+Brandon Liu <bdon@bdon.org>
+Brandon Philips <brandon@ifup.org>
+Brandon Rhodes <brandon@rhodesmill.org>
+Brendan Dixon <brendand@microsoft.com>
+Brent Salisbury <brent.salisbury@docker.com>
+Brett Higgins <brhiggins@arbor.net>
+Brett Kochendorfer <brett.kochendorfer@gmail.com>
+Brian (bex) Exelbierd <bexelbie@redhat.com>
+Brian Bland <brian.bland@docker.com>
+Brian DeHamer <brian@dehamer.com>
+Brian Dorsey <brian@dorseys.org>
+Brian Flad <bflad417@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Brian McCallister <brianm@skife.org>
+Brian Olsen <brian@maven-group.org>
+Brian Shumate <brian@couchbase.com>
+Brian Torres-Gil <brian@dralth.com>
+Brian Trump <btrump@yelp.com>
+Brice Jaglin <bjaglin@teads.tv>
+Briehan Lombaard <briehan.lombaard@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Bruno Binet <bruno.binet@gmail.com>
+Bruno Gazzera <bgazzera@paginar.com>
+Bruno Renié <brutasse@gmail.com>
+Bruno Tavares <btavare@thoughtworks.com>
+Bryan Bess <squarejaw@bsbess.com>
+Bryan Boreham <bjboreham@gmail.com>
+Bryan Matsuo <bryan.matsuo@gmail.com>
+Bryan Murphy <bmurphy1976@gmail.com>
+buddhamagnet <buddhamagnet@gmail.com>
+Burke Libbey <burke@libbey.me>
+Byung Kang <byung.kang.ctr@amrdec.army.mil>
+Caleb Spare <cespare@gmail.com>
+Calen Pennington <cale@edx.org>
+Cameron Boehmer <cameron.boehmer@gmail.com>
+Cameron Spear <cameronspear@gmail.com>
+Campbell Allen <campbell.allen@gmail.com>
+Candid Dauth <cdauth@cdauth.eu>
+Cao Weiwei <cao.weiwei30@zte.com.cn>
+Carl Henrik Lunde <chlunde@ping.uio.no>
+Carl Loa Odin <carlodin@gmail.com>
+Carl X. Su <bcbcarl@gmail.com>
+Carlos Alexandro Becker <caarlos0@gmail.com>
+Carlos Sanchez <carlos@apache.org>
+Carol Fager-Higgins <carol.fager-higgins@docker.com>
+Cary <caryhartline@users.noreply.github.com>
+Casey Bisson <casey.bisson@joyent.com>
+Ce Gao <ce.gao@outlook.com>
+Cedric Davies <cedricda@microsoft.com>
+Cezar Sa Espinola <cezarsa@gmail.com>
+Chad Swenson <chadswen@gmail.com>
+Chance Zibolski <chance.zibolski@gmail.com>
+Chander G <chandergovind@gmail.com>
+Charles Chan <charleswhchan@users.noreply.github.com>
+Charles Hooper <charles.hooper@dotcloud.com>
+Charles Law <claw@conduce.com>
+Charles Lindsay <chaz@chazomatic.us>
+Charles Merriam <charles.merriam@gmail.com>
+Charles Sarrazin <charles@sarraz.in>
+Charles Smith <charles.smith@docker.com>
+Charlie Drage <charlie@charliedrage.com>
+Charlie Lewis <charliel@lab41.org>
+Chase Bolt <chase.bolt@gmail.com>
+ChaYoung You <yousbe@gmail.com>
+Chen Chao <cc272309126@gmail.com>
+Chen Chuanliang <chen.chuanliang@zte.com.cn>
+Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
+Chen Mingjie <chenmingjie0828@163.com>
+cheney90 <cheney-90@hotmail.com>
+Chewey <prosto-chewey@users.noreply.github.com>
+Chia-liang Kao <clkao@clkao.org>
+chli <chli@freewheel.tv>
+Cholerae Hu <choleraehyq@gmail.com>
+Chris Alfonso <calfonso@redhat.com>
+Chris Armstrong <chris@opdemand.com>
+Chris Dituri <csdituri@gmail.com>
+Chris Fordham <chris@fordham-nagy.id.au>
+Chris Gavin <chris@chrisgavin.me>
+Chris Khoo <chris.khoo@gmail.com>
+Chris McKinnel <chrismckinnel@gmail.com>
+Chris Seto <chriskseto@gmail.com>
+Chris Snow <chsnow123@gmail.com>
+Chris St. Pierre <chris.a.st.pierre@gmail.com>
+Chris Stivers <chris@stivers.us>
+Chris Swan <chris.swan@iee.org>
+Chris Wahl <github@wahlnetwork.com>
+Chris Weyl <cweyl@alumni.drew.edu>
+chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
+Christian Berendt <berendt@b1-systems.de>
+Christian Böhme <developement@boehme3d.de>
+Christian Persson <saser@live.se>
+Christian Rotzoll <ch.rotzoll@gmail.com>
+Christian Simon <simon@swine.de>
+Christian Stefanescu <st.chris@gmail.com>
+ChristoperBiscardi <biscarch@sketcht.com>
+Christophe Mehay <cmehay@online.net>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
+Christopher Currie <codemonkey+github@gmail.com>
+Christopher Jones <tophj@linux.vnet.ibm.com>
+Christopher Latham <sudosurootdev@gmail.com>
+Christopher Rigor <crigor@gmail.com>
+Christy Perez <christy@linux.vnet.ibm.com>
+Chun Chen <ramichen@tencent.com>
+Ciro S. Costa <ciro.costa@usp.br>
+Clayton Coleman <ccoleman@redhat.com>
+Clinton Kitson <clintonskitson@gmail.com>
+Coenraad Loubser <coenraad@wish.org.za>
+Colin Dunklau <colin.dunklau@gmail.com>
+Colin Rice <colin@daedrum.net>
+Colin Walters <walters@verbum.org>
+Collin Guarino <collin.guarino@gmail.com>
+Colm Hally <colmhally@gmail.com>
+companycy <companycy@gmail.com>
+Cory Forsyth <cory.forsyth@gmail.com>
+cressie176 <github@stephen-cresswell.net>
+CrimsonGlory <CrimsonGlory@users.noreply.github.com>
+Cristian Staretu <cristian.staretu@gmail.com>
+cristiano balducci <cristiano.balducci@gmail.com>
+Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
+CUI Wei <ghostplant@qq.com>
+Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
+Cyril F <cyrilf7x@gmail.com>
+Daan van Berkel <daan.v.berkel.1980@gmail.com>
+Daehyeok Mun <daehyeok@gmail.com>
+Dafydd Crosby <dtcrsby@gmail.com>
+dalanlan <dalanlan925@gmail.com>
+Damian Smyth <damian@dsau.co>
+Damien Nadé <github@livna.org>
+Damien Nozay <damien.nozay@gmail.com>
+Damjan Georgievski <gdamjan@gmail.com>
+Dan Anolik <dan@anolik.net>
+Dan Buch <d.buch@modcloth.com>
+Dan Cotora <dan@bluevision.ro>
+Dan Feldman <danf@jfrog.com>
+Dan Griffin <dgriffin@peer1.com>
+Dan Hirsch <thequux@upstandinghackers.com>
+Dan Keder <dan.keder@gmail.com>
+Dan Levy <dan@danlevy.net>
+Dan McPherson <dmcphers@redhat.com>
+Dan Stine <sw@stinemail.com>
+Dan Walsh <dwalsh@redhat.com>
+Dan Williams <me@deedubs.com>
+Daniel Antlinger <d.antlinger@gmx.at>
+Daniel Exner <dex@dragonslave.de>
+Daniel Farrell <dfarrell@redhat.com>
+Daniel Garcia <daniel@danielgarcia.info>
+Daniel Gasienica <daniel@gasienica.ch>
+Daniel Hiltgen <daniel.hiltgen@docker.com>
+Daniel Menet <membership@sontags.ch>
+Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
+Daniel Nephin <dnephin@docker.com>
+Daniel Norberg <dano@spotify.com>
+Daniel Nordberg <dnordberg@gmail.com>
+Daniel Robinson <gottagetmac@gmail.com>
+Daniel S <dan.streby@gmail.com>
+Daniel Von Fange <daniel@leancoder.com>
+Daniel X Moore <yahivin@gmail.com>
+Daniel YC Lin <dlin.tw@gmail.com>
+Daniel Zhang <jmzwcn@gmail.com>
+Daniel, Dao Quang Minh <dqminh@cloudflare.com>
+Danny Berger <dpb587@gmail.com>
+Danny Yates <danny@codeaholics.org>
+Darren Coxall <darren@darrencoxall.com>
+Darren Shepherd <darren.s.shepherd@gmail.com>
+Darren Stahl <darst@microsoft.com>
+Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
+Davanum Srinivas <davanum@gmail.com>
+Dave Barboza <dbarboza@datto.com>
+Dave Henderson <dhenderson@gmail.com>
+Dave MacDonald <mindlapse@gmail.com>
+Dave Tucker <dt@docker.com>
+David Anderson <dave@natulte.net>
+David Calavera <david.calavera@gmail.com>
+David Corking <dmc-source@dcorking.com>
+David Cramer <davcrame@cisco.com>
+David Currie <david_currie@uk.ibm.com>
+David Davis <daviddavis@redhat.com>
+David Dooling <dooling@gmail.com>
+David Gageot <david@gageot.net>
+David Gebler <davidgebler@gmail.com>
+David Lawrence <david.lawrence@docker.com>
+David Lechner <david@lechnology.com>
+David M. Karr <davidmichaelkarr@gmail.com>
+David Mackey <tdmackey@booleanhaiku.com>
+David Mat <david@davidmat.com>
+David Mcanulty <github@hellspark.com>
+David Pelaez <pelaez89@gmail.com>
+David R. Jenni <david.r.jenni@gmail.com>
+David Röthlisberger <david@rothlis.net>
+David Sheets <sheets@alum.mit.edu>
+David Sissitka <me@dsissitka.com>
+David Trott <github@davidtrott.com>
+David Williamson <davidwilliamson@users.noreply.github.com>
+David Xia <dxia@spotify.com>
+David Young <yangboh@cn.ibm.com>
+Davide Ceretti <davide.ceretti@hogarthww.com>
+Dawn Chen <dawnchen@google.com>
+dbdd <wangtong2712@gmail.com>
+dcylabs <dcylabs@gmail.com>
+decadent <decadent@users.noreply.github.com>
+deed02392 <georgehafiz@gmail.com>
+Deng Guangxing <dengguangxing@huawei.com>
+Deni Bertovic <deni@kset.org>
+Denis Gladkikh <denis@gladkikh.email>
+Denis Ollier <larchunix@users.noreply.github.com>
+Dennis Chen <barracks510@gmail.com>
+Dennis Docter <dennis@d23.nl>
+Derek <crq@kernel.org>
+Derek <crquan@gmail.com>
+Derek Ch <denc716@gmail.com>
+Derek McGowan <derek@mcgstyle.net>
+Deric Crago <deric.crago@gmail.com>
+Deshi Xiao <dxiao@redhat.com>
+devmeyster <arthurfbi@yahoo.com>
+Devvyn Murphy <devvyn@devvyn.com>
+Dharmit Shah <shahdharmit@gmail.com>
+Diego Romero <idiegoromero@gmail.com>
+Diego Siqueira <dieg0@live.com>
+Dieter Reuter <dieter.reuter@me.com>
+Dillon Dixon <dillondixon@gmail.com>
+Dima Stopel <dima@twistlock.com>
+Dimitri John Ledkov <dimitri.j.ledkov@intel.com>
+Dimitris Rozakis <dimrozakis@gmail.com>
+Dimitry Andric <d.andric@activevideo.com>
+Dinesh Subhraveti <dineshs@altiscale.com>
+Ding Fei <dingfei@stars.org.cn>
+Diogo Monica <diogo@docker.com>
+DiuDiugirl <sophia.wang@pku.edu.cn>
+Djibril Koné <kone.djibril@gmail.com>
+dkumor <daniel@dkumor.com>
+Dmitri Logvinenko <dmitri.logvinenko@gmail.com>
+Dmitri Shuralyov <shurcooL@gmail.com>
+Dmitry Demeshchuk <demeshchuk@gmail.com>
+Dmitry Gusev <dmitry.gusev@gmail.com>
+Dmitry Kononenko <d@dm42.ru>
+Dmitry Shyshkin <dmitry@shyshkin.org.ua>
+Dmitry Smirnov <onlyjob@member.fsf.org>
+Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
+Dmitry Vorobev <dimahabr@gmail.com>
+Dolph Mathews <dolph.mathews@gmail.com>
+Dominik Dingel <dingel@linux.vnet.ibm.com>
+Dominik Finkbeiner <finkes93@gmail.com>
+Dominik Honnef <dominik@honnef.co>
+Don Kirkby <donkirkby@users.noreply.github.com>
+Don Kjer <don.kjer@gmail.com>
+Don Spaulding <donspauldingii@gmail.com>
+Donald Huang <don.hcd@gmail.com>
+Dong Chen <dongluo.chen@docker.com>
+Donovan Jones <git@gamma.net.nz>
+Doron Podoleanu <doronp@il.ibm.com>
+Doug Davis <dug@us.ibm.com>
+Doug MacEachern <dougm@vmware.com>
+Doug Tangren <d.tangren@gmail.com>
+Dr Nic Williams <drnicwilliams@gmail.com>
+dragon788 <dragon788@users.noreply.github.com>
+Dražen Lučanin <kermit666@gmail.com>
+Drew Erny <drew.erny@docker.com>
+Dustin Sallings <dustin@spy.net>
+Ed Costello <epc@epcostello.com>
+Edmund Wagner <edmund-wagner@web.de>
+Eiichi Tsukata <devel@etsukata.com>
+Eike Herzbach <eike@herzbach.net>
+Eivin Giske Skaaren <eivinsn@axis.com>
+Eivind Uggedal <eivind@uggedal.com>
+Elan Ruusamäe <glen@delfi.ee>
+Elena Morozova <lelenanam@gmail.com>
+Elias Probst <mail@eliasprobst.eu>
+Elijah Zupancic <elijah@zupancic.name>
+eluck <mail@eluck.me>
+Elvir Kuric <elvirkuric@gmail.com>
+Emil Hernvall <emil@quench.at>
+Emily Maier <emily@emilymaier.net>
+Emily Rose <emily@contactvibe.com>
+Emir Ozer <emirozer@yandex.com>
+Enguerran <engcolson@gmail.com>
+Eohyung Lee <liquidnuker@gmail.com>
+epeterso <epeterson@breakpoint-labs.com>
+Eric Barch <barch@tomesoftware.com>
+Eric Curtin <ericcurtin17@gmail.com>
+Eric Hanchrow <ehanchrow@ine.com>
+Eric Lee <thenorthsecedes@gmail.com>
+Eric Myhre <hash@exultant.us>
+Eric Paris <eparis@redhat.com>
+Eric Rafaloff <erafaloff@gmail.com>
+Eric Rosenberg <ehaydenr@users.noreply.github.com>
+Eric Sage <eric.david.sage@gmail.com>
+Erica Windisch <erica@windisch.us>
+Eric Yang <windfarer@gmail.com>
+Eric-Olivier Lamey <eo@lamey.me>
+Erik Bray <erik.m.bray@gmail.com>
+Erik Dubbelboer <erik@dubbelboer.com>
+Erik Hollensbe <github@hollensbe.org>
+Erik Inge Bolsø <knan@redpill-linpro.com>
+Erik Kristensen <erik@erikkristensen.com>
+Erik St. Martin <alakriti@gmail.com>
+Erik Weathers <erikdw@gmail.com>
+Erno Hopearuoho <erno.hopearuoho@gmail.com>
+Erwin van der Koogh <info@erronis.nl>
+Euan <euank@amazon.com>
+Eugene Yakubovich <eugene.yakubovich@coreos.com>
+eugenkrizo <eugen.krizo@gmail.com>
+evalle <shmarnev@gmail.com>
+Evan Allrich <evan@unguku.com>
+Evan Carmi <carmi@users.noreply.github.com>
+Evan Hazlett <ehazlett@users.noreply.github.com>
+Evan Hazlett <ejhazlett@gmail.com>
+Evan Krall <krall@yelp.com>
+Evan Phoenix <evan@fallingsnow.net>
+Evan Wies <evan@neomantra.net>
+Evelyn Xu <evelynhsu21@gmail.com>
+Everett Toews <everett.toews@rackspace.com>
+Evgeny Vereshchagin <evvers@ya.ru>
+Ewa Czechowska <ewa@ai-traders.com>
+Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
+ezbercih <cem.ezberci@gmail.com>
+Ezra Silvera <ezra@il.ibm.com>
+Fabiano Rosas <farosas@br.ibm.com>
+Fabio Falci <fabiofalci@gmail.com>
+Fabio Rapposelli <fabio@vmware.com>
+Fabio Rehm <fgrehm@gmail.com>
+Fabrizio Regini <freegenie@gmail.com>
+Fabrizio Soppelsa <fsoppelsa@mirantis.com>
+Faiz Khan <faizkhan00@gmail.com>
+falmp <chico.lopes@gmail.com>
+Fangyuan Gao <21551127@zju.edu.cn>
+Fareed Dudhia <fareeddudhia@googlemail.com>
+Fathi Boudra <fathi.boudra@linaro.org>
+Federico Gimenez <fgimenez@coit.es>
+Felipe Oliveira <felipeweb.programador@gmail.com>
+Felix Abecassis <fabecassis@nvidia.com>
+Felix Geisendörfer <felix@debuggable.com>
+Felix Hupfeld <quofelix@users.noreply.github.com>
+Felix Rabe <felix@rabe.io>
+Felix Ruess <felix.ruess@gmail.com>
+Felix Schindler <fschindler@weluse.de>
+Ferenc Szabo <pragmaticfrank@gmail.com>
+Fernando <fermayo@gmail.com>
+Fero Volar <alian@alian.info>
+Ferran Rodenas <frodenas@gmail.com>
+Filipe Brandenburger <filbranden@google.com>
+Filipe Oliveira <contato@fmoliveira.com.br>
+fl0yd <fl0yd@me.com>
+Flavio Castelli <fcastelli@suse.com>
+FLGMwt <ryan.stelly@live.com>
+Florian <FWirtz@users.noreply.github.com>
+Florian Klein <florian.klein@free.fr>
+Florian Maier <marsmensch@users.noreply.github.com>
+Florian Weingarten <flo@hackvalue.de>
+Florin Asavoaie <florin.asavoaie@gmail.com>
+fonglh <fonglh@gmail.com>
+fortinux <fortinux@users.noreply.github.com>
+Francesc Campoy <campoy@google.com>
+Francis Chuang <francis.chuang@boostport.com>
+Francisco Carriedo <fcarriedo@gmail.com>
+Francisco Souza <f@souza.cc>
+Frank Groeneveld <frank@ivaldi.nl>
+Frank Herrmann <fgh@4gh.tv>
+Frank Macreery <frank@macreery.com>
+Frank Rosquin <frank.rosquin+github@gmail.com>
+Fred Lifton <fred.lifton@docker.com>
+Frederick F. Kautz IV <fkautz@redhat.com>
+Frederik Loeffert <frederik@zitrusmedia.de>
+Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
+Freek Kalter <freek@kalteronline.org>
+frosforever <frosforever@users.noreply.github.com>
+fy2462 <fy2462@gmail.com>
+Félix Baylac-Jacqué <baylac.felix@gmail.com>
+Félix Cantournet <felix.cantournet@cloudwatt.com>
+Gabe Rosenhouse <gabe@missionst.com>
+Gabor Nagy <mail@aigeruth.hu>
+Gabriel Linder <linder.gabriel@gmail.com>
+Gabriel Monroy <gabriel@opdemand.com>
+Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
+Gaetan de Villele <gdevillele@gmail.com>
+Galen Sampson <galen.sampson@gmail.com>
+Gang Qiao <qiaohai8866@gmail.com>
+Gareth Rushgrove <gareth@morethanseven.net>
+Garrett Barboza <garrett@garrettbarboza.com>
+Gaurav <gaurav.gosec@gmail.com>
+gautam, prasanna <prasannagautam@gmail.com>
+Gaël PORTAY <gael.portay@savoirfairelinux.com>
+GennadySpb <lipenkov@gmail.com>
+Geoffrey Bachelet <grosfrais@gmail.com>
+George MacRorie <gmacr31@gmail.com>
+George Xie <georgexsh@gmail.com>
+Georgi Hristozov <georgi@forkbomb.nl>
+Gereon Frey <gereon.frey@dynport.de>
+German DZ <germ@ndz.com.ar>
+Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
+Gerwim <gerwim@gmail.com>
+Gianluca Borello <g.borello@gmail.com>
+Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
+gissehel <public-devgit-dantus@gissehel.org>
+Giuseppe Mazzotta <gdm85@users.noreply.github.com>
+Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
+Gleb M Borisov <borisov.gleb@gmail.com>
+Glyn Normington <gnormington@gopivotal.com>
+GoBella <caili_welcome@163.com>
+Goffert van Gool <goffert@phusion.nl>
+Gosuke Miyashita <gosukenator@gmail.com>
+Gou Rao <gourao@users.noreply.github.com>
+Govinda Fichtner <govinda.fichtner@googlemail.com>
+Grant Reaber <grant.reaber@gmail.com>
+Graydon Hoare <graydon@pobox.com>
+Greg Fausak <greg@tacodata.com>
+Greg Thornton <xdissent@me.com>
+grossws <grossws@gmail.com>
+grunny <mwgrunny@gmail.com>
+gs11 <gustav.sinder@gmail.com>
+Guilhem Lettron <guilhem+github@lettron.fr>
+Guilherme Salgado <gsalgado@gmail.com>
+Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
+Guillaume J. Charmes <guillaume.charmes@docker.com>
+guoxiuyan <guoxiuyan@huawei.com>
+Gurjeet Singh <gurjeet@singh.im>
+Guruprasad <lgp171188@gmail.com>
+gwx296173 <gaojing3@huawei.com>
+Günter Zöchbauer <guenter@gzoechbauer.com>
+Hans Kristian Flaatten <hans@starefossen.com>
+Hans Rødtang <hansrodtang@gmail.com>
+Hao Shu Wei <haosw@cn.ibm.com>
+Hao Zhang <21521210@zju.edu.cn>
+Harald Albers <github@albersweb.de>
+Harley Laue <losinggeneration@gmail.com>
+Harold Cooper <hrldcpr@gmail.com>
+Harry Zhang <harryz@hyper.sh>
+Harshal Patil <harshalp@linux.vnet.ibm.com>
+He Simei <hesimei@zju.edu.cn>
+He Xin <he_xinworld@126.com>
+heartlock <21521209@zju.edu.cn>
+Hector Castro <hectcastro@gmail.com>
+Helen Xie <chenjg@harmonycloud.cn>
+Henning Sprang <henning.sprang@gmail.com>
+Hobofan <goisser94@gmail.com>
+Hollie Teal <hollie@docker.com>
+Hong Xu <hong@topbug.net>
+Hongbin Lu <hongbin034@gmail.com>
+hsinko <21551195@zju.edu.cn>
+Hu Keping <hukeping@huawei.com>
+Hu Tao <hutao@cn.fujitsu.com>
+Huanzhong Zhang <zhanghuanzhong90@gmail.com>
+Huayi Zhang <irachex@gmail.com>
+Hugo Duncan <hugo@hugoduncan.org>
+Hugo Marisco <0x6875676f@gmail.com>
+Hunter Blanks <hunter@twilio.com>
+huqun <huqun@zju.edu.cn>
+Huu Nguyen <huu@prismskylabs.com>
+hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
+hyp3rdino <markus.kortlang@lhsystems.com>
+Hyzhou <1187766782@qq.com>
+Ian Babrou <ibobrik@gmail.com>
+Ian Bishop <ianbishop@pace7.com>
+Ian Bull <irbull@gmail.com>
+Ian Calvert <ianjcalvert@gmail.com>
+Ian Campbell <ian.campbell@docker.com>
+Ian Lee <IanLee1521@gmail.com>
+Ian Main <imain@redhat.com>
+Ian Truslove <ian.truslove@gmail.com>
+Iavael <iavaelooeyt@gmail.com>
+Icaro Seara <icaro.seara@gmail.com>
+Igor Dolzhikov <bluesriverz@gmail.com>
+Iliana Weller <iweller@amazon.com>
+Ilkka Laukkanen <ilkka@ilkka.io>
+Ilya Dmitrichenko <errordeveloper@gmail.com>
+Ilya Gusev <mail@igusev.ru>
+ILYA Khlopotov <ilya.khlopotov@gmail.com>
+imre Fitos <imre.fitos+github@gmail.com>
+inglesp <peter.inglesby@gmail.com>
+Ingo Gottwald <in.gottwald@gmail.com>
+Isaac Dupree <antispam@idupree.com>
+Isabel Jimenez <contact.isabeljimenez@gmail.com>
+Isao Jonas <isao.jonas@gmail.com>
+Ivan Babrou <ibobrik@gmail.com>
+Ivan Fraixedes <ifcdev@gmail.com>
+Ivan Grcic <igrcic@gmail.com>
+J Bruni <joaohbruni@yahoo.com.br>
+J. Nunn <jbnunn@gmail.com>
+Jack Danger Canty <jackdanger@squareup.com>
+Jacob Atzen <jacob@jacobatzen.dk>
+Jacob Edelman <edelman.jd@gmail.com>
+Jacob Tomlinson <jacob@tom.linson.uk>
+Jake Champlin <jake.champlin.27@gmail.com>
+Jake Moshenko <jake@devtable.com>
+Jake Sanders <jsand@google.com>
+jakedt <jake@devtable.com>
+James Allen <jamesallen0108@gmail.com>
+James Carey <jecarey@us.ibm.com>
+James Carr <james.r.carr@gmail.com>
+James DeFelice <james.defelice@ishisystems.com>
+James Harrison Fisher <jameshfisher@gmail.com>
+James Kyburz <james.kyburz@gmail.com>
+James Kyle <james@jameskyle.org>
+James Lal <james@lightsofapollo.com>
+James Mills <prologic@shortcircuit.net.au>
+James Nugent <james@jen20.com>
+James Turnbull <james@lovedthanlost.net>
+Jamie Hannaford <jamie.hannaford@rackspace.com>
+Jamshid Afshar <jafshar@yahoo.com>
+Jan Keromnes <janx@linux.com>
+Jan Koprowski <jan.koprowski@gmail.com>
+Jan Pazdziora <jpazdziora@redhat.com>
+Jan Toebes <jan@toebes.info>
+Jan-Gerd Tenberge <janten@gmail.com>
+Jan-Jaap Driessen <janjaapdriessen@gmail.com>
+Jana Radhakrishnan <mrjana@docker.com>
+Jannick Fahlbusch <git@jf-projects.de>
+Janonymous <janonymous.codevulture@gmail.com>
+Januar Wayong <januar@gmail.com>
+Jared Biel <jared.biel@bolderthinking.com>
+Jared Hocutt <jaredh@netapp.com>
+Jaroslaw Zabiello <hipertracker@gmail.com>
+jaseg <jaseg@jaseg.net>
+Jasmine Hegman <jasmine@jhegman.com>
+Jason Divock <jdivock@gmail.com>
+Jason Giedymin <jasong@apache.org>
+Jason Green <Jason.Green@AverInformatics.Com>
+Jason Hall <imjasonh@gmail.com>
+Jason Heiss <jheiss@aput.net>
+Jason Livesay <ithkuil@gmail.com>
+Jason McVetta <jason.mcvetta@gmail.com>
+Jason Plum <jplum@devonit.com>
+Jason Shepherd <jason@jasonshepherd.net>
+Jason Smith <jasonrichardsmith@gmail.com>
+Jason Sommer <jsdirv@gmail.com>
+Jason Stangroome <jason@codeassassin.com>
+jaxgeller <jacksongeller@gmail.com>
+Jay <imjching@hotmail.com>
+Jay <teguhwpurwanto@gmail.com>
+Jay Kamat <github@jgkamat.33mail.com>
+Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
+Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
+Jean-Christophe Berthon <huygens@berthon.eu>
+Jean-Paul Calderone <exarkun@twistedmatrix.com>
+Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
+Jean-Tiare Le Bigot <jt@yadutaf.fr>
+Jeff Anderson <jeff@docker.com>
+Jeff Johnston <jeff.johnston.mn@gmail.com>
+Jeff Lindsay <progrium@gmail.com>
+Jeff Mickey <j@codemac.net>
+Jeff Minard <jeff@creditkarma.com>
+Jeff Nickoloff <jeff.nickoloff@gmail.com>
+Jeff Silberman <jsilberm@gmail.com>
+Jeff Welch <whatthejeff@gmail.com>
+Jeffrey Bolle <jeffreybolle@gmail.com>
+Jeffrey Morgan <jmorganca@gmail.com>
+Jeffrey van Gogh <jvg@google.com>
+Jenny Gebske <jennifer@gebske.de>
+Jeremy Grosser <jeremy@synack.me>
+Jeremy Price <jprice.rhit@gmail.com>
+Jeremy Qian <vanpire110@163.com>
+Jeremy Unruh <jeremybunruh@gmail.com>
+Jeroen Jacobs <github@jeroenj.be>
+Jesse Dearing <jesse.dearing@gmail.com>
+Jesse Dubay <jesse@thefortytwo.net>
+Jessica Frazelle <jessfraz@google.com>
+Jezeniel Zapanta <jpzapanta22@gmail.com>
+jgeiger <jgeiger@gmail.com>
+Jhon Honce <jhonce@redhat.com>
+Ji.Zhilong <zhilongji@gmail.com>
+Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
+jianbosun <wonderflow.sun@gmail.com>
+Jie Luo <luo612@zju.edu.cn>
+Jilles Oldenbeuving <ojilles@gmail.com>
+Jim Alateras <jima@comware.com.au>
+Jim Minter <jminter@redhat.com>
+Jim Perrin <jperrin@centos.org>
+Jimmy Cuadra <jimmy@jimmycuadra.com>
+Jimmy Puckett <jimmy.puckett@spinen.com>
+jimmyxian <jimmyxian2004@yahoo.com.cn>
+Jinsoo Park <cellpjs@gmail.com>
+Jiri Popelka <jpopelka@redhat.com>
+Jiuyue Ma <majiuyue@huawei.com>
+Jiří Župka <jzupka@redhat.com>
+jjy <jiangjinyang@outlook.com>
+jmzwcn <jmzwcn@gmail.com>
+Joao Fernandes <joao.fernandes@docker.com>
+Joe Beda <joe.github@bedafamily.com>
+Joe Doliner <jdoliner@pachyderm.io>
+Joe Ferguson <joe@infosiftr.com>
+Joe Gordon <joe.gordon0@gmail.com>
+Joe Shaw <joe@joeshaw.org>
+Joe Van Dyk <joe@tanga.com>
+Joel Friedly <joelfriedly@gmail.com>
+Joel Handwell <joelhandwell@gmail.com>
+Joel Hansson <joel.hansson@ecraft.com>
+Joel Wurtz <jwurtz@jolicode.com>
+Joey Geiger <jgeiger@users.noreply.github.com>
+Joey Gibson <joey@joeygibson.com>
+Joffrey F <joffrey@docker.com>
+Johan Euphrosine <proppy@google.com>
+Johan Rydberg <johan.rydberg@gmail.com>
+Johanan Lieberman <johanan.lieberman@gmail.com>
+Johannes 'fish' Ziemke <github@freigeist.org>
+John Costa <john.costa@gmail.com>
+John Feminella <jxf@jxf.me>
+John Gardiner Myers <jgmyers@proofpoint.com>
+John Gossman <johngos@microsoft.com>
+John Howard (VM) <John.Howard@microsoft.com>
+John Mulhausen <john@docker.com>
+John OBrien III <jobrieniii@yahoo.com>
+John Starks <jostarks@microsoft.com>
+John Stephens <johnstep@docker.com>
+John Tims <john.k.tims@gmail.com>
+John Warwick <jwarwick@gmail.com>
+John Willis <john.willis@docker.com>
+johnharris85 <john@johnharris.io>
+Jon Wedaman <jweede@gmail.com>
+Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan A. Sternberg <jonathansternberg@gmail.com>
+Jonathan Boulle <jonathanboulle@gmail.com>
+Jonathan Camp <jonathan@irondojo.com>
+Jonathan Dowland <jon+github@alcopop.org>
+Jonathan Lebon <jlebon@redhat.com>
+Jonathan Lomas <jonathan@floatinglomas.ca>
+Jonathan McCrohan <jmccrohan@gmail.com>
+Jonathan Mueller <j.mueller@apoveda.ch>
+Jonathan Pares <jonathanpa@users.noreply.github.com>
+Jonathan Rudenberg <jonathan@titanous.com>
+Jonathan Stoppani <jonathan.stoppani@divio.com>
+Jonh Wendell <jonh.wendell@redhat.com>
+Joost Cassee <joost@cassee.net>
+Jordan <jjn2009@users.noreply.github.com>
+Jordan Arentsen <blissdev@gmail.com>
+Jordan Sissel <jls@semicomplete.com>
+Jorge Marin <chipironcin@users.noreply.github.com>
+Jose Diaz-Gonzalez <jose@seatgeek.com>
+Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
+Joseph Hager <ajhager@gmail.com>
+Joseph Kern <jkern@semafour.net>
+Josh <jokajak@gmail.com>
+Josh Bodah <jb3689@yahoo.com>
+Josh Chorlton <jchorlton@gmail.com>
+Josh Eveleth <joshe@opendns.com>
+Josh Hawn <josh.hawn@docker.com>
+Josh Horwitz <horwitz@addthis.com>
+Josh Poimboeuf <jpoimboe@redhat.com>
+Josh Wilson <josh.wilson@fivestars.com>
+Josiah Kiehl <jkiehl@riotgames.com>
+José Tomás Albornoz <jojo@eljojo.net>
+JP <jpellerin@leapfrogonline.com>
+jrabbit <jackjrabbit@gmail.com>
+jroenf <jeroenfranse@gmail.com>
+Julian Taylor <jtaylor.debian@googlemail.com>
+Julien Barbier <write0@gmail.com>
+Julien Bisconti <veggiemonk@users.noreply.github.com>
+Julien Bordellier <julienbordellier@gmail.com>
+Julien Dubois <julien.dubois@gmail.com>
+Julien Pervillé <julien.perville@perfect-memory.com>
+Julio Montes <imc.coder@gmail.com>
+Jun-Ru Chang <jrjang@gmail.com>
+Jussi Nummelin <jussi.nummelin@gmail.com>
+Justas Brazauskas <brazauskasjustas@gmail.com>
+Justin Cormack <justin.cormack@docker.com>
+Justin Force <justin.force@gmail.com>
+Justin Plock <jplock@users.noreply.github.com>
+Justin Simonelis <justin.p.simonelis@gmail.com>
+Justin Terry <juterry@microsoft.com>
+Justyn Temme <justyntemme@gmail.com>
+Jyrki Puttonen <jyrkiput@gmail.com>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
+Jörg Thalheim <joerg@higgsboson.tk>
+Kai Blin <kai@samba.org>
+Kai Qiang Wu(Kennan) <wkq5325@gmail.com>
+Kamil Domański <kamil@domanski.co>
+kamjar gerami <kami.gerami@gmail.com>
+Kanstantsin Shautsou <kanstantsin.sha@gmail.com>
+Kara Alexandra <kalexandra@us.ibm.com>
+Karan Lyons <karan@karanlyons.com>
+Kareem Khazem <karkhaz@karkhaz.com>
+kargakis <kargakis@users.noreply.github.com>
+Karl Grzeszczak <karlgrz@gmail.com>
+Karol Duleba <mr.fuxi@gmail.com>
+Katie McLaughlin <katie@glasnt.com>
+Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
+Katrina Owen <katrina.owen@gmail.com>
+Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
+Kay Yan <kay.yan@daocloud.io>
+kayrus <kay.diam@gmail.com>
+Ke Li <kel@splunk.com>
+Ke Xu <leonhartx.k@gmail.com>
+Kei Ohmura <ohmura.kei@gmail.com>
+Keith Hudgins <greenman@greenman.org>
+Keli Hu <dev@keli.hu>
+Ken Cochrane <kencochrane@gmail.com>
+Ken Herner <kherner@progress.com>
+Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
+Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
+Kenjiro Nakayama <nakayamakenjiro@gmail.com>
+Kent Johnson <kentoj@gmail.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
+Kevin Burke <kev@inburke.com>
+Kevin Clark <kevin.clark@gmail.com>
+Kevin J. Lynagh <kevin@keminglabs.com>
+Kevin Jing Qiu <kevin@idempotent.ca>
+Kevin Kern <kaiwentan@harmonycloud.cn>
+Kevin Menard <kevin@nirvdrum.com>
+Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
+Kevin Richardson <kevin@kevinrichardson.co>
+Kevin Shi <kshi@andrew.cmu.edu>
+Kevin Wallace <kevin@pentabarf.net>
+Kevin Yap <me@kevinyap.ca>
+kevinmeredith <kevin.m.meredith@gmail.com>
+Keyvan Fatehi <keyvanfatehi@gmail.com>
+kies <lleelm@gmail.com>
+Kim BKC Carlbacker <kim.carlbacker@gmail.com>
+Kim Eik <kim@heldig.org>
+Kimbro Staken <kstaken@kstaken.com>
+Kir Kolyshkin <kir@openvz.org>
+Kiran Gangadharan <kiran.daredevil@gmail.com>
+Kirill Kolyshkin <kolyshkin@users.noreply.github.com>
+Kirill SIbirev <l0kix2@gmail.com>
+knappe <tyler.knappe@gmail.com>
+Kohei Tsuruta <coheyxyz@gmail.com>
+Koichi Shiraishi <k@zchee.io>
+Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
+Konstantin L <sw.double@gmail.com>
+Konstantin Pelykh <kpelykh@zettaset.com>
+Krasi Georgiev <krasi@vip-consult.solutions>
+Krasimir Georgiev <support@vip-consult.co.uk>
+Kris-Mikael Krister <krismikael@protonmail.com>
+Kristian Haugene <kristian.haugene@capgemini.com>
+Kristina Zabunova <triara.xiii@gmail.com>
+krrg <krrgithub@gmail.com>
+Kun Zhang <zkazure@gmail.com>
+Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
+Kyle Conroy <kyle.j.conroy@gmail.com>
+Kyle Linden <linden.kyle@gmail.com>
+kyu <leehk1227@gmail.com>
+Lachlan Coote <lcoote@vmware.com>
+Lai Jiangshan <jiangshanlai@gmail.com>
+Lajos Papp <lajos.papp@sequenceiq.com>
+Lakshan Perera <lakshan@laktek.com>
+Lalatendu Mohanty <lmohanty@redhat.com>
+Lance Chen <cyen0312@gmail.com>
+Lance Kinley <lkinley@loyaltymethods.com>
+Lars Butler <Lars.Butler@gmail.com>
+Lars Kellogg-Stedman <lars@redhat.com>
+Lars R. Damerow <lars@pixar.com>
+Lars-Magnus Skog <ralphtheninja@riseup.net>
+Laszlo Meszaros <lacienator@gmail.com>
+Laura Frank <ljfrank@gmail.com>
+Laurent Erignoux <lerignoux@gmail.com>
+Laurie Voss <github@seldo.com>
+Leandro Siqueira <leandro.siqueira@gmail.com>
+Lee Chao <932819864@qq.com>
+Lee, Meng-Han <sunrisedm4@gmail.com>
+leeplay <hyeongkyu.lee@navercorp.com>
+Lei Jitang <leijitang@huawei.com>
+Len Weincier <len@cloudafrica.net>
+Lennie <github@consolejunkie.net>
+Leo Gallucci <elgalu3@gmail.com>
+Leszek Kowalski <github@leszekkowalski.pl>
+Levi Blackstone <levi.blackstone@rackspace.com>
+Levi Gross <levi@levigross.com>
+Lewis Daly <lewisdaly@me.com>
+Lewis Marshall <lewis@lmars.net>
+Lewis Peckover <lew+github@lew.io>
+Liam Macgillavry <liam@kumina.nl>
+Liana Lo <liana.lixia@gmail.com>
+Liang Mingqiang <mqliang.zju@gmail.com>
+Liang-Chi Hsieh <viirya@gmail.com>
+liaoqingwei <liaoqingwei@huawei.com>
+Lily Guo <lily.guo@docker.com>
+limsy <seongyeol37@gmail.com>
+Lin Lu <doraalin@163.com>
+LingFaKe <lingfake@huawei.com>
+Linus Heckemann <lheckemann@twig-world.com>
+Liran Tal <liran.tal@gmail.com>
+Liron Levin <liron@twistlock.com>
+Liu Bo <bo.li.liu@oracle.com>
+Liu Hua <sdu.liu@huawei.com>
+liwenqi <vikilwq@zju.edu.cn>
+lixiaobing10051267 <li.xiaobing1@zte.com.cn>
+Liz Zhang <lizzha@microsoft.com>
+LIZAO LI <lzlarryli@gmail.com>
+Lizzie Dixon <_@lizzie.io>
+Lloyd Dewolf <foolswisdom@gmail.com>
+Lokesh Mandvekar <lsm5@fedoraproject.org>
+longliqiang88 <394564827@qq.com>
+Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
+Lorenzo Fontana <fontanalorenzo@me.com>
+Louis Opter <kalessin@kalessin.fr>
+Luca Favatella <lucafavatella@users.noreply.github.com>
+Luca Marturana <lucamarturana@gmail.com>
+Luca Orlandi <luca.orlandi@gmail.com>
+Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
+Lucas Chan <lucas-github@lucaschan.com>
+Lucas Chi <lucas@teacherspayteachers.com>
+Luciano Mores <leslau@gmail.com>
+Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
+Luiz Svoboda <luizek@gmail.com>
+Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
+lukaspustina <lukas.pustina@centerdevice.com>
+Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
+lukemarsden <luke@digital-crocus.com>
+Lyn <energylyn@zju.edu.cn>
+Lynda O'Leary <lyndaoleary29@gmail.com>
+Lénaïc Huard <lhuard@amadeus.com>
+Ma Müller <mueller-ma@users.noreply.github.com>
+Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
+Mabin <bin.ma@huawei.com>
+Madhav Puri <madhav.puri@gmail.com>
+Madhu Venugopal <madhu@socketplane.io>
+Mageee <21521230.zju.edu.cn>
+Mahesh Tiyyagura <tmahesh@gmail.com>
+malnick <malnick@gmail..com>
+Malte Janduda <mail@janduda.net>
+manchoz <giampaolo@trampolineup.com>
+Manfred Touron <m@42.am>
+Manfred Zabarauskas <manfredas@zabarauskas.com>
+Mansi Nahar <mmn4185@rit.edu>
+mansinahar <mansinahar@users.noreply.github.com>
+Manuel Meurer <manuel@krautcomputing.com>
+Manuel Woelker <github@manuel.woelker.org>
+mapk0y <mapk0y@gmail.com>
+Marc Abramowitz <marc@marc-abramowitz.com>
+Marc Kuo <kuomarc2@gmail.com>
+Marc Tamsky <mtamsky@gmail.com>
+Marcelo Salazar <chelosalazar@gmail.com>
+Marco Hennings <marco.hennings@freiheit.com>
+Marcus Cobden <mcobden@cisco.com>
+Marcus Farkas <toothlessgear@finitebox.com>
+Marcus Linke <marcus.linke@gmx.de>
+Marcus Ramberg <marcus@nordaaker.com>
+Marek Goldmann <marek.goldmann@gmail.com>
+Marian Marinov <mm@yuhu.biz>
+Marianna Tessel <mtesselh@gmail.com>
+Mario Loriedo <mario.loriedo@gmail.com>
+Marius Gundersen <me@mariusgundersen.net>
+Marius Sturm <marius@graylog.com>
+Marius Voila <marius.voila@gmail.com>
+Mark Allen <mrallen1@yahoo.com>
+Mark McGranaghan <mmcgrana@gmail.com>
+Mark McKinstry <mmckinst@umich.edu>
+Mark Milstein <mark@epiloque.com>
+Mark Parker <godefroi@users.noreply.github.com>
+Mark West <markewest@gmail.com>
+Marko Mikulicic <mmikulicic@gmail.com>
+Marko Tibold <marko@tibold.nl>
+Markus Fix <lispmeister@gmail.com>
+Martijn Dwars <ikben@martijndwars.nl>
+Martijn van Oosterhout <kleptog@svana.org>
+Martin Honermeyer <maze@strahlungsfrei.de>
+Martin Kelly <martin@surround.io>
+Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
+Martin Redmond <redmond.martin@gmail.com>
+Mary Anthony <mary.anthony@docker.com>
+Masahito Zembutsu <zembutsu@users.noreply.github.com>
+Masayuki Morita <minamijoyo@gmail.com>
+Mason Malone <mason.malone@gmail.com>
+Mateusz Sulima <sulima.mateusz@gmail.com>
+Mathias Monnerville <mathias@monnerville.com>
+Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
+Mathieu Parent <math.parent@gmail.com>
+Matt Apperson <me@mattapperson.com>
+Matt Bachmann <bachmann.matt@gmail.com>
+Matt Bentley <matt.bentley@docker.com>
+Matt Haggard <haggardii@gmail.com>
+Matt Hoyle <matt@deployable.co>
+Matt McCormick <matt.mccormick@kitware.com>
+Matt Moore <mattmoor@google.com>
+Matt Richardson <matt@redgumtech.com.au>
+Matt Robenolt <matt@ydekproductions.com>
+Matthew Heon <mheon@redhat.com>
+Matthew Lapworth <matthewl@bit-shift.net>
+Matthew Mayer <matthewkmayer@gmail.com>
+Matthew Mueller <mattmuelle@gmail.com>
+Matthew Riley <mattdr@google.com>
+Matthias Klumpp <matthias@tenstral.net>
+Matthias Kühnle <git.nivoc@neverbox.com>
+Matthias Rampke <mr@soundcloud.com>
+Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
+mattymo <raytrac3r@gmail.com>
+mattyw <mattyw@me.com>
+Mauricio Garavaglia <mauricio@medallia.com>
+mauriyouth <mauriyouth@gmail.com>
+Max Shytikov <mshytikov@gmail.com>
+Maxim Fedchyshyn <sevmax@gmail.com>
+Maxim Ivanov <ivanov.maxim@gmail.com>
+Maxim Kulkin <mkulkin@mirantis.com>
+Maxim Treskin <zerthurd@gmail.com>
+Maxime Petazzoni <max@signalfuse.com>
+Meaglith Ma <genedna@gmail.com>
+meejah <meejah@meejah.ca>
+Megan Kostick <mkostick@us.ibm.com>
+Mehul Kar <mehul.kar@gmail.com>
+Mei ChunTao <mei.chuntao@zte.com.cn>
+Mengdi Gao <usrgdd@gmail.com>
+Mert Yazıcıoğlu <merty@users.noreply.github.com>
+mgniu <mgniu@dataman-inc.com>
+Micah Zoltu <micah@newrelic.com>
+Michael A. Smith <michael@smith-li.com>
+Michael Bridgen <mikeb@squaremobius.net>
+Michael Brown <michael@netdirect.ca>
+Michael Chiang <mchiang@docker.com>
+Michael Crosby <michael@docker.com>
+Michael Currie <mcurrie@bruceforceresearch.com>
+Michael Friis <friism@gmail.com>
+Michael Gorsuch <gorsuch@github.com>
+Michael Grauer <michael.grauer@kitware.com>
+Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Michael Hudson-Doyle <michael.hudson@canonical.com>
+Michael Huettermann <michael@huettermann.net>
+Michael Irwin <mikesir87@gmail.com>
+Michael Käufl <docker@c.michael-kaeufl.de>
+Michael Neale <michael.neale@gmail.com>
+Michael Prokop <github@michael-prokop.at>
+Michael Scharf <github@scharf.gr>
+Michael Stapelberg <michael+gh@stapelberg.de>
+Michael Steinert <mike.steinert@gmail.com>
+Michael Thies <michaelthies78@gmail.com>
+Michael West <mwest@mdsol.com>
+Michal Fojtik <mfojtik@redhat.com>
+Michal Gebauer <mishak@mishak.net>
+Michal Jemala <michal.jemala@gmail.com>
+Michal Minář <miminar@redhat.com>
+Michal Wieczorek <wieczorek-michal@wp.pl>
+Michaël Pailloncy <mpapo.dev@gmail.com>
+Michał Czeraszkiewicz <czerasz@gmail.com>
+Michiel@unhosted <michiel@unhosted.org>
+Mickaël FORTUNATO <morsi.morsicus@gmail.com>
+Miguel Angel Fernández <elmendalerenda@gmail.com>
+Miguel Morales <mimoralea@gmail.com>
+Mihai Borobocea <MihaiBorob@gmail.com>
+Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
+Mike Brown <brownwm@us.ibm.com>
+Mike Chelen <michael.chelen@gmail.com>
+Mike Danese <mikedanese@google.com>
+Mike Dillon <mike@embody.org>
+Mike Dougherty <mike.dougherty@docker.com>
+Mike Gaffney <mike@uberu.com>
+Mike Goelzer <mike.goelzer@docker.com>
+Mike Leone <mleone896@gmail.com>
+Mike MacCana <mike.maccana@gmail.com>
+Mike Naberezny <mike@naberezny.com>
+Mike Snitzer <snitzer@redhat.com>
+mikelinjie <294893458@qq.com>
+Mikhail Sobolev <mss@mawhrin.net>
+Milind Chawre <milindchawre@gmail.com>
+Miloslav Trmač <mitr@redhat.com>
+mingqing <limingqing@cyou-inc.com>
+Mingzhen Feng <fmzhen@zju.edu.cn>
+Misty Stanley-Jones <misty@docker.com>
+Mitch Capper <mitch.capper@gmail.com>
+mlarcher <github@ringabell.org>
+Mohammad Banikazemi <mb@us.ibm.com>
+Mohammed Aaqib Ansari <maaquib@gmail.com>
+Mohit Soni <mosoni@ebay.com>
+Morgan Bauer <mbauer@us.ibm.com>
+Morgante Pell <morgante.pell@morgante.net>
+Morgy93 <thomas@ulfertsprygoda.de>
+Morten Siebuhr <sbhr@sbhr.dk>
+Morton Fox <github@qslw.com>
+Moysés Borges <moysesb@gmail.com>
+mqliang <mqliang.zju@gmail.com>
+Mrunal Patel <mrunalp@gmail.com>
+msabansal <sabansal@microsoft.com>
+mschurenko <matt.schurenko@gmail.com>
+Muayyad Alsadi <alsadi@gmail.com>
+muge <stevezhang2014@gmail.com>
+Mustafa Akın <mustafa91@gmail.com>
+Muthukumar R <muthur@gmail.com>
+Máximo Cuadros <mcuadros@gmail.com>
+Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
+Nahum Shalman <nshalman@omniti.com>
+Nakul Pathak <nakulpathak3@hotmail.com>
+Nalin Dahyabhai <nalin@redhat.com>
+Nan Monnand Deng <monnand@gmail.com>
+Naoki Orii <norii@cs.cmu.edu>
+Natalie Parker <nparker@omnifone.com>
+Natanael Copa <natanael.copa@docker.com>
+Nate Brennand <nate.brennand@clever.com>
+Nate Eagleson <nate@nateeag.com>
+Nate Jones <nate@endot.org>
+Nathan Hsieh <hsieh.nathan@gmail.com>
+Nathan Kleyn <nathan@nathankleyn.com>
+Nathan LeClaire <nathan.leclaire@docker.com>
+Nathan McCauley <nathan.mccauley@docker.com>
+Nathan Williams <nathan@teamtreehouse.com>
+Neal McBurnett <neal@mcburnett.org>
+Neil Peterson <neilpeterson@outlook.com>
+Nelson Chen <crazysim@gmail.com>
+Neyazul Haque <nuhaque@gmail.com>
+Nghia Tran <nghia@google.com>
+Niall O'Higgins <niallo@unworkable.org>
+Nicholas E. Rabenau <nerab@gmx.at>
+nick <nicholasjamesrusso@gmail.com>
+Nick DeCoursin <n.decoursin@foodpanda.com>
+Nick Irvine <nfirvine@nfirvine.com>
+Nick Parker <nikaios@gmail.com>
+Nick Payne <nick@kurai.co.uk>
+Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
+Nick Stinemates <nick@stinemates.org>
+NickrenREN <yuquan.ren@easystack.cn>
+Nicola Kabar <nicolaka@gmail.com>
+Nicolas Borboën <ponsfrilus@users.noreply.github.com>
+Nicolas De loof <nicolas.deloof@gmail.com>
+Nicolas Dudebout <nicolas.dudebout@gatech.edu>
+Nicolas Goy <kuon@goyman.com>
+Nicolas Kaiser <nikai@nikai.net>
+Nicolás Hock Isaza <nhocki@gmail.com>
+Nigel Poulton <nigelpoulton@hotmail.com>
+NikolaMandic <mn080202@gmail.com>
+nikolas <nnyby@columbia.edu>
+Nikolay Milovanov <nmil@itransformers.net>
+Nirmal Mehta <nirmalkmehta@gmail.com>
+Nishant Totla <nishanttotla@gmail.com>
+NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
+Noah Treuhaft <noah.treuhaft@docker.com>
+noducks <onemannoducks@gmail.com>
+Nolan Darilek <nolan@thewordnerd.info>
+nponeccop <andy.melnikov@gmail.com>
+Nuutti Kotivuori <naked@iki.fi>
+nzwsch <hi@nzwsch.com>
+O.S. Tezer <ostezer@gmail.com>
+objectified <objectified@gmail.com>
+OddBloke <daniel@daniel-watkins.co.uk>
+odk- <github@odkurzacz.org>
+Oguz Bilgic <fisyonet@gmail.com>
+Oh Jinkyun <tintypemolly@gmail.com>
+Ohad Schneider <ohadschn@users.noreply.github.com>
+ohmystack <jun.jiang02@ele.me>
+Ole Reifschneider <mail@ole-reifschneider.de>
+Oliver Neal <ItsVeryWindy@users.noreply.github.com>
+Olivier Gambier <dmp42@users.noreply.github.com>
+Olle Jonsson <olle.jonsson@gmail.com>
+Oriol Francès <oriolfa@gmail.com>
+orkaa <orkica@gmail.com>
+Oskar Niburski <oskarniburski@gmail.com>
+Otto Kekäläinen <otto@seravo.fi>
+Ovidio Mallo <ovidio.mallo@gmail.com>
+oyld <oyld0210@163.com>
+ozlerhakan <hakan.ozler@kodcu.com>
+paetling <paetling@gmail.com>
+pandrew <letters@paulnotcom.se>
+panticz <mail@konczalski.de>
+Paolo G. Giarrusso <p.giarrusso@gmail.com>
+Pascal Borreli <pascal@borreli.com>
+Pascal Hartig <phartig@rdrei.net>
+Patrick Böänziger <patrick.baenziger@bsi-software.com>
+Patrick Devine <patrick.devine@docker.com>
+Patrick Hemmer <patrick.hemmer@gmail.com>
+Patrick Stapleton <github@gdi2290.com>
+pattichen <craftsbear@gmail.com>
+Paul <paul9869@gmail.com>
+paul <paul@inkling.com>
+Paul Annesley <paul@annesley.cc>
+Paul Bellamy <paul.a.bellamy@gmail.com>
+Paul Bowsher <pbowsher@globalpersonals.co.uk>
+Paul Furtado <pfurtado@hubspot.com>
+Paul Hammond <paul@paulhammond.org>
+Paul Jimenez <pj@place.org>
+Paul Kehrer <paul.l.kehrer@gmail.com>
+Paul Lietar <paul@lietar.net>
+Paul Liljenberg <liljenberg.paul@gmail.com>
+Paul Morie <pmorie@gmail.com>
+Paul Nasrat <pnasrat@gmail.com>
+Paul Weaver <pauweave@cisco.com>
+Paulo Ribeiro <paigr.io@gmail.com>
+Pavel Lobashov <ShockwaveNN@gmail.com>
+Pavel Pospisil <pospispa@gmail.com>
+Pavel Sutyrin <pavel.sutyrin@gmail.com>
+Pavel Tikhomirov <ptikhomirov@parallels.com>
+Pavlos Ratis <dastergon@gentoo.org>
+Pavol Vargovcik <pallly.vargovcik@gmail.com>
+Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
+Peggy Li <peggyli.224@gmail.com>
+Pei Su <sillyousu@gmail.com>
+Penghan Wang <ph.wang@daocloud.io>
+perhapszzy@sina.com <perhapszzy@sina.com>
+pestophagous <pestophagous@users.noreply.github.com>
+Peter Bourgon <peter@bourgon.org>
+Peter Braden <peterbraden@peterbraden.co.uk>
+Peter Choi <reikani@Peters-MacBook-Pro.local>
+Peter Dave Hello <PeterDaveHello@users.noreply.github.com>
+Peter Edge <peter.edge@gmail.com>
+Peter Ericson <pdericson@gmail.com>
+Peter Esbensen <pkesbensen@gmail.com>
+Peter Malmgren <ptmalmgren@gmail.com>
+Peter Salvatore <peter@psftw.com>
+Peter Volpe <petervo@redhat.com>
+Peter Waller <p@pwaller.net>
+Petr Švihlík <svihlik.petr@gmail.com>
+Phil <underscorephil@gmail.com>
+Phil Estes <estesp@linux.vnet.ibm.com>
+Phil Spitler <pspitler@gmail.com>
+Philip Monroe <phil@philmonroe.com>
+Philipp Wahala <philipp.wahala@gmail.com>
+Philipp Weissensteiner <mail@philippweissensteiner.com>
+Phillip Alexander <git@phillipalexander.io>
+pidster <pid@pidster.com>
+Piergiuliano Bossi <pgbossi@gmail.com>
+Pierre <py@poujade.org>
+Pierre Carrier <pierre@meteor.com>
+Pierre Dal-Pra <dalpra.pierre@gmail.com>
+Pierre Wacrenier <pierre.wacrenier@gmail.com>
+Pierre-Alain RIVIERE <pariviere@ippon.fr>
+Piotr Bogdan <ppbogdan@gmail.com>
+pixelistik <pixelistik@users.noreply.github.com>
+Porjo <porjo38@yahoo.com.au>
+Poul Kjeldager Sørensen <pks@s-innovations.net>
+Pradeep Chhetri <pradeep@indix.com>
+Prasanna Gautam <prasannagautam@gmail.com>
+Prayag Verma <prayag.verma@gmail.com>
+Przemek Hejman <przemyslaw.hejman@gmail.com>
+pysqz <randomq@126.com>
+qhuang <h.huangqiang@huawei.com>
+Qiang Huang <h.huangqiang@huawei.com>
+Qinglan Peng <qinglanpeng@zju.edu.cn>
+qudongfang <qudongfang@gmail.com>
+Quentin Brossard <qbrossard@gmail.com>
+Quentin Perez <qperez@ocs.online.net>
+Quentin Tayssier <qtayssier@gmail.com>
+r0n22 <cameron.regan@gmail.com>
+Rafal Jeczalik <rjeczalik@gmail.com>
+Rafe Colton <rafael.colton@gmail.com>
+Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Raghuram Devarakonda <draghuram@gmail.com>
+Rajat Pandit <rp@rajatpandit.com>
+Rajdeep Dua <dua_rajdeep@yahoo.com>
+Ralf Sippl <ralf.sippl@gmail.com>
+Ralle <spam@rasmusa.net>
+Ralph Bean <rbean@redhat.com>
+Ramkumar Ramachandra <artagnon@gmail.com>
+Ramon Brooker <rbrooker@aetherealmind.com>
+Ramon van Alteren <ramon@vanalteren.nl>
+Ray Tsang <saturnism@users.noreply.github.com>
+ReadmeCritic <frankensteinbot@gmail.com>
+Recursive Madman <recursive.madman@gmx.de>
+Reficul <xuzhenglun@gmail.com>
+Regan McCooey <rmccooey27@aol.com>
+Remi Rampin <remirampin@gmail.com>
+Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
+resouer <resouer@163.com>
+rgstephens <greg@udon.org>
+Rhys Hiltner <rhys@twitch.tv>
+Rich Moyse <rich@moyse.us>
+Rich Seymour <rseymour@gmail.com>
+Richard <richard.scothern@gmail.com>
+Richard Burnison <rburnison@ebay.com>
+Richard Harvey <richard@squarecows.com>
+Richard Mathie <richard.mathie@amey.co.uk>
+Richard Metzler <richard@paadee.com>
+Richard Scothern <richard.scothern@gmail.com>
+Richo Healey <richo@psych0tik.net>
+Rick Bradley <rick@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Rick Wieman <git@rickw.nl>
+Rik Nijessen <rik@keefo.nl>
+Riku Voipio <riku.voipio@linaro.org>
+Riley Guerin <rileytg.dev@gmail.com>
+Ritesh H Shukla <sritesh@vmware.com>
+Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
+Rob Vesse <rvesse@dotnetrdf.org>
+Robert Bachmann <rb@robertbachmann.at>
+Robert Bittle <guywithnose@gmail.com>
+Robert Obryk <robryk@gmail.com>
+Robert Stern <lexandro2000@gmail.com>
+Robert Terhaar <robbyt@users.noreply.github.com>
+Robert Wallis <smilingrob@gmail.com>
+Roberto G. Hashioka <roberto.hashioka@docker.com>
+Roberto Muñoz Fernández <robertomf@gmail.com>
+Robin Naundorf <r.naundorf@fh-muenster.de>
+Robin Schneider <ypid@riseup.net>
+Robin Speekenbrink <robin@kingsquare.nl>
+robpc <rpcann@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Rodrigo Vaz <rodrigo.vaz@gmail.com>
+Roel Van Nyen <roel.vannyen@gmail.com>
+Roger Peppe <rogpeppe@gmail.com>
+Rohit Jnagal <jnagal@google.com>
+Rohit Kadam <rohit.d.kadam@gmail.com>
+Rojin George <rojingeorge@huawei.com>
+Roland Huß <roland@jolokia.org>
+Roland Kammerer <roland.kammerer@linbit.com>
+Roland Moriz <rmoriz@users.noreply.github.com>
+Roma Sokolov <sokolov.r.v@gmail.com>
+Roman Strashkin <roman.strashkin@gmail.com>
+Ron Smits <ron.smits@gmail.com>
+Ron Williams <ron.a.williams@gmail.com>
+root <docker-dummy@example.com>
+root <root@localhost>
+root <root@lxdebmas.marist.edu>
+root <root@ubuntu-14.04-amd64-vbox>
+root <root@webm215.cluster016.ha.ovh.net>
+Rory Hunter <roryhunter2@gmail.com>
+Rory McCune <raesene@gmail.com>
+Ross Boucher <rboucher@gmail.com>
+Rovanion Luckey <rovanion.luckey@gmail.com>
+Rozhnov Alexandr <nox73@ya.ru>
+rsmoorthy <rsmoorthy@users.noreply.github.com>
+Rudolph Gottesheim <r.gottesheim@loot.at>
+Rui Lopes <rgl@ruilopes.com>
+Runshen Zhu <runshen.zhu@gmail.com>
+Ryan Abrams <rdabrams@gmail.com>
+Ryan Anderson <anderson.ryanc@gmail.com>
+Ryan Aslett <github@mixologic.com>
+Ryan Belgrave <rmb1993@gmail.com>
+Ryan Detzel <ryan.detzel@gmail.com>
+Ryan Fowler <rwfowler@gmail.com>
+Ryan McLaughlin <rmclaughlin@insidesales.com>
+Ryan O'Donnell <odonnellryanc@gmail.com>
+Ryan Seto <ryanseto@yak.net>
+Ryan Thomas <rthomas@atlassian.com>
+Ryan Trauntvein <rtrauntvein@novacoast.com>
+Ryan Wallner <ryan.wallner@clusterhq.com>
+Ryan Zhang <ryan.zhang@docker.com>
+RyanDeng <sheldon.d1018@gmail.com>
+Rémy Greinhofer <remy.greinhofer@livelovely.com>
+s. rannou <mxs@sbrk.org>
+s00318865 <sunyuan3@huawei.com>
+Sabin Basyal <sabin.basyal@gmail.com>
+Sachin Joshi <sachin_jayant_joshi@hotmail.com>
+Sagar Hani <sagarhani33@gmail.com>
+Sainath Grandhi <sainath.grandhi@intel.com>
+sakeven <jc5930@sina.cn>
+Sally O'Malley <somalley@redhat.com>
+Sam Abed <sam.abed@gmail.com>
+Sam Alba <sam.alba@gmail.com>
+Sam Bailey <cyprix@cyprix.com.au>
+Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
+Sam Neirinck <sam@samneirinck.com>
+Sam Reis <sreis@atlassian.com>
+Sam Rijs <srijs@airpost.net>
+Sambuddha Basu <sambuddhabasu1@gmail.com>
+Sami Wagiaalla <swagiaal@redhat.com>
+Samuel Andaya <samuel@andaya.net>
+Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com>
+Samuel Karp <skarp@amazon.com>
+Samuel PHAN <samuel-phan@users.noreply.github.com>
+Sandeep Bansal <msabansal@microsoft.com>
+Sankar சங்கர் <sankar.curiosity@gmail.com>
+Sanket Saurav <sanketsaurav@gmail.com>
+Santhosh Manohar <santhosh@docker.com>
+sapphiredev <se.imas.kr@gmail.com>
+Satnam Singh <satnam@raintown.org>
+satoru <satorulogic@gmail.com>
+Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
+Satoshi Tagomori <tagomoris@gmail.com>
+scaleoutsean <scaleoutsean@users.noreply.github.com>
+Scott Bessler <scottbessler@gmail.com>
+Scott Collier <emailscottcollier@gmail.com>
+Scott Johnston <scott@docker.com>
+Scott Stamp <scottstamp851@gmail.com>
+Scott Walls <sawalls@umich.edu>
+sdreyesg <sdreyesg@gmail.com>
+Sean Christopherson <sean.j.christopherson@intel.com>
+Sean Cronin <seancron@gmail.com>
+Sean McIntyre <s.mcintyre@xverba.ca>
+Sean OMeara <sean@chef.io>
+Sean P. Kane <skane@newrelic.com>
+Sean Rodman <srodman7689@gmail.com>
+Sebastiaan van Steenis <mail@superseb.nl>
+Sebastiaan van Stijn <github@gone.nl>
+Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
+Senthil Kumaran <senthil@uthcode.com>
+SeongJae Park <sj38.park@gmail.com>
+Seongyeol Lim <seongyeol37@gmail.com>
+Serge Hallyn <serge.hallyn@ubuntu.com>
+Sergey Alekseev <sergey.alekseev.minsk@gmail.com>
+Sergey Evstifeev <sergey.evstifeev@gmail.com>
+Sergii Kabashniuk <skabashnyuk@codenvy.com>
+Serhat Gülçiçek <serhat25@gmail.com>
+Sevki Hasirci <s@sevki.org>
+Shane Canon <scanon@lbl.gov>
+Shane da Silva <shane@dasilva.io>
+shaunol <shaunol@gmail.com>
+Shawn Landden <shawn@churchofgit.com>
+Shawn Siefkas <shawn.siefkas@meredith.com>
+shawnhe <shawnhe@shawnhedeMacBook-Pro.local>
+Shayne Wang <shaynexwang@gmail.com>
+Shekhar Gulati <shekhargulati84@gmail.com>
+Sheng Yang <sheng@yasker.org>
+Shengbo Song <thomassong@tencent.com>
+Shev Yan <yandong_8212@163.com>
+Shih-Yuan Lee <fourdollars@gmail.com>
+Shijiang Wei <mountkin@gmail.com>
+Shishir Mahajan <shishir.mahajan@redhat.com>
+Shoubhik Bose <sbose78@gmail.com>
+Shourya Sarcar <shourya.sarcar@gmail.com>
+shuai-z <zs.broccoli@gmail.com>
+Shukui Yang <yangshukui@huawei.com>
+Shuwei Hao <haosw@cn.ibm.com>
+Sian Lerk Lau <kiawin@gmail.com>
+sidharthamani <sid@rancher.com>
+Silas Sewell <silas@sewell.org>
+Silvan Jegen <s.jegen@gmail.com>
+Simei He <hesimei@zju.edu.cn>
+Simon Eskildsen <sirup@sirupsen.com>
+Simon Leinen <simon.leinen@gmail.com>
+Simon Taranto <simon.taranto@gmail.com>
+Sindhu S <sindhus@live.in>
+Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
+skaasten <shaunk@gmail.com>
+Solganik Alexander <solganik@gmail.com>
+Solomon Hykes <solomon@docker.com>
+Song Gao <song@gao.io>
+Soshi Katsuta <soshi.katsuta@gmail.com>
+Soulou <leo@unbekandt.eu>
+Spencer Brown <spencer@spencerbrown.org>
+Spencer Smith <robertspencersmith@gmail.com>
+Sridatta Thatipamala <sthatipamala@gmail.com>
+Sridhar Ratnakumar <sridharr@activestate.com>
+Srini Brahmaroutu <srbrahma@us.ibm.com>
+srinsriv <srinsriv@users.noreply.github.com>
+Steeve Morin <steeve.morin@gmail.com>
+Stefan Berger <stefanb@linux.vnet.ibm.com>
+Stefan J. Wernli <swernli@microsoft.com>
+Stefan Praszalowicz <stefan@greplin.com>
+Stefan S. <tronicum@user.github.com>
+Stefan Scherer <scherer_stefan@icloud.com>
+Stefan Staudenmeyer <doerte@instana.com>
+Stefan Weil <sw@weilnetz.de>
+Stephen Crosby <stevecrozz@gmail.com>
+Stephen Day <stephen.day@docker.com>
+Stephen Drake <stephen@xenolith.net>
+Stephen Rust <srust@blockbridge.com>
+Steve Dougherty <steve@asksteved.com>
+Steve Durrheimer <s.durrheimer@gmail.com>
+Steve Francia <steve.francia@gmail.com>
+Steve Koch <stevekochscience@gmail.com>
+Steven Burgess <steven.a.burgess@hotmail.com>
+Steven Erenst <stevenerenst@gmail.com>
+Steven Hartland <steven.hartland@multiplay.co.uk>
+Steven Iveson <sjiveson@outlook.com>
+Steven Merrill <steven.merrill@gmail.com>
+Steven Richards <steven@axiomzen.co>
+Steven Taylor <steven.taylor@me.com>
+Subhajit Ghosh <isubuz.g@gmail.com>
+Sujith Haridasan <sujith.h@gmail.com>
+Sun Gengze <690388648@qq.com>
+Suryakumar Sudar <surya.trunks@gmail.com>
+Sven Dowideit <SvenDowideit@home.org.au>
+Swapnil Daingade <swapnil.daingade@gmail.com>
+Sylvain Baubeau <sbaubeau@redhat.com>
+Sylvain Bellemare <sylvain@ascribe.io>
+Sébastien <sebastien@yoozio.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
+Tadej Janež <tadej.j@nez.si>
+TAGOMORI Satoshi <tagomoris@gmail.com>
+tang0th <tang0th@gmx.com>
+Tangi COLIN <tangicolin@gmail.com>
+Tatsuki Sugiura <sugi@nemui.org>
+Tatsushi Inagaki <e29253@jp.ibm.com>
+Taylor Jones <monitorjbl@gmail.com>
+tbonza <tylers.pile@gmail.com>
+Ted M. Young <tedyoung@gmail.com>
+Tehmasp Chaudhri <tehmasp@gmail.com>
+Tejesh Mehta <tejesh.mehta@gmail.com>
+terryding77 <550147740@qq.com>
+tgic <farmer1992@gmail.com>
+Thatcher Peskens <thatcher@docker.com>
+theadactyl <thea.lamkin@gmail.com>
+Thell 'Bo' Fowler <thell@tbfowler.name>
+Thermionix <bond711@gmail.com>
+Thijs Terlouw <thijsterlouw@gmail.com>
+Thomas Bikeev <thomas.bikeev@mac.com>
+Thomas Frössman <thomasf@jossystem.se>
+Thomas Gazagnaire <thomas@gazagnaire.org>
+Thomas Grainger <tagrain@gmail.com>
+Thomas Hansen <thomas.hansen@gmail.com>
+Thomas Leonard <thomas.leonard@docker.com>
+Thomas LEVEIL <thomasleveil@gmail.com>
+Thomas Orozco <thomas@orozco.fr>
+Thomas Riccardi <riccardi@systran.fr>
+Thomas Schroeter <thomas@cliqz.com>
+Thomas Sjögren <konstruktoid@users.noreply.github.com>
+Thomas Swift <tgs242@gmail.com>
+Thomas Tanaka <thomas.tanaka@oracle.com>
+Thomas Texier <sharkone@en-mousse.org>
+Tianon Gravi <admwiggin@gmail.com>
+Tianyi Wang <capkurmagati@gmail.com>
+Tibor Vass <teabee89@gmail.com>
+Tiffany Jernigan <tiffany.f.j@gmail.com>
+Tiffany Low <tiffany@box.com>
+Tim Bosse <taim@bosboot.org>
+Tim Dettrick <t.dettrick@uq.edu.au>
+Tim Düsterhus <tim@bastelstu.be>
+Tim Hockin <thockin@google.com>
+Tim Ruffles <oi@truffles.me.uk>
+Tim Smith <timbot@google.com>
+Tim Terhorst <mynamewastaken+git@gmail.com>
+Tim Wang <timwangdev@gmail.com>
+Tim Waugh <twaugh@redhat.com>
+Tim Wraight <tim.wraight@tangentlabs.co.uk>
+Tim Zju <21651152@zju.edu.cn>
+timfeirg <kkcocogogo@gmail.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
+tjwebb123 <tjwebb123@users.noreply.github.com>
+tobe <tobegit3hub@gmail.com>
+Tobias Bieniek <Tobias.Bieniek@gmx.de>
+Tobias Bradtke <webwurst@gmail.com>
+Tobias Gesellchen <tobias@gesellix.de>
+Tobias Klauser <tklauser@distanz.ch>
+Tobias Munk <schmunk@usrbin.de>
+Tobias Schmidt <ts@soundcloud.com>
+Tobias Schwab <tobias.schwab@dynport.de>
+Todd Crane <todd@toddcrane.com>
+Todd Lunter <tlunter@gmail.com>
+Todd Whiteman <todd.whiteman@joyent.com>
+Toli Kuznets <toli@docker.com>
+Tom Barlow <tomwbarlow@gmail.com>
+Tom Booth <tombooth@gmail.com>
+Tom Denham <tom@tomdee.co.uk>
+Tom Fotherby <tom+github@peopleperhour.com>
+Tom Howe <tom.howe@enstratius.com>
+Tom Hulihan <hulihan.tom159@gmail.com>
+Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
+Tom Wilkie <tom.wilkie@gmail.com>
+Tom X. Tobin <tomxtobin@tomxtobin.com>
+Tomas Tomecek <ttomecek@redhat.com>
+Tomasz Kopczynski <tomek@kopczynski.net.pl>
+Tomasz Lipinski <tlipinski@users.noreply.github.com>
+Tomasz Nurkiewicz <nurkiewicz@gmail.com>
+Tommaso Visconti <tommaso.visconti@gmail.com>
+Tomáš Hrčka <thrcka@redhat.com>
+Tonny Xu <tonny.xu@gmail.com>
+Tony Abboud <tdabboud@hotmail.com>
+Tony Daws <tony@daws.ca>
+Tony Miller <mcfiredrill@gmail.com>
+toogley <toogley@mailbox.org>
+Torstein Husebø <torstein@huseboe.net>
+Tõnis Tiigi <tonistiigi@gmail.com>
+tpng <benny.tpng@gmail.com>
+tracylihui <793912329@qq.com>
+Trapier Marshall <trapier.marshall@docker.com>
+Travis Cline <travis.cline@gmail.com>
+Travis Thieman <travis.thieman@gmail.com>
+Trent Ogren <tedwardo2@gmail.com>
+Trevor <trevinwoodstock@gmail.com>
+Trevor Pounds <trevor.pounds@gmail.com>
+Trevor Sullivan <pcgeek86@gmail.com>
+trishnaguha <trishnaguha17@gmail.com>
+Tristan Carel <tristan@cogniteev.com>
+Troy Denton <trdenton@gmail.com>
+Tyler Brock <tyler.brock@gmail.com>
+Tzu-Jung Lee <roylee17@gmail.com>
+Ulysse Carion <ulyssecarion@gmail.com>
+unknown <sebastiaan@ws-key-sebas3.dpi1.dpi>
+vagrant <vagrant@ubuntu-14.04-amd64-vbox>
+Vaidas Jablonskis <jablonskis@gmail.com>
+Veres Lajos <vlajos@gmail.com>
+vgeta <gopikannan.venugopalsamy@gmail.com>
+Victor Algaze <valgaze@gmail.com>
+Victor Coisne <victor.coisne@dotcloud.com>
+Victor Costan <costan@gmail.com>
+Victor I. Wood <viw@t2am.com>
+Victor Lyuboslavsky <victor@victoreda.com>
+Victor Marmol <vmarmol@google.com>
+Victor Palma <palma.victor@gmail.com>
+Victor Vieux <victor.vieux@docker.com>
+Victoria Bialas <victoria.bialas@docker.com>
+Vijaya Kumar K <vijayak@caviumnetworks.com>
+Viktor Stanchev <me@viktorstanchev.com>
+Viktor Vojnovski <viktor.vojnovski@amadeus.com>
+VinayRaghavanKS <raghavan.vinay@gmail.com>
+Vincent Batts <vbatts@redhat.com>
+Vincent Bernat <bernat@luffy.cx>
+Vincent Bernat <Vincent.Bernat@exoscale.ch>
+Vincent Demeester <vincent@sbr.pm>
+Vincent Giersch <vincent.giersch@ovh.net>
+Vincent Mayers <vincent.mayers@inbloom.org>
+Vincent Woo <me@vincentwoo.com>
+Vinod Kulkarni <vinod.kulkarni@gmail.com>
+Vishal Doshi <vishal.doshi@gmail.com>
+Vishnu Kannan <vishnuk@google.com>
+Vitor Monteiro <vmrmonteiro@gmail.com>
+Vivek Agarwal <me@vivek.im>
+Vivek Dasgupta <vdasgupt@redhat.com>
+Vivek Goyal <vgoyal@redhat.com>
+Vladimir Bulyga <xx@ccxx.cc>
+Vladimir Kirillov <proger@wilab.org.ua>
+Vladimir Pouzanov <farcaller@google.com>
+Vladimir Rutsky <altsysrq@gmail.com>
+Vladimir Varankin <nek.narqo+git@gmail.com>
+VladimirAus <v_roudakov@yahoo.com>
+Vojtech Vitek (V-Teq) <vvitek@redhat.com>
+waitingkuo <waitingkuo0527@gmail.com>
+Walter Leibbrandt <github@wrl.co.za>
+Walter Stanish <walter@pratyeka.org>
+WANG Chao <wcwxyz@gmail.com>
+Wang Long <long.wanglong@huawei.com>
+Wang Ping <present.wp@icloud.com>
+Wang Xing <hzwangxing@corp.netease.com>
+Wang Yuexiao <wang.yuexiao@zte.com.cn>
+Ward Vandewege <ward@jhvc.com>
+WarheadsSE <max@warheads.net>
+Wayne Chang <wayne@neverfear.org>
+Wei Wu <wuwei4455@gmail.com>
+Wei-Ting Kuo <waitingkuo0527@gmail.com>
+weiyan <weiyan3@huawei.com>
+Weiyang Zhu <cnresonant@gmail.com>
+Wen Cheng Ma <wenchma@cn.ibm.com>
+Wendel Fleming <wfleming@usc.edu>
+Wenkai Yin <yinw@vmware.com>
+Wentao Zhang <zhangwentao234@huawei.com>
+Wenxuan Zhao <viz@linux.com>
+Wenyu You <21551128@zju.edu.cn>
+Wenzhi Liang <wenzhi.liang@gmail.com>
+Wes Morgan <cap10morgan@gmail.com>
+Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
+Will Dietz <w@wdtz.org>
+Will Rouesnel <w.rouesnel@gmail.com>
+Will Weaver <monkey@buildingbananas.com>
+willhf <willhf@gmail.com>
+William Delanoue <william.delanoue@gmail.com>
+William Henry <whenry@redhat.com>
+William Hubbs <w.d.hubbs@gmail.com>
+William Martin <wmartin@pivotal.io>
+William Riancho <wr.wllm@gmail.com>
+William Thurston <thurstw@amazon.com>
+WiseTrem <shepelyov.g@gmail.com>
+wlan0 <sidharthamn@gmail.com>
+Wolfgang Powisch <powo@powo.priv.at>
+wonderflow <wonderflow.sun@gmail.com>
+Wonjun Kim <wonjun.kim@navercorp.com>
+xamyzhao <x.amy.zhao@gmail.com>
+Xianglin Gao <xlgao@zju.edu.cn>
+Xianlu Bird <xianlubird@gmail.com>
+XiaoBing Jiang <s7v7nislands@gmail.com>
+Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
+xiekeyang <xiekeyang@huawei.com>
+Xinbo Weng <xihuanbo_0521@zju.edu.cn>
+Xinzi Zhou <imdreamrunner@gmail.com>
+Xiuming Chen <cc@cxm.cc>
+xlgao-zju <xlgao@zju.edu.cn>
+xuzhaokui <cynicholas@gmail.com>
+Yahya <ya7yaz@gmail.com>
+YAMADA Tsuyoshi <tyamada@minimum2scp.org>
+Yan Feng <yanfeng2@huawei.com>
+Yang Bai <hamo.by@gmail.com>
+yangshukui <yangshukui@huawei.com>
+Yanqiang Miao <miao.yanqiang@zte.com.cn>
+Yasunori Mahata <nori@mahata.net>
+Yestin Sun <sunyi0804@gmail.com>
+Yi EungJun <eungjun.yi@navercorp.com>
+Yibai Zhang <xm1994@gmail.com>
+Yihang Ho <hoyihang5@gmail.com>
+Ying Li <ying.li@docker.com>
+Yohei Ueda <yohei@jp.ibm.com>
+Yong Tang <yong.tang.github@outlook.com>
+Yongzhi Pan <panyongzhi@gmail.com>
+yorkie <yorkiefixer@gmail.com>
+You-Sheng Yang (楊有勝) <vicamo@gmail.com>
+Youcef YEKHLEF <yyekhlef@gmail.com>
+Yu Peng <yu.peng36@zte.com.cn>
+Yuan Sun <sunyuan3@huawei.com>
+yuchangchun <yuchangchun1@huawei.com>
+yuchengxia <yuchengxia@huawei.com>
+Yunxiang Huang <hyxqshk@vip.qq.com>
+Yurii Rashkovskii <yrashk@gmail.com>
+yuzou <zouyu7@huawei.com>
+Zac Dover <zdover@redhat.com>
+Zach Borboa <zachborboa@gmail.com>
+Zachary Jaffee <zij@case.edu>
+Zain Memon <zain@inzain.net>
+Zaiste! <oh@zaiste.net>
+Zane DeGraffenried <zane.deg@gmail.com>
+Zefan Li <lizefan@huawei.com>
+Zen Lin(Zhinan Lin) <linzhinan@huawei.com>
+Zhang Kun <zkazure@gmail.com>
+Zhang Wei <zhangwei555@huawei.com>
+Zhang Wentao <zhangwentao234@huawei.com>
+zhangxianwei <xianwei.zw@alibaba-inc.com>
+Zhenan Ye <21551168@zju.edu.cn>
+zhenghenghuo <zhenghenghuo@zju.edu.cn>
+Zhenkun Bi <bi.zhenkun@zte.com.cn>
+zhouhao <zhouhao@cn.fujitsu.com>
+Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
+Zhu Kunjia <zhu.kunjia@zte.com.cn>
+Zhuoyun Wei <wzyboy@wzyboy.org>
+Zilin Du <zilin.du@gmail.com>
+zimbatm <zimbatm@zimbatm.com>
+Ziming Dong <bnudzm@foxmail.com>
+ZJUshuaizhou <21551191@zju.edu.cn>
+zmarouf <zeid.marouf@gmail.com>
+Zoltan Tombol <zoltan.tombol@gmail.com>
+zqh <zqhxuyuan@gmail.com>
+Zuhayr Elahi <elahi.zuhayr@gmail.com>
+Zunayed Ali <zunayed@gmail.com>
+Álex González <agonzalezro@gmail.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
+Átila Camurça Alves <camurca.home@gmail.com>
+尹吉峰 <jifeng.yin@gmail.com>
+搏通 <yufeng.pyf@alibaba-inc.com>
diff --git a/docker/types.go b/docker/types.go
new file mode 100644
index 0000000..b0ed2e4
--- /dev/null
+++ b/docker/types.go
@@ -0,0 +1,258 @@
+package docker
+
+//
+// Types extracted from Docker
+//
+
+import (
+ "time"
+
+ "github.com/containers/image/v5/pkg/strslice"
+ digest "github.com/opencontainers/go-digest"
+)
+
+// github.com/moby/moby/image/rootfs.go
+const TypeLayers = "layers"
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
+
+// github.com/moby/moby/image/rootfs.go
+// V2S2RootFS describes an image's root filesystem
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type V2S2RootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// V2S2History stores build commands that were used to create an image
+type V2S2History struct {
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // CreatedBy keeps the Dockerfile command used while building the image
+ CreatedBy string `json:"created_by,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // EmptyLayer is set to true if this history item did not generate a
+ // layer. Otherwise, the history item is associated with the next
+ // layer in the RootFS section.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// ID is the content-addressable ID of an image.
+type ID digest.Digest
+
+// github.com/moby/moby/api/types/container/config.go
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:",omitempty"` // Time to wait after the container starts before running the first check.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// github.com/docker/go-connections/nat/nat.go
+// PortSet is a collection of structs indexed by Port
+type PortSet map[Port]struct{}
+
+// github.com/docker/go-connections/nat/nat.go
+// Port is a string containing port number and protocol in the format "80/tcp"
+type Port string
+
+// github.com/moby/moby/api/types/container/config.go
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container, also support user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variable to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// github.com/docker/distribution/manifest/schema1/config_builder.go
+// For non-top-level layers, create fake V1Compatibility strings that
+// fit the format and don't collide with anything else, but don't
+// result in runnable images on their own.
+type V1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig struct {
+ Cmd []string
+ } `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// V1Image stores the V1 image configuration.
+type V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // Variant is a variant of the CPU that the image is built and runs on
+ Variant string `json:"variant,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// V2Image stores the image configuration
+type V2Image struct {
+ V1Image
+ Parent ID `json:"parent,omitempty"` // nolint:govet
+ RootFS *V2S2RootFS `json:"rootfs,omitempty"`
+ History []V2S2History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+}
+
+// github.com/docker/distribution/manifest/versioned.go
+// V2Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type V2Versioned struct {
+ // SchemaVersion is the image manifest schema that this image follows
+ SchemaVersion int `json:"schemaVersion"`
+
+ // MediaType is the media type of this schema.
+ MediaType string `json:"mediaType,omitempty"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// V2S1FSLayer is a container struct for BlobSums defined in an image manifest
+type V2S1FSLayer struct {
+ // BlobSum is the tarsum of the referenced filesystem image layer
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// V2S1History stores unstructured v1 compatibility information
+type V2S1History struct {
+ // V1Compatibility is the raw v1 compatibility information
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// V2S1Manifest provides the base accessible fields for working with V2 image
+// format in the registry.
+type V2S1Manifest struct {
+ V2Versioned
+
+ // Name is the name of the image's repository
+ Name string `json:"name"`
+
+ // Tag is the tag of the image specified by this manifest
+ Tag string `json:"tag"`
+
+ // Architecture is the host architecture on which this image is intended to
+ // run
+ Architecture string `json:"architecture"`
+
+ // FSLayers is a list of filesystem layer blobSums contained in this image
+ FSLayers []V2S1FSLayer `json:"fsLayers"`
+
+ // History is a list of unstructured historical data for v1 compatibility
+ History []V2S1History `json:"history"`
+}
+
+// github.com/docker/distribution/blobs.go
+// V2S2Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type V2S2Descriptor struct {
+ // MediaType describe the type of the content. All text based formats are
+ // encoded as utf-8.
+ MediaType string `json:"mediaType,omitempty"`
+
+ // Size in bytes of content.
+ Size int64 `json:"size,omitempty"`
+
+ // Digest uniquely identifies the content. A byte stream can be verified
+ // against this digest.
+ Digest digest.Digest `json:"digest,omitempty"`
+
+ // URLs contains the source URLs of this content.
+ URLs []string `json:"urls,omitempty"`
+
+ // NOTE: Before adding a field here, please ensure that all
+ // other options have been exhausted. Much of the type relationships
+ // depend on the simplicity of this type.
+}
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+// V2S2Manifest defines a schema2 manifest.
+type V2S2Manifest struct {
+ V2Versioned
+
+ // Config references the image configuration as a blob.
+ Config V2S2Descriptor `json:"config"`
+
+ // Layers lists descriptors for the layers referenced by the
+ // configuration.
+ Layers []V2S2Descriptor `json:"layers"`
+}
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..4584cf5
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,33 @@
+PREFIX := /usr/local
+DATADIR := ${PREFIX}/share
+MANDIR := $(DATADIR)/man
+# Following go-md2man is guaranteed on host
+GOMD2MAN ?= ../tests/tools/build/go-md2man
+ifeq ($(shell uname -s),FreeBSD)
+SED=gsed
+else
+SED=sed
+endif
+
+docs: $(patsubst %.md,%,$(wildcard *.md))
+
+%.1: %.1.md
+### sed is used to filter http/s links as well as relative links
+### replaces "\" at the end of a line with two spaces
+### this ensures that manpages are rendered correctly
+ @$(SED) -e 's/\((buildah[^)]*\.md\(#.*\)\?)\)//g' \
+ -e 's/\[\(buildah[^]]*\)\]/\1/g' \
+ -e 's/\[\([^]]*\)](http[^)]\+)/\1/g' \
+ -e 's;<\(/\)\?\(a\|a\s\+[^>]*\|sup\)>;;g' \
+ -e 's/\\$$/ /g' $< | \
+ $(GOMD2MAN) -in /dev/stdin -out $@
+
+.PHONY: install
+install:
+ install -d ${DESTDIR}/${MANDIR}/man1
+ install -m 0644 buildah*.1 ${DESTDIR}/${MANDIR}/man1
+ install -m 0644 links/buildah*.1 ${DESTDIR}/${MANDIR}/man1
+
+.PHONY: clean
+clean:
+ $(RM) buildah*.1
diff --git a/docs/buildah-add.1.md b/docs/buildah-add.1.md
new file mode 100644
index 0000000..11f9505
--- /dev/null
+++ b/docs/buildah-add.1.md
@@ -0,0 +1,141 @@
+# buildah-add "1" "April 2021" "buildah"
+
+## NAME
+buildah\-add - Add the contents of a file, URL, or a directory to a container.
+
+## SYNOPSIS
+**buildah add** [*options*] *container* *src* [[*src* ...] *dest*]
+
+## DESCRIPTION
+Adds the contents of a file, URL, or a directory to a container's working
+directory or a specified location in the container. If a local source file
+appears to be an archive, its contents are extracted and added instead of the
+archive file itself. If a local directory is specified as a source, its
+*contents* are copied to the destination.
+
+## OPTIONS
+
+**--add-history**
+
+Add an entry to the history which will note the digest of the added content.
+Defaults to false.
+
+Note: You can also override the default value of --add-history by setting the
+BUILDAH\_HISTORY environment variable. `export BUILDAH_HISTORY=true`
+
+**--checksum** *checksum*
+
+Checksum the source content. The value of *checksum* must be a standard
+container digest string. Only supported for HTTP sources.
+
+**--chmod** *permissions*
+
+Sets the access permissions of the destination content. Accepts the numerical format.
+
+**--chown** *owner*:*group*
+
+Sets the user and group ownership of the destination content.
+
+**--contextdir** *directory*
+
+Build context directory. Specifying a context directory causes Buildah to
+chroot into that context directory. This means copying files pointed at
+by symbolic links outside of the chroot will fail.
+
+**--from** *containerOrImage*
+
+Use the root directory of the specified working container or image as the root
+directory when resolving absolute source paths and the path of the context
+directory. If an image needs to be pulled, options recognized by `buildah pull`
+can be used.
+
+**--ignorefile** *file*
+
+Path to an alternative .containerignore (.dockerignore) file. Requires \-\-contextdir be specified.
+
+**--quiet**, **-q**
+
+Refrain from printing a digest of the added content.
+
+**--retry** *attempts*
+
+Number of times to retry in case of failure when performing pull of images from registry.
+
+Defaults to `3`.
+
+**--retry-delay** *duration*
+
+Duration of delay between retry attempts in case of failure when performing pull of images from registry.
+
+Defaults to `2s`.
+
+## EXAMPLE
+
+buildah add containerID '/myapp/app.conf' '/myapp/app.conf'
+
+buildah add --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
+
+buildah add --chmod 660 containerID '/myapp/app.conf' '/myapp/app.conf'
+
+buildah add containerID '/home/myuser/myproject.go'
+
+buildah add containerID '/home/myuser/myfiles.tar' '/tmp'
+
+buildah add containerID '/tmp/workingdir' '/tmp/workingdir'
+
+buildah add containerID 'https://github.com/containers/buildah/blob/main/README.md' '/tmp'
+
+buildah add containerID 'passwd' 'certs.d' /etc
+
+## FILES
+
+### .containerignore
+
+If a .containerignore or .dockerignore file exists in the context directory,
+`buildah add` reads its contents. If both exist, then .containerignore is used.
+
+When the `--ignorefile` option is specified Buildah reads it and
+uses it to decide which content to exclude when copying content into the
+working container.
+
+Users can specify a series of Unix shell glob patterns in an ignore file to
+identify files/directories to exclude.
+
+Buildah supports a special wildcard string `**` which matches any number of
+directories (including zero). For example, `**/*.go` will exclude all files that
+end with .go that are found in all directories.
+
+Example .containerignore/.dockerignore file:
+
+```
+# here are files we want to exclude
+*/*.c
+**/output*
+src
+```
+
+`*/*.c`
+Excludes files and directories whose names end with .c in any top level subdirectory. For example, the source file include/rootless.c.
+
+`**/output*`
+Excludes files and directories starting with `output` from any directory.
+
+`src`
+Excludes files named src and the directory src as well as any content in it.
+
+Lines starting with ! (exclamation mark) can be used to make exceptions to
+exclusions. The following is an example .containerignore file that uses this
+mechanism:
+```
+*.doc
+!Help.doc
+```
+
+Exclude all doc files except Help.doc when copying content into the container.
+
+This functionality is compatible with the handling of .containerignore files described here:
+
+https://github.com/containers/buildah/blob/main/docs/containerignore.5.md
+
+## SEE ALSO
+buildah(1), containerignore(5)
diff --git a/docs/buildah-build.1.md b/docs/buildah-build.1.md
new file mode 100644
index 0000000..c2ebe95
--- /dev/null
+++ b/docs/buildah-build.1.md
@@ -0,0 +1,1316 @@
+# buildah-build "1" "April 2017" "buildah"
+
+## NAME
+buildah\-build - Build an image using instructions from Containerfiles
+
+## SYNOPSIS
+
+**buildah build** [*options*] [*context*]
+
+**buildah bud** [*options*] [*context*]
+
+## DESCRIPTION
+Builds an image using instructions from one or more Containerfiles or Dockerfiles and a specified
+build context directory. A Containerfile uses the same syntax as a Dockerfile internally. For this
+document, a file referred to as a Containerfile can be a file named either 'Containerfile' or 'Dockerfile'.
+
+The build context directory can be specified as the http(s) URL of an archive, git repository or Containerfile.
+
+If no context directory is specified, then Buildah will assume the current working directory as build context, which should contain a Containerfile.
+
+Containerfiles ending with a ".in" suffix will be preprocessed via cpp(1). This can be useful to decompose Containerfiles into several reusable parts that can be used via CPP's **#include** directive. Notice, a Containerfile.in file can still be used by other tools when manually preprocessing them via `cpp -E`. Any comments ( Lines beginning with `#` ) in included Containerfile(s) that are not preprocess commands, will be printed as warnings during builds.
+
+When the URL is an archive, the contents of the URL is downloaded to a temporary location and extracted before execution.
+
+When the URL is a Containerfile, the file is downloaded to a temporary location.
+
+When a Git repository is set as the URL, the repository is cloned locally and then used as the build context. A non-default branch (or commit ID) and subdirectory of the cloned git repository can be used by including their names at the end of the URL in the form `myrepo.git#mybranch:subdir`, `myrepo.git#mycommit:subdir`, or `myrepo.git#:subdir` if the subdirectory should be used from the default branch.
+
+## OPTIONS
+
+**--add-host**=[]
+
+Add a custom host-to-IP mapping (host:ip)
+
+Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times. Conflicts with the --no-hosts option.
+
+**--all-platforms**
+
+Instead of building for a set of platforms specified using the **--platform** option, inspect the build's base images, and build for all of the platforms for which they are all available. Stages that use *scratch* as a starting point can not be inspected, so at least one non-*scratch* stage must be present for detection to work usefully.
+
+**--annotation** *annotation[=value]*
+
+Add an image *annotation* (e.g. annotation=*value*) to the image metadata. Can be used multiple times.
+If *annotation* is named, but neither `=` nor a `value` is provided, then the *annotation* is set to an empty value.
+
+Note: this information is not present in Docker image formats, so it is discarded when writing images in Docker formats.
+
+**--arch**="ARCH"
+
+Set the ARCH of the image to be built, and that of the base image to be pulled, if the build uses one, to the provided value instead of using the architecture of the host. (Examples: arm, arm64, 386, amd64, ppc64le, s390x)
+
+**--authfile** *path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`.
+
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--build-arg** *arg=value*
+
+Specifies a build argument and its value, which will be interpolated in
+instructions read from the Containerfiles in the same way that environment
+variables are, but which will not be added to environment variable list in the
+resulting image's configuration.
+
+Please refer to the [BUILD TIME VARIABLES](#build-time-variables) section for the
+list of variables that can be overridden within the Containerfile at run time.
+
+**--build-arg-file** *path*
+
+Specifies a file containing lines of build arguments of the form arg=value. The suggested file name is argfile.conf.
+
+Comment lines beginning with `#` are ignored, along with blank lines. All others should be of the `arg=value` format passed to `--build-arg`.
+
+If several arguments are provided via the `--build-arg-file` and `--build-arg` options, the build arguments will be merged across all of the provided files and command line arguments.
+
+Any file provided in a `--build-arg-file` option will be read before the arguments supplied via the `--build-arg` option.
+
+When a given argument name is specified several times, the last instance is the one that is passed to the resulting builds. This means `--build-arg` values always override those in a `--build-arg-file`.
+
+**--build-context** *name=value*
+
+Specify an additional build context using its short name and its location. Additional
+build contexts can be referenced in the same manner as we access different stages in `COPY`
+instruction.
+
+Valid values could be:
+* Local directory – e.g. --build-context project2=../path/to/project2/src
+* HTTP URL to a tarball – e.g. --build-context src=https://example.org/releases/src.tar
+* Container image – specified with a container-image:// prefix, e.g. --build-context alpine=container-image://alpine:3.15, (also accepts docker://, docker-image://)
+
+On the Containerfile side, you can reference the build context on all commands that accept the “from” parameter.
+Here’s how that might look:
+
+```Dockerfile
+FROM [name]
+COPY --from=[name] ...
+RUN --mount=from=[name] …
+```
+
+The value of `[name]` is matched with the following priority order:
+
+* Named build context defined with --build-context [name]=..
+* Stage defined with AS [name] inside Containerfile
+* Image [name], either local or in a remote registry
+
+**--cache-from**
+
+Repository to utilize as a potential list of cache sources. When specified, Buildah will try to look for
+cache images in the specified repositories and will attempt to pull cache images instead of actually
+executing the build steps locally. Buildah will only attempt to pull previously cached images if they
+are considered as valid cache hits.
+
+Use the `--cache-to` option to populate a remote repository or repositories with cache content.
+
+Example
+
+```bash
+# populate a cache and also consult it
+buildah build -t test --layers --cache-to registry/myrepo/cache --cache-from registry/myrepo/cache .
+```
+
+Note: `--cache-from` option is ignored unless `--layers` is specified.
+
+Note: Buildah's `--cache-from` option is designed differently than Docker and BuildKit's `--cache-from` option. Buildah's
+distributed cache mechanism pulls intermediate images from the remote registry itself, unlike Docker and BuildKit where
+the intermediate image is stored in the image itself. Buildah's approach is similar to kaniko, which
+does not inflate the size of the original image with intermediate images. Also, intermediate images can truly be
+kept distributed across one or more remote registries using Buildah's caching mechanism.
+
+**--cache-to**
+
+Set this flag to specify a list of remote repositories that will be used to store cache images. Buildah will attempt to
+push newly built cache image to the remote repositories.
+
+Note: Use the `--cache-from` option in order to use cache content in a remote repository.
+
+Example
+
+```bash
+# populate a cache and also consult it
+buildah build -t test --layers --cache-to registry/myrepo/cache --cache-from registry/myrepo/cache .
+```
+
+Note: `--cache-to` option is ignored unless `--layers` is specified.
+
+Note: Buildah's `--cache-to` option is designed differently than Docker and BuildKit's `--cache-to` option. Buildah's
+distributed cache mechanism pushes intermediate images to the remote registry itself, unlike Docker and BuildKit where
+the intermediate image is stored in the image itself. Buildah's approach is similar to kaniko, which
+does not inflate the size of the original image with intermediate images. Also, intermediate images can truly be
+kept distributed across one or more remote registries using Buildah's caching mechanism.
+
+**--cache-ttl** *duration*
+
+Limit the use of cached images to only consider images with created timestamps less than *duration* ago.
+For example if `--cache-ttl=1h` is specified, Buildah will only consider intermediate cache images which are created
+under the duration of one hour, and intermediate cache images outside this duration will be ignored.
+
+Note: Setting `--cache-ttl=0` manually is equivalent to using `--no-cache` in the implementation since this would
+effectively mean that user is not willing to use cache at all.
+
+**--cap-add**=*CAP\_xxx*
+
+When executing RUN instructions, run the command specified in the instruction
+with the specified capability added to its capability set.
+Certain capabilities are granted by default; this option can be used to add
+more.
+
+**--cap-drop**=*CAP\_xxx*
+
+When executing RUN instructions, run the command specified in the instruction
+with the specified capability removed from its capability set.
+The CAP\_CHOWN, CAP\_DAC\_OVERRIDE, CAP\_FOWNER, CAP\_FSETID, CAP\_KILL,
+CAP\_NET\_BIND\_SERVICE, CAP\_SETFCAP, CAP\_SETGID, CAP\_SETPCAP, and
+CAP\_SETUID capabilities are granted by default; this option can be used to
+remove them. The list of default capabilities is managed in containers.conf(5).
+
+If a capability is specified to both the **--cap-add** and **--cap-drop**
+options, it will be dropped, regardless of the order in which the options were
+given.
+
+**--cert-dir** *path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+The default certificates directory is _/etc/containers/certs.d_.
+
+**--cgroup-parent**=""
+
+Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+
+**--cgroupns** *how*
+
+Sets the configuration for cgroup namespaces when handling `RUN` instructions.
+The configured value can be "" (the empty string) or "private" to indicate
+that a new cgroup namespace should be created, or it can be "host" to indicate
+that the cgroup namespace in which `buildah` itself is being run should be reused.
+
+**--compress**
+
+This option is added to be aligned with other containers CLIs.
+Buildah doesn't send a copy of the context directory to a daemon or a remote server.
+Thus, compressing the data before sending it is irrelevant to Buildah.
+
+**--cpp-flag**=""
+
+Set additional flags to pass to the C Preprocessor cpp(1).
+Containerfiles ending with a ".in" suffix will be preprocessed via cpp(1). This option can be used to pass additional flags to cpp.
+Note: You can also set default CPPFLAGS by setting the BUILDAH\_CPPFLAGS
+environment variable (e.g., `export BUILDAH_CPPFLAGS="-DDEBUG"`).
+
+**--cpu-period**=*0*
+
+Set the CPU period for the Completely Fair Scheduler (CFS), which is a
+duration in microseconds. Once the container's CPU quota is used up, it will
+not be scheduled to run until the current period ends. Defaults to 100000
+microseconds.
+
+On some systems, changing the CPU limits may not be allowed for non-root
+users. For more details, see
+https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error
+
+**--cpu-quota**=*0*
+
+Limit the CPU CFS (Completely Fair Scheduler) quota
+
+Limit the container's CPU usage. By default, containers run with the full
+CPU resource. This flag tells the kernel to restrict the container's CPU usage
+to the quota you specify.
+
+On some systems, changing the CPU limits may not be allowed for non-root
+users. For more details, see
+https://github.com/containers/podman/blob/main/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error
+
+**--cpu-shares**, **-c**=*0*
+
+CPU shares (relative weight)
+
+By default, all containers get the same proportion of CPU cycles. This proportion
+can be modified by changing the container's CPU share weighting relative
+to the weighting of all other running containers.
+
+To modify the proportion from the default of 1024, use the **--cpu-shares**
+flag to set the weighting to 2 or higher.
+
+The proportion will only apply when CPU-intensive processes are running.
+When tasks in one container are idle, other containers can use the
+left-over CPU time. The actual amount of CPU time will vary depending on
+the number of containers running on the system.
+
+For example, consider three containers, one has a cpu-share of 1024 and
+two others have a cpu-share setting of 512. When processes in all three
+containers attempt to use 100% of CPU, the first container would receive
+50% of the total CPU time. If you add a fourth container with a cpu-share
+of 1024, the first container only gets 33% of the CPU. The remaining containers
+receive 16.5%, 16.5% and 33% of the CPU.
+
+On a multi-core system, the shares of CPU time are distributed over all CPU
+cores. Even if a container is limited to less than 100% of CPU time, it can
+use 100% of each individual CPU core.
+
+For example, consider a system with more than three cores. If you start one
+container **{C0}** with **-c=512** running one process, and another container
+**{C1}** with **-c=1024** running two processes, this can result in the following
+division of CPU shares:
+
+ PID container CPU CPU share
+ 100 {C0} 0 100% of CPU0
+ 101 {C1} 1 100% of CPU1
+ 102 {C1} 2 100% of CPU2
+
+**--cpuset-cpus**=""
+
+ CPUs in which to allow execution (0-3, 0,1)
+
+**--cpuset-mems**=""
+
+Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+
+If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
+then processes in your container will only use memory from the first
+two memory nodes.
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--cw** *options*
+
+Produce an image suitable for use as a confidential workload running in a
+trusted execution environment (TEE) using krun (i.e., *crun* built with the
+libkrun feature enabled and invoked as *krun*). Instead of the conventional
+contents, the root filesystem of the image will contain an encrypted disk image
+and configuration information for krun.
+
+The value for *options* is a comma-separated list of key=value pairs, supplying
+configuration information which is needed for producing the additional data
+which will be included in the container image.
+
+Recognized _keys_ are:
+
+*attestation_url*: The location of a key broker / attestation server.
+If a value is specified, the new image's workload ID, along with the passphrase
+used to encrypt the disk image, will be registered with the server, and the
+server's location will be stored in the container image.
+At run-time, krun is expected to contact the server to retrieve the passphrase
+using the workload ID, which is also stored in the container image.
+If no value is specified, a *passphrase* value *must* be specified.
+
+*cpus*: The number of virtual CPUs which the image expects to be run with at
+run-time. If not specified, a default value will be supplied.
+
+*firmware_library*: The location of the libkrunfw-sev shared library. If not
+specified, `buildah` checks for its presence in a number of hard-coded
+locations.
+
+*memory*: The amount of memory which the image expects to be run with at
+run-time, as a number of megabytes. If not specified, a default value will be
+supplied.
+
+*passphrase*: The passphrase to use to encrypt the disk image which will be
+included in the container image.
+If no value is specified, but an *attestation_url* value is specified, a
+randomly-generated passphrase will be used.
+The authors recommend setting an *attestation_url* but not a *passphrase*.
+
+*slop*: Extra space to allocate for the disk image compared to the size of the
+container image's contents, expressed either as a percentage (..%) or a size
+value (bytes, or larger units if suffixes like KB or MB are present), or a sum
+of two or more such specifications. If not specified, `buildah` guesses that
+25% more space than the contents will be enough, but this option is provided in
+case its guess is wrong.
+
+*type*: The type of trusted execution environment (TEE) which the image should
+be marked for use with. Accepted values are "SEV" (AMD Secure Encrypted
+Virtualization - Encrypted State) and "SNP" (AMD Secure Encrypted
+Virtualization - Secure Nested Paging). If not specified, defaults to "SNP".
+
+*workload_id*: A workload identifier which will be recorded in the container
+image, to be used at run-time for retrieving the passphrase which was used to
+encrypt the disk image. If not specified, a semi-random value will be derived
+from the base image's image ID.
+
+**--decryption-key** *key[:passphrase]*
+
+The [key[:passphrase]] to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
+
+**--device**=*device*
+
+Add a host device to the container. Optional *permissions* parameter
+can be used to specify device permissions, it is combination of
+**r** for read, **w** for write, and **m** for **mknod**(2).
+
+Example: **--device=/dev/sdc:/dev/xvdc:rwm**.
+
+Note: if _host_device_ is a symbolic link then it will be resolved first.
+The container will only store the major and minor numbers of the host device.
+
+Note: if the user only has access rights via a group, accessing the device
+from inside a rootless container will fail. The **crun**(1) runtime offers a
+workaround for this by adding the option **--annotation run.oci.keep_original_groups=1**.
+
+**--disable-compression**, **-D**
+
+Don't compress filesystem layers when building the image unless it is required
+by the location where the image is being written. This is the default setting,
+because image layers are compressed automatically when they are pushed to
+registries, and images being written to local storage would only need to be
+decompressed again to be stored. Compression can be forced in all cases by
+specifying **--disable-compression=false**.
+
+**--disable-content-trust**
+
+This is a Docker specific option to disable image verification to a Container
+registry and is not supported by Buildah. This flag is a NOOP and provided
+solely for scripting compatibility.
+
+**--dns**=[]
+
+Set custom DNS servers. Invalid if using **--dns** with **--network=none**.
+
+This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case the `--dns` flag is necessary for every run.
+
+The special value **none** can be specified to disable creation of /etc/resolv.conf in the container by Buildah. The /etc/resolv.conf file in the image will be used without changes.
+
+**--dns-option**=[]
+
+Set custom DNS options. Invalid if using **--dns-option** with **--network=none**.
+
+**--dns-search**=[]
+
+Set custom DNS search domains. Invalid if using **--dns-search** with **--network=none**.
+
+**--env** *env[=value]*
+
+Add a value (e.g. env=*value*) to the built image. Can be used multiple times.
+If neither `=` nor a `*value*` are specified, but *env* is set in the current
+environment, the value from the current environment will be added to the image.
+The value of *env* can be overridden by ENV instructions in the Containerfile.
+To remove an environment variable from the built image, use the `--unsetenv`
+option.
+
+**--file**, **-f** *Containerfile*
+
+Specifies a Containerfile which contains instructions for building the image,
+either a local file or an **http** or **https** URL. If more than one
+Containerfile is specified, *FROM* instructions will only be accepted from the
+last specified file.
+
+If a local file is specified as the Containerfile and it does not exist, the
+context directory will be prepended to the local file value.
+
+If you specify `-f -`, the Containerfile contents will be read from stdin.
+
+**--force-rm** *bool-value*
+
+Always remove intermediate containers after a build, even if the build fails (default false).
+
+**--format**
+
+Control the format for the built image's manifest and configuration data.
+Recognized formats include *oci* (OCI image-spec v1.0, the default) and
+*docker* (version 2, using schema format 2 for the manifest).
+
+Note: You can also override the default format by setting the BUILDAH\_FORMAT
+environment variable. `export BUILDAH_FORMAT=docker`
+
+**--from**
+
+Overrides the first `FROM` instruction within the Containerfile. If there are multiple
+FROM instructions in a Containerfile, only the first is changed.
+
+**--group-add**=*group* | *keep-groups*
+
+Assign additional groups to the primary user running within the container
+process.
+
+- `keep-groups` is a special flag that tells Buildah to keep the supplementary
+group access.
+
+Allows container to use the user's supplementary group access. If file systems
+or devices are only accessible by the rootless user's group, this flag tells the
+OCI runtime to pass the group access into the container. Currently only
+available with the `crun` OCI runtime. Note: `keep-groups` is exclusive, other
+groups cannot be specified with this flag.
+
+**--help**, **-h**
+
+Print usage statement
+
+**--hooks-dir** *path*
+
+Each `*.json` file in the path configures a hook for buildah build containers. For more details on the syntax of the JSON files and the semantics of hook injection, see oci-hooks(5). Buildah currently support both the 1.0.0 and 0.1.0 hook schemas, although the 0.1.0 schema is deprecated.
+
+This option may be set multiple times; paths from later options have higher precedence (oci-hooks(5) discusses directory precedence).
+
+For the annotation conditions, buildah uses any annotations set in the generated OCI configuration.
+
+For the bind-mount conditions, only mounts explicitly requested by the caller via --volume are considered. Bind mounts that buildah inserts by default (e.g. /dev/shm) are not considered.
+
+If --hooks-dir is unset for root callers, Buildah will currently default to /usr/share/containers/oci/hooks.d and /etc/containers/oci/hooks.d in order of increasing precedence. Using these defaults is deprecated, and callers should migrate to explicitly setting --hooks-dir.
+
+**--http-proxy**=true
+
+By default proxy environment variables are passed into the container if set
+for the buildah process. This can be disabled by setting the `--http-proxy`
+option to `false`. The environment variables passed in include `http_proxy`,
+`https_proxy`, `ftp_proxy`, `no_proxy`, and also the upper case versions of
+those.
+
+**--identity-label** *bool-value*
+
+Adds default identity label `io.buildah.version` if set. (default true).
+
+**--ignorefile** *file*
+
+Path to an alternative .containerignore (.dockerignore) file.
+
+**--iidfile** *ImageIDfile*
+
+Write the built image's ID to the file. When `--platform` is specified more
+than once, attempting to use this option will trigger an error.
+
+**--ipc** *how*
+
+Sets the configuration for IPC namespaces when handling `RUN` instructions.
+The configured value can be "" (the empty string) or "container" to indicate
+that a new IPC namespace should be created, or it can be "host" to indicate
+that the IPC namespace in which `buildah` itself is being run should be reused,
+or it can be the path to an IPC namespace which is already in use by
+another process.
+
+**--isolation** *type*
+
+Controls what type of isolation is used for running processes as part of `RUN`
+instructions. Recognized types include *oci* (OCI-compatible runtime, the
+default), *rootless* (OCI-compatible runtime invoked using a modified
+configuration, with *--no-new-keyring* added to its *create* invocation,
+reusing the host's network and UTS namespaces, and creating private IPC, PID,
+mount, and user namespaces; the default for unprivileged users), and *chroot*
+(an internal wrapper that leans more toward chroot(1) than container
+technology, reusing the host's control group, network, IPC, and PID namespaces,
+and creating private mount and UTS namespaces, and creating user namespaces
+only when they're required for ID mapping).
+
+Note: You can also override the default isolation type by setting the
+BUILDAH\_ISOLATION environment variable. `export BUILDAH_ISOLATION=oci`
+
+**--jobs** *N*
+
+Run up to N concurrent stages in parallel. If the number of jobs is greater than 1,
+stdin will be read from /dev/null. If 0 is specified, then there is
+no limit on the number of jobs that run in parallel.
+
+**--label** *label[=value]*
+
+Add an image *label* (e.g. label=*value*) to the image metadata. Can be used multiple times.
+If *label* is named, but neither `=` nor a `value` is provided, then the *label* is set to an empty value.
+
+Users can set a special LABEL **io.containers.capabilities=CAP1,CAP2,CAP3** in
+a Containerfile that specifies the list of Linux capabilities required for the
+container to run properly. This label specified in a container image tells
+container engines, like Podman, to run the container with just these
+capabilities. The container engine launches the container with just the specified
+capabilities, as long as this list of capabilities is a subset of the default
+list.
+
+If the specified capabilities are not in the default set, container engines
+should print an error message and will run the container with the default
+capabilities.
+
+**--layer-label** *label[=value]*
+
+Add an intermediate image *label* (e.g. label=*value*) to the intermediate image metadata. It can be used multiple times.
+If *label* is named, but neither `=` nor a `value` is provided, then the *label* is set to an empty value.
+
+**--layers** *bool-value*
+
+Cache intermediate images during the build process (Default is `false`).
+
+Note: You can also override the default value of layers by setting the BUILDAH\_LAYERS
+environment variable. `export BUILDAH_LAYERS=true`
+
+**--logfile** *filename*
+
+Log output which would be sent to standard output and standard error to the
+specified file instead of to standard output and standard error.
+
+**--logsplit** *bool-value*
+
+If --logfile and --platform are specified, this flag allows end-users to split the log file for each
+platform into different files with a naming convention of `${logfile}_${platform-os}_${platform-arch}`.
+
+**--manifest** *listName*
+
+Name of the manifest list to which the built image will be added. Creates the
+manifest list if it does not exist. This option is useful for building
+multi-architecture images.
+If _listName_ does not include a registry name component, the registry name
+*localhost* will be prepended to the list name.
+
+**--memory**, **-m**=""
+
+Memory limit (format: <number>[<unit>], where unit = b, k, m or g)
+
+Allows you to constrain the memory available to a container. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited. The actual limit may be rounded up to a multiple of the operating
+system's page size (the value would be very large, that's millions of trillions).
+
+**--memory-swap**="LIMIT"
+
+A limit value equal to memory plus swap. Must be used with the **-m**
+(**--memory**) flag. The swap `LIMIT` should always be larger than **-m**
+(**--memory**) value. By default, the swap `LIMIT` will be set to double
+the value of --memory.
+
+The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes),
+`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a
+unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
+
+**--network**, **--net**=*mode*
+
+Sets the configuration for network namespaces when handling `RUN` instructions.
+
+Valid _mode_ values are:
+
+- **none**: no networking. Invalid if using **--dns**, **--dns-opt**, or **--dns-search**;
+- **host**: use the host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure;
+- **ns:**_path_: path to a network namespace to join;
+- **private**: create a new namespace for the container (default)
+- **\<network name|ID\>**: Join the network with the given name or ID, e.g. use `--network mynet` to join the network with the name mynet. Only supported for rootful users.
+- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. This is the default for rootless containers. It is possible to specify these additional options, they can also be set with `network_cmd_options` in containers.conf:
+ - **allow_host_loopback=true|false**: Allow slirp4netns to reach the host loopback IP (default is 10.0.2.2 or the second IP from slirp4netns cidr subnet when changed, see the cidr option below). The default is false.
+ - **mtu=MTU**: Specify the MTU to use for this network. (Default is `65520`).
+ - **cidr=CIDR**: Specify ip range to use for this network. (Default is `10.0.2.0/24`).
+ - **enable_ipv6=true|false**: Enable IPv6. Default is true. (Required for `outbound_addr6`).
+ - **outbound_addr=INTERFACE**: Specify the outbound interface slirp binds to (ipv4 traffic only).
+ - **outbound_addr=IPv4**: Specify the outbound ipv4 address slirp binds to.
+ - **outbound_addr6=INTERFACE**: Specify the outbound interface slirp binds to (ipv6 traffic only).
+ - **outbound_addr6=IPv6**: Specify the outbound ipv6 address slirp binds to.
+- **pasta[:OPTIONS,...]**: use **pasta**(1) to create a user-mode networking
+ stack. \
+ This is only supported in rootless mode. \
+ By default, IPv4 and IPv6 addresses and routes, as well as the pod interface
+ name, are copied from the host. If port forwarding isn't configured, ports
+ are forwarded dynamically as services are bound on either side (init
+ namespace or container namespace). Port forwarding preserves the original
+ source IP address. Options described in pasta(1) can be specified as
+ comma-separated arguments. \
+ In terms of pasta(1) options, **--config-net** is given by default, in
+ order to configure networking when the container is started, and
+ **--no-map-gw** is also assumed by default, to avoid direct access from
+ container to host using the gateway address. The latter can be overridden
+ by passing **--map-gw** in the pasta-specific options (despite not being an
+ actual pasta(1) option). \
+ Also, **-t none** and **-u none** are passed to disable
+ automatic port forwarding based on bound ports. Similarly, **-T none** and
+ **-U none** are given to disable the same functionality from container to
+ host. \
+ Some examples:
+ - **pasta:--map-gw**: Allow the container to directly reach the host using the
+ gateway address.
+ - **pasta:--mtu,1500**: Specify a 1500 bytes MTU for the _tap_ interface in
+ the container.
+ - **pasta:--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,-m,1500,--no-ndp,--no-dhcpv6,--no-dhcp**,
+ equivalent to default slirp4netns(1) options: disable IPv6, assign
+    `10.0.2.0/24` to the `tap0` interface in the container, with gateway
+    `10.0.2.2`, enable DNS forwarder reachable at `10.0.2.3`, set MTU to 1500
+ bytes, disable NDP, DHCPv6 and DHCP support.
+ - **pasta:-I,tap0,--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,--no-ndp,--no-dhcpv6,--no-dhcp**,
+ equivalent to default slirp4netns(1) options with Podman overrides: same as
+    above, but leave the MTU at 65520 bytes
+ - **pasta:-t,auto,-u,auto,-T,auto,-U,auto**: enable automatic port forwarding
+ based on observed bound ports from both host and container sides
+ - **pasta:-T,5201**: enable forwarding of TCP port 5201 from container to
+ host, using the loopback interface instead of the tap interface for improved
+ performance
+
+**--no-cache**
+
+Do not use existing cached images for the container build. Build from the start with a new set of cached layers.
+
+**--no-hostname**
+
+Do not create the _/etc/hostname_ file in the container for RUN instructions.
+
+By default, Buildah manages the _/etc/hostname_ file, adding the container's own hostname. When the **--no-hostname** option is set, the image's _/etc/hostname_ will be preserved unmodified if it exists.
+
+**--no-hosts**
+
+Do not create the _/etc/hosts_ file in the container for RUN instructions.
+
+By default, Buildah manages _/etc/hosts_, adding the container's own IP address.
+**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified. Conflicts with the --add-host option.
+
+**--omit-history** *bool-value*
+
+Omit build history information in the built image. (default false).
+
+This option is useful for the cases where end users explicitly
+want to set `--omit-history` to omit the optional `History` from
+built images or when working with images built using build tools that
+do not include `History` information in their images.
+
+**--os**="OS"
+
+Set the OS of the image to be built, and that of the base image to be pulled, if the build uses one, instead of using the current operating system of the host.
+
+**--os-feature** *feature*
+
+Set the name of a required operating system *feature* for the image which will
+be built. By default, if the image is not based on *scratch*, the base image's
+required OS feature list is kept, if the base image specified any. This option
+is typically only meaningful when the image's OS is Windows.
+
+If *feature* has a trailing `-`, then the *feature* is removed from the set of
+required features which will be listed in the image.
+
+**--os-version** *version*
+
+Set the exact required operating system *version* for the image which will be
+built. By default, if the image is not based on *scratch*, the base image's
+required OS version is kept, if the base image specified one. This option is
+typically only meaningful when the image's OS is Windows, and is typically set in
+Windows base images, so using this option is usually unnecessary.
+
+**--output**, **-o**=""
+
+Output destination (format: type=local,dest=path)
+
+The --output (or -o) option extends the default behavior of building a container image by allowing users to export the contents of the image as files on the local filesystem, which can be useful for generating local binaries, code generation, etc.
+
+The value for --output is a comma-separated sequence of key=value pairs, defining the output type and options.
+
+Supported _keys_ are:
+- **dest**: Destination path for exported output. Valid value is absolute or relative path, `-` means the standard output.
+- **type**: Defines the type of output to be used. Valid values are documented below.
+
+Valid _type_ values are:
+- **local**: write the resulting build files to a directory on the client-side.
+- **tar**: write the resulting files as a single tarball (.tar).
+
+If no type is specified, the value defaults to **local**.
+Alternatively, instead of a comma-separated sequence, the value of **--output** can be just a destination (in the **dest** format) (e.g. `--output some-path`, `--output -`) where `--output some-path` is treated as if **type=local** and `--output -` is treated as if **type=tar**.
+
+**--pid** *how*
+
+Sets the configuration for PID namespaces when handling `RUN` instructions.
+The configured value can be "" (the empty string) or "private" to indicate
+that a new PID namespace should be created, or it can be "host" to indicate
+that the PID namespace in which `buildah` itself is being run should be reused,
+or it can be the path to a PID namespace which is already in use by another
+process.
+
+**--platform**="OS/ARCH[/VARIANT]"
+
+Set the OS/ARCH of the built image (and its base image, if your build uses one)
+to the provided value instead of using the current operating system and
+architecture of the host (for example `linux/arm`, `linux/arm64`, `linux/amd64`).
+
+The `--platform` flag can be specified more than once, or given a
+comma-separated list of values as its argument. When more than one platform is
+specified, the `--manifest` option should be used instead of the `--tag`
+option.
+
+OS/ARCH pairs are those used by the Go Programming Language. In several cases
+the ARCH value for a platform differs from one produced by other tools such as
+the `arch` command. Valid OS and architecture name combinations are listed as
+values for $GOOS and $GOARCH at https://golang.org/doc/install/source#environment,
+and can also be found by running `go tool dist list`.
+
+The `buildah build` command allows building images for all Linux architectures, even non-native architectures. When building images for a different architecture, the `RUN` instructions require emulation software installed on the host provided by packages like `qemu-user-static`. Note: it is always preferred to build images on the native architecture if possible.
+
+**NOTE:** The `--platform` option may not be used in combination with the `--arch`, `--os`, or `--variant` options.
+
+**--pull**
+
+When the flag is enabled or set explicitly to `true` (with *--pull=true*), attempt to pull the latest image from the registries
+listed in registries.conf if a local image does not exist or the image is newer
+than the one in storage. Raise an error if the image is not in any listed
+registry and is not present locally.
+
+If the flag is disabled (with *--pull=false*), do not pull the image from the
+registry, use only the local version. Raise an error if the image is not
+present locally.
+
+If the pull flag is set to `always` (with *--pull=always*),
+pull the image from the first registry it is found in as listed in registries.conf.
+Raise an error if not found in the registries, even if the image is present locally.
+
+If the pull flag is set to `missing` (with *--pull=missing*),
+pull the image only if it could not be found in the local containers storage.
+Raise an error if no image could be found and the pull fails.
+
+If the pull flag is set to `never` (with *--pull=never*),
+do not pull the image from the registry, use only the local version.
+Raise an error if the image is not present locally.
+
+Defaults to *true*.
+
+**--quiet**, **-q**
+
+Suppress output messages which indicate which instruction is being processed,
+and of progress when pulling images from a registry, and when writing the
+output image.
+
+**--retry** *attempts*
+
+Number of times to retry in case of failure when performing push/pull of images to/from registry.
+
+Defaults to `3`.
+
+**--retry-delay** *duration*
+
+Duration of delay between retry attempts in case of failure when performing push/pull of images to/from registry.
+
+Defaults to `2s`.
+
+**--rm** *bool-value*
+
+Remove intermediate containers after a successful build (default true).
+
+**--runtime** *path*
+
+The *path* to an alternate OCI-compatible runtime, which will be used to run
+commands specified by the **RUN** instruction. Default is `runc`, or `crun` when the machine is configured to use cgroups V2.
+
+Note: You can also override the default runtime by setting the BUILDAH\_RUNTIME
+environment variable. `export BUILDAH_RUNTIME=/usr/bin/crun`
+
+**--runtime-flag** *flag*
+
+Adds global flags for the container runtime. To list the supported flags, please
+consult the manpages of the selected container runtime.
+
+Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
+to buildah build, the option given would be `--runtime-flag log-format=json`.
+
+**--secret**=**id=id,src=path**
+
+Pass secret information to be used in the Containerfile for building images
+in a safe way that will not end up stored in the final image, or be seen in other stages.
+The secret will be mounted in the container at the default location of `/run/secrets/id`.
+
+To later use the secret, use the --mount flag in a `RUN` instruction within a `Containerfile`:
+
+`RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret`
+
+Note: Changing the contents of secret files will not trigger a rebuild of layers that use said secrets.
+
+**--security-opt**=[]
+
+Security Options
+
+ "apparmor=unconfined" : Turn off apparmor confinement for the container
+ "apparmor=your-profile" : Set the apparmor confinement profile for the container
+
+ "label=user:USER" : Set the label user for the container
+ "label=role:ROLE" : Set the label role for the container
+ "label=type:TYPE" : Set the label type for the container
+ "label=level:LEVEL" : Set the label level for the container
+ "label=disable" : Turn off label confinement for the container
+ "no-new-privileges" : Disable container processes from gaining additional privileges
+
+ "seccomp=unconfined" : Turn off seccomp confinement for the container
+    "seccomp=profile.json" : Allowed syscalls seccomp JSON file to be used as a seccomp filter
+
+**--shm-size**=""
+
+Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
+Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes).
+If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`.
+
+**--sign-by** *fingerprint*
+
+Sign the built image using the GPG key that matches the specified fingerprint.
+
+**--skip-unused-stages** *bool-value*
+
+Skip stages in multi-stage builds which don't affect the target stage. (Default is `true`).
+
+**--squash**
+
+Squash all layers, including those from base image(s), into one single layer. (Default is false).
+
+By default, Buildah preserves existing base-image layers and adds only one new layer on a build.
+The --layers option can be used to preserve intermediate build layers.
+
+**--ssh**=**default**|*id[=socket|key[,key]]*
+
+SSH agent socket or keys to expose to the build.
+The socket path can be left empty to use the value of `default=$SSH_AUTH_SOCK`
+
+To later use the ssh agent, use the --mount flag in a `RUN` instruction within a `Containerfile`:
+
+`RUN --mount=type=secret,id=id mycmd`
+
+**--stdin**
+
+Pass stdin into the RUN containers. Sometimes commands being RUN within a Containerfile
+want to request information from the user. For example apt asking for a confirmation for install.
+Use --stdin to be able to interact from the terminal during the build.
+
+**--tag**, **-t** *imageName*
+
+Specifies the name which will be assigned to the resulting image if the build
+process completes successfully.
+If _imageName_ does not include a registry name component, the registry name *localhost* will be prepended to the image name.
+
+**--target** *stageName*
+
+Set the target build stage to build. When building a Containerfile with multiple build stages, --target
+can be used to specify an intermediate build stage by name as the final stage for the resulting image.
+Commands after the target stage will be skipped.
+
+**--timestamp** *seconds*
+
+Set the create timestamp to seconds since epoch to allow for deterministic builds (defaults to current time).
+By default, the created timestamp is changed and written into the image manifest with every commit,
+causing the image's sha256 hash to be different even if the sources are exactly the same otherwise.
+When --timestamp is set, the created timestamp is always set to the time specified and therefore not changed, allowing the image's sha256 to remain the same. All files committed to the layers of the image will be created with the timestamp.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+**--ulimit** *type*=*soft-limit*[:*hard-limit*]
+
+Specifies resource limits to apply to processes launched when processing `RUN` instructions.
+This option can be specified multiple times. Recognized resource types
+include:
+ "core": maximum core dump size (ulimit -c)
+ "cpu": maximum CPU time (ulimit -t)
+ "data": maximum size of a process's data segment (ulimit -d)
+ "fsize": maximum size of new files (ulimit -f)
+ "locks": maximum number of file locks (ulimit -x)
+ "memlock": maximum amount of locked memory (ulimit -l)
+ "msgqueue": maximum amount of data in message queues (ulimit -q)
+ "nice": niceness adjustment (nice -n, ulimit -e)
+ "nofile": maximum number of open files (ulimit -n)
+ "nofile": maximum number of open files (1048576); when run by root
+ "nproc": maximum number of processes (ulimit -u)
+ "nproc": maximum number of processes (1048576); when run by root
+  "rss": maximum resident set size of a process (ulimit -m)
+ "rtprio": maximum real-time scheduling priority (ulimit -r)
+ "rttime": maximum amount of real-time execution between blocking syscalls
+ "sigpending": maximum number of pending signals (ulimit -i)
+ "stack": maximum stack size (ulimit -s)
+
+**--unsetenv** *env*
+
+Unset environment variables from the final image.
+
+**--unsetlabel** *label*
+
+Unset the image label, causing the label not to be inherited from the base image.
+
+**--userns** *how*
+
+Sets the configuration for user namespaces when handling `RUN` instructions.
+The configured value can be "" (the empty string), "private", or "auto" to indicate
+that a new user namespace should be created, it can be "host" to indicate that
+the user namespace in which `buildah` itself is being run should be reused, or
+it can be the path to a user namespace which is already in use by another
+process.
+
+auto: automatically create a unique user namespace.
+
+The --userns=auto flag requires that the user name *containers*, along with a range of subordinate user ids that the build container is allowed to use, be specified in the /etc/subuid and /etc/subgid files.
+
+Example: `containers:2147483647:2147483648`.
+
+Buildah allocates unique ranges of UIDs and GIDs from the containers subordinate user ids. The size of the ranges is based on the number of UIDs required in the image. The number of UIDs and GIDs can be overridden with the size option.
+
+Valid `auto` options:
+
+* gidmapping=CONTAINER_GID:HOST_GID:SIZE: to force a GID mapping to be present in the user namespace.
+* size=SIZE: to specify an explicit size for the automatic user namespace. e.g. --userns=auto:size=8192. If size is not specified, auto will estimate a size for the user namespace.
+* uidmapping=CONTAINER_UID:HOST_UID:SIZE: to force a UID mapping to be present in the user namespace.
+
+**--userns-gid-map** *mapping*
+
+Directly specifies a GID mapping which should be used to set ownership, at the
+filesystem level, on the working container's contents.
+Commands run when handling `RUN` instructions will default to being run in
+their own user namespaces, configured using the UID and GID maps.
+
+Entries in this map take the form of one or more colon-separated triples of a starting
+in-container GID, a corresponding starting host-level GID, and the number of
+consecutive IDs which the map entry represents.
+
+This option overrides the *remap-gids* setting in the *options* section of
+/etc/containers/storage.conf.
+
+If this option is not specified, but a global --userns-gid-map setting is
+supplied, settings from the global option will be used.
+
+**--userns-gid-map-group** *group*
+
+Specifies that a GID mapping which should be used to set ownership, at the
+filesystem level, on the working container's contents, can be found in entries
+in the `/etc/subgid` file which correspond to the specified group.
+Commands run when handling `RUN` instructions will default to being run in
+their own user namespaces, configured using the UID and GID maps.
+If --userns-uid-map-user is specified, but --userns-gid-map-group is not
+specified, `buildah` will assume that the specified user name is also a
+suitable group name to use as the default setting for this option.
+
+Users can specify the maps directly using `--userns-gid-map` described in the buildah(1) man page.
+
+**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless user namespace in the container, rather than being relative to the host as it would be when run rootful.
+
+**--userns-uid-map** *mapping*
+
+Directly specifies a UID mapping which should be used to set ownership, at the
+filesystem level, on the working container's contents.
+Commands run when handling `RUN` instructions will default to being run in
+their own user namespaces, configured using the UID and GID maps.
+
+Entries in this map take the form of one or more colon-separated triples of a starting
+in-container UID, a corresponding starting host-level UID, and the number of
+consecutive IDs which the map entry represents.
+
+This option overrides the *remap-uids* setting in the *options* section of
+/etc/containers/storage.conf.
+
+If this option is not specified, but a global --userns-uid-map setting is
+supplied, settings from the global option will be used.
+
+**--userns-uid-map-user** *user*
+
+Specifies that a UID mapping which should be used to set ownership, at the
+filesystem level, on the working container's contents, can be found in entries
+in the `/etc/subuid` file which correspond to the specified user.
+Commands run when handling `RUN` instructions will default to being run in
+their own user namespaces, configured using the UID and GID maps.
+If --userns-gid-map-group is specified, but --userns-uid-map-user is not
+specified, `buildah` will assume that the specified group name is also a
+suitable user name to use as the default setting for this option.
+
+**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless user namespace in the container, rather than being relative to the host as it would be when run rootful.
+
+**--uts** *how*
+
+Sets the configuration for UTS namespaces when handling `RUN` instructions.
+The configured value can be "" (the empty string) or "container" to indicate
+that a new UTS namespace should be created, or it can be "host" to indicate
+that the UTS namespace in which `buildah` itself is being run should be reused,
+or it can be the path to a UTS namespace which is already in use by another
+process.
+
+**--variant**=""
+
+Set the architecture variant of the image to be pulled.
+
+**--volume**, **-v**[=*[HOST-DIR:CONTAINER-DIR[:OPTIONS]]*]
+
+Mount a host directory into containers when executing *RUN* instructions during
+the build. The `OPTIONS` are a comma delimited list and can be:
+<sup>[[1]](#Footnote1)</sup>
+
+ * [rw|ro]
+ * [U]
+ * [z|Z|O]
+ * [`[r]shared`|`[r]slave`|`[r]private`]
+
+The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR`
+must be an absolute path as well. Buildah bind-mounts the `HOST-DIR` to the
+path you specify. For example, if you supply `/foo` as the host path,
+Buildah copies the contents of `/foo` to the container filesystem on the host
+and bind mounts that into the container.
+
+You can specify multiple **-v** options to mount one or more mounts to a
+container.
+
+ `Write Protected Volume Mounts`
+
+You can add the `:ro` or `:rw` suffix to a volume to mount it read-only or
+read-write mode, respectively. By default, the volumes are mounted read-write.
+See examples.
+
+ `Chowning Volume Mounts`
+
+By default, Buildah does not change the owner and group of source volume directories mounted into containers. If a container is created in a new user namespace, the UID and GID in the container may correspond to another UID and GID on the host.
+
+The `:U` suffix tells Buildah to use the correct host UID and GID based on the UID and GID within the container, to change the owner and group of the source volume.
+
+ `Labeling Volume Mounts`
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a container. Without a label, the security system might
+prevent the processes running inside the container from using the content. By
+default, Buildah does not change the labels set by the OS.
+
+To change a label in the container context, you can add either of two suffixes
+`:z` or `:Z` to the volume mount. These suffixes tell Buildah to relabel file
+objects on the shared volumes. The `z` option tells Buildah that two containers
+share the volume content. As a result, Buildah labels the content with a shared
+content label. Shared volume labels allow all containers to read/write content.
+The `Z` option tells Buildah to label the content with a private unshared label.
+Only the current container can use a private volume.
+
+ `Overlay Volume Mounts`
+
+ The `:O` flag tells Buildah to mount the directory from the host as a temporary storage using the Overlay file system. The `RUN` command containers are allowed to modify contents within the mountpoint and are stored in the container storage in a separate directory. In Overlay FS terms the source directory will be the lower, and the container storage directory will be the upper. Modifications to the mount point are destroyed when the `RUN` command finishes executing, similar to a tmpfs mount point.
+
+ Any subsequent execution of `RUN` commands sees the original source directory content, any changes from previous RUN commands no longer exist.
+
+ One use case of the `overlay` mount is sharing the package cache from the host into the container to allow speeding up builds.
+
+ Note:
+
+ - The `O` flag is not allowed to be specified with the `Z` or `z` flags. Content mounted into the container is labeled with the private label.
+ On SELinux systems, labels in the source directory must be readable by the container label. If not, SELinux container separation must be disabled for the container to work.
+ - Modification of the directory volume mounted into the container with an overlay mount can cause unexpected failures. It is recommended that you do not modify the directory until the container finishes running.
+
+By default bind mounted volumes are `private`. That means any mounts done
+inside container will not be visible on the host and vice versa. This behavior can
+be changed by specifying a volume mount propagation property.
+
+When the mount propagation policy is set to `shared`, any mounts completed inside
+the container on that volume will be visible to both the host and container. When
+the mount propagation policy is set to `slave`, one way mount propagation is enabled
+and any mounts completed on the host for that volume will be visible only inside of the container.
+To control the mount propagation property of the volume use the `:[r]shared`,
+`:[r]slave` or `:[r]private` propagation flag. The propagation property can
+be specified only for bind mounted volumes and not for internal volumes or
+named volumes. For mount propagation to work on the source mount point (the mount point
+where source dir is mounted on) it has to have the right propagation properties. For
+shared volumes, the source mount point has to be shared. And for slave volumes,
+the source mount has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
+
+Use `df <source-dir>` to determine the source mount and then use
+`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to determine propagation
+properties of source mount, if `findmnt` utility is not available, the source mount point
+can be determined by looking at the mount entry in `/proc/self/mountinfo`. Look
+at `optional fields` and see if any propagation properties are specified.
+`shared:X` means the mount is `shared`, `master:X` means the mount is `slave` and if
+nothing is there that means the mount is `private`. <sup>[[1]](#Footnote1)</sup>
+
+To change propagation properties of a mount point use the `mount` command. For
+example, to bind mount the source directory `/foo` do
+`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This
+will convert /foo into a `shared` mount point. The propagation properties of the source
+mount can be changed directly. For instance if `/` is the source mount for
+`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount.
+
+## BUILD TIME VARIABLES
+
+The ENV instruction in a Containerfile can be used to define variable values. When the image
+is built, the values will persist in the container image. At times it is more convenient to
+change the values in the Containerfile via a command-line option rather than changing the
+values within the Containerfile itself.
+
+The following variables can be used in conjunction with the `--build-arg` option to override the
+corresponding values set in the Containerfile using the `ENV` instruction.
+
+ * HTTP_PROXY
+ * HTTPS_PROXY
+ * FTP_PROXY
+ * NO_PROXY
+
+Please refer to the [Using Build Time Variables](#using-build-time-variables) section of the Examples.
+
+## EXAMPLE
+
+### Build an image using local Containerfiles
+
+buildah build .
+
+buildah build -f Containerfile .
+
+cat ~/Containerfile | buildah build -f - .
+
+buildah build -f Containerfile.simple -f Containerfile.notsosimple .
+
+buildah build --timestamp=$(date '+%s') -t imageName .
+
+buildah build -t imageName .
+
+buildah build --tls-verify=true -t imageName -f Containerfile.simple .
+
+buildah build --tls-verify=false -t imageName .
+
+buildah build --runtime-flag log-format=json .
+
+buildah build -f Containerfile --runtime-flag debug .
+
+buildah build --authfile /tmp/auths/myauths.json --cert-dir ~/auth --tls-verify=true --creds=username:password -t imageName -f Containerfile.simple .
+
+buildah build --memory 40m --cpu-period 10000 --cpu-quota 50000 --ulimit nofile=1024:1028 -t imageName .
+
+buildah build --security-opt label=level:s0:c100,c200 --cgroup-parent /path/to/cgroup/parent -t imageName .
+
+buildah build --arch=arm --variant v7 -t imageName .
+
+buildah build --volume /home/test:/myvol:ro,Z -t imageName .
+
+buildah build -v /home/test:/myvol:z,U -t imageName .
+
+buildah build -v /var/lib/dnf:/var/lib/dnf:O -t imageName .
+
+buildah build --layers -t imageName .
+
+buildah build --no-cache -t imageName .
+
+buildah build -f Containerfile --layers --force-rm -t imageName .
+
+buildah build --no-cache --rm=false -t imageName .
+
+buildah build --dns-search=example.com --dns=223.5.5.5 --dns-option=use-vc .
+
+buildah build -f Containerfile.in --cpp-flag="-DDEBUG" -t imageName .
+
+buildah build --network mynet .
+
+buildah build --env LANG=en_US.UTF-8 -t imageName .
+
+buildah build --env EDITOR -t imageName .
+
+buildah build --unsetenv LANG -t imageName .
+
+buildah build --os-version 10.0.19042.1645 -t imageName .
+
+buildah build --os-feature win32k -t imageName .
+
+buildah build --os-feature win32k- -t imageName .
+
+### Building a multi-architecture image using the --manifest option (requires emulation software)
+
+buildah build --arch arm --manifest myimage /tmp/mysrc
+
+buildah build --arch amd64 --manifest myimage /tmp/mysrc
+
+buildah build --arch s390x --manifest myimage /tmp/mysrc
+
+buildah bud --platform linux/s390x,linux/ppc64le,linux/amd64 --manifest myimage /tmp/mysrc
+
+buildah build --platform linux/arm64 --platform linux/amd64 --manifest myimage /tmp/mysrc
+
+buildah bud --all-platforms --manifest myimage /tmp/mysrc
+
+### Building an image using (--output) custom build output
+
+buildah build -o out .
+
+buildah build --output type=local,dest=out .
+
+buildah build --output type=tar,dest=out.tar .
+
+buildah build -o - . > out.tar
+
+### Building an image using a URL
+
+ This will clone the specified GitHub repository from the URL and use it as context. The Containerfile or Dockerfile at the root of the repository is used as the context of the build. This only works if the GitHub repository is a dedicated repository.
+
+ buildah build https://github.com/containers/PodmanHello.git
+
+ Note: GitHub does not support using `git://` for performing `clone` operation due to recent changes in their security guidance (https://github.blog/2021-09-01-improving-git-protocol-security-github/). Use an `https://` URL if the source repository is hosted on GitHub.
+
+### Building an image using a URL to a tarball'ed context
+
+ Buildah will fetch the tarball archive, decompress it and use its contents as the build context. The Containerfile or Dockerfile at the root of the archive and the rest of the archive will get used as the context of the build. If you pass an -f PATH/Containerfile option as well, the system will look for that file inside the contents of the tarball.
+
+ buildah build -f dev/Containerfile https://10.10.10.1/buildah/context.tar.gz
+
+ Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression).
+
+### Using Build Time Variables
+
+#### Replace the value set for the HTTP_PROXY environment variable within the Containerfile.
+
+buildah build --build-arg=HTTP_PROXY="http://127.0.0.1:8321"
+
+## ENVIRONMENT
+
+**BUILD\_REGISTRY\_SOURCES**
+
+BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
+lists of registry names under the keys `insecureRegistries`,
+`blockedRegistries`, and `allowedRegistries`.
+
+When pulling an image from a registry, if the name of the registry matches any
+of the items in the `blockedRegistries` list, the image pull attempt is denied.
+If there are registries in the `allowedRegistries` list, and the registry's
+name is not in the list, the pull attempt is denied.
+
+**TMPDIR**
+The TMPDIR environment variable allows the user to specify where temporary files
+are stored while pulling and pushing images. Defaults to '/var/tmp'.
+
+## FILES
+
+### `.containerignore`/`.dockerignore`
+
+If the .containerignore/.dockerignore file exists in the context directory,
+`buildah build` reads its contents. If both exist, then .containerignore is used.
+Use the `--ignorefile` flag to override the ignore file path location. Buildah uses the content to exclude files and directories from the context directory, when executing COPY and ADD directives in the Containerfile/Dockerfile
+
+Users can specify a series of Unix shell globs in a
+.containerignore/.dockerignore file to identify files/directories to exclude.
+
+Buildah supports a special wildcard string `**` which matches any number of
+directories (including zero). For example, `**/*.go` will exclude all files that
+end with .go that are found in all directories.
+
+Example .containerignore file:
+
+```
+# exclude this content for image
+*/*.c
+**/output*
+src
+```
+
+`*/*.c`
+Excludes files and directories whose names end with .c in any top level subdirectory. For example, the source file include/rootless.c.
+
+`**/output*`
+Excludes files and directories starting with `output` from any directory.
+
+`src`
+Excludes files named src and the directory src as well as any content in it.
+
+Lines starting with ! (exclamation mark) can be used to make exceptions to
+exclusions. The following is an example .containerignore/.dockerignore file that uses this
+mechanism:
+```
+*.doc
+!Help.doc
+```
+
+Exclude all doc files except Help.doc from the image.
+
+This functionality is compatible with the handling of .containerignore files described here:
+
+https://github.com/containers/buildah/blob/main/docs/containerignore.5.md
+
+**registries.conf** (`/etc/containers/registries.conf`)
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+**policy.json** (`/etc/containers/policy.json`)
+
+Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for images, and whether or not the tool should trust the images.
+
+## SEE ALSO
+buildah(1), cpp(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), crun(1), runc(8), containers.conf(5), oci-hooks(5)
+
+## FOOTNOTES
+<a name="Footnote1">1</a>: The Buildah project is committed to inclusivity, a core value of open source. The `master` and `slave` mount propagation terminology used here is problematic and divisive, and should be changed. However, these terms are currently used within the Linux kernel and must be used as-is at this time. When the kernel maintainers rectify this usage, Buildah will follow suit immediately.
diff --git a/docs/buildah-commit.1.md b/docs/buildah-commit.1.md
new file mode 100644
index 0000000..d1891b1
--- /dev/null
+++ b/docs/buildah-commit.1.md
@@ -0,0 +1,263 @@
+# buildah-commit "1" "March 2017" "buildah"
+
+## NAME
+buildah\-commit - Create an image from a working container.
+
+## SYNOPSIS
+**buildah commit** [*options*] *container* [*image*]
+
+## DESCRIPTION
+Writes a new image using the specified container's read-write layer and if it
+is based on an image, the layers of that image. If *image* does not begin
+with a registry name component, `localhost` will be added to the name. If
+*image* is not provided, the image will have no name. When an image has no
+name, the `buildah images` command will display `<none>` in the `REPOSITORY` and
+`TAG` columns.
+
+## RETURN VALUE
+The image ID of the image that was created. On error, 1 is returned and errno is set.
+
+## OPTIONS
+
+**--authfile** *path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`.
+
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--cert-dir** *path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+The default certificates directory is _/etc/containers/certs.d_.
+
+**--change**, **-c** *"INSTRUCTION"*
+
+Apply the change to the committed image that would have been made if it had
+been built using a Containerfile which included the specified instruction.
+This option can be specified multiple times.
+
+**--config** *filename*
+
+Read a JSON-encoded version of an image configuration object from the specified
+file, and merge the values from it with the configuration of the image being
+committed.
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--cw** *options*
+
+Produce an image suitable for use as a confidential workload running in a
+trusted execution environment (TEE) using krun (i.e., *crun* built with the
+libkrun feature enabled and invoked as *krun*). Instead of the conventional
+contents, the root filesystem of the image will contain an encrypted disk image
+and configuration information for krun.
+
+The value for *options* is a comma-separated list of key=value pairs, supplying
+configuration information which is needed for producing the additional data
+which will be included in the container image.
+
+Recognized _keys_ are:
+
+*attestation_url*: The location of a key broker / attestation server.
+If a value is specified, the new image's workload ID, along with the passphrase
+used to encrypt the disk image, will be registered with the server, and the
+server's location will be stored in the container image.
+At run-time, krun is expected to contact the server to retrieve the passphrase
+using the workload ID, which is also stored in the container image.
+If no value is specified, a *passphrase* value *must* be specified.
+
+*cpus*: The number of virtual CPUs which the image expects to be run with at
+run-time. If not specified, a default value will be supplied.
+
+*firmware_library*: The location of the libkrunfw-sev shared library. If not
+specified, `buildah` checks for its presence in a number of hard-coded
+locations.
+
+*memory*: The amount of memory which the image expects to be run with at
+run-time, as a number of megabytes. If not specified, a default value will be
+supplied.
+
+*passphrase*: The passphrase to use to encrypt the disk image which will be
+included in the container image.
+If no value is specified, but an *attestation_url* value is specified, a
+randomly-generated passphrase will be used.
+The authors recommend setting an *attestation_url* but not a *passphrase*.
+
+*slop*: Extra space to allocate for the disk image compared to the size of the
+container image's contents, expressed either as a percentage (..%) or a size
+value (bytes, or larger units if suffixes like KB or MB are present), or a sum
+of two or more such specifications separated by "+". If not specified,
+`buildah` guesses that 25% more space than the contents will be enough, but
+this option is provided in case its guess is wrong. If the specified or
+computed size is less than 10 megabytes, it will be increased to 10 megabytes.
+
+*type*: The type of trusted execution environment (TEE) which the image should
+be marked for use with. Accepted values are "SEV" (AMD Secure Encrypted
+Virtualization - Encrypted State) and "SNP" (AMD Secure Encrypted
+Virtualization - Secure Nested Paging). If not specified, defaults to "SNP".
+
+*workload_id*: A workload identifier which will be recorded in the container
+image, to be used at run-time for retrieving the passphrase which was used to
+encrypt the disk image. If not specified, a semi-random value will be derived
+from the base image's image ID.
+
+**--disable-compression**, **-D**
+
+Don't compress filesystem layers when building the image unless it is required
+by the location where the image is being written. This is the default setting,
+because image layers are compressed automatically when they are pushed to
+registries, and images being written to local storage would only need to be
+decompressed again to be stored. Compression can be forced in all cases by
+specifying **--disable-compression=false**.
+
+**--encrypt-layer** *layer(s)*
+
+Layer(s) to encrypt: 0-indexed layer indices with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer). If not defined, will encrypt all layers if encryption-key flag is specified.
+
+**--encryption-key** *key*
+
+The [protocol:keyfile] specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), or PKCS7 (RFC2315), and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
+
+**--format**, **-f** *[oci | docker]*
+
+Control the format for the image manifest and configuration data. Recognized
+formats include *oci* (OCI image-spec v1.0, the default) and *docker* (version
+2, using schema format 2 for the manifest).
+
+Note: You can also override the default format by setting the BUILDAH\_FORMAT
+environment variable. `export BUILDAH_FORMAT=docker`
+
+**--identity-label** *bool-value*
+
+Adds default identity label `io.buildah.version` if set. (default true).
+
+**--iidfile** *ImageIDfile*
+
+Write the image ID to the file.
+
+**--manifest** "listName"
+
+Name of the manifest list to which the built image will be added. Creates the manifest list
+if it does not exist. This option is useful for building multi-architecture images.
+
+**--omit-history** *bool-value*
+
+Omit build history information in the built image. (default false).
+
+This option is useful for the cases where end users explicitly
+want to set `--omit-history` to omit the optional `History` from
+built images or when working with images built using build tools that
+do not include `History` information in their images.
+
+**--quiet**, **-q**
+
+When writing the output image, suppress progress output.
+
+**--rm**
+Remove the working container and its contents after creating the image.
+Default leaves the container and its content in place.
+
+**--sign-by** *fingerprint*
+
+Sign the new image using the GPG key that matches the specified fingerprint.
+
+**--squash**
+
+Squash all of the new image's layers (including those inherited from a base image) into a single new layer.
+
+**--timestamp** *seconds*
+
+Set the create timestamp to seconds since epoch to allow for deterministic builds (defaults to current time).
+By default, the created timestamp is changed and written into the image manifest with every commit,
+causing the image's sha256 hash to be different even if the sources are exactly the same otherwise.
+When --timestamp is set, the created timestamp is always set to the time specified and therefore not changed, allowing the image's sha256 to remain the same. All files committed to the layers of the image will be created with the timestamp.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+**--unsetenv** *env*
+
+Unset environment variables from the final image.
+
+## EXAMPLE
+
+This example saves an image based on the container.
+ `buildah commit containerID newImageName`
+
+This example saves an image named newImageName based on the container.
+ `buildah commit --rm containerID newImageName`
+
+This example saves an image with no name, removes the working container, and creates a new container using the image's ID.
+ `buildah from $(buildah commit --rm containerID)`
+
+This example saves an image based on the container disabling compression.
+ `buildah commit --disable-compression containerID`
+
+This example saves an image named newImageName based on the container disabling compression.
+ `buildah commit --disable-compression containerID newImageName`
+
+This example commits the container to the image on the local registry while turning off tls verification.
+ `buildah commit --tls-verify=false containerID docker://localhost:5000/imageId`
+
+This example commits the container to the image on the local registry using credentials and certificates for authentication.
+ `buildah commit --cert-dir ~/auth --tls-verify=true --creds=username:password containerID docker://localhost:5000/imageId`
+
+This example commits the container to the image on the local registry using credentials from the /tmp/auths/myauths.json file and certificates for authentication.
+ `buildah commit --authfile /tmp/auths/myauths.json --cert-dir ~/auth --tls-verify=true --creds=username:password containerID docker://localhost:5000/imageName`
+
+This example saves an image based on the container, but stores dates based on epoch time.
+`buildah commit --timestamp=0 containerID newImageName`
+
+### Building a multi-architecture image using the --manifest option (requires emulation software)
+
+```
+#!/bin/sh
+build() {
+ ctr=$(./bin/buildah from --arch $1 ubi8)
+ ./bin/buildah run $ctr dnf install -y iputils
+ ./bin/buildah commit --manifest ubi8ping $ctr
+}
+build arm
+build amd64
+build s390x
+```
+
+## ENVIRONMENT
+
+**BUILD\_REGISTRY\_SOURCES**
+
+BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
+lists of registry names under the keys `insecureRegistries`,
+`blockedRegistries`, and `allowedRegistries`.
+
+When committing an image, if the image is to be given a name, the portion of
+the name that corresponds to a registry is compared to the items in the
+`blockedRegistries` list, and if it matches any of them, the commit attempt is
+denied. If there are registries in the `allowedRegistries` list, and the
+portion of the name that corresponds to the registry is not in the list, the
+commit attempt is denied.
+
+**TMPDIR**
+The TMPDIR environment variable allows the user to specify where temporary files
+are stored while pulling and pushing images. Defaults to '/var/tmp'.
+
+## FILES
+
+**registries.conf** (`/etc/containers/registries.conf`)
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+**policy.json** (`/etc/containers/policy.json`)
+
+Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for images, and whether or not the tool should trust the images.
+
+## SEE ALSO
+buildah(1), buildah-images(1), containers-policy.json(5), containers-registries.conf(5)
diff --git a/docs/buildah-config.1.md b/docs/buildah-config.1.md
new file mode 100644
index 0000000..b62b6b0
--- /dev/null
+++ b/docs/buildah-config.1.md
@@ -0,0 +1,274 @@
+# buildah-config "1" "March 2017" "buildah"
+
+## NAME
+buildah\-config - Update image configuration settings.
+
+## SYNOPSIS
+**buildah config** [*options*] *container*
+
+## DESCRIPTION
+Updates one or more of the settings kept for a container.
+
+## OPTIONS
+
+**--add-history**
+
+Add an entry to the image's history which will note changes to the settings for
+**--cmd**, **--entrypoint**, **--env**, **--healthcheck**, **--label**,
+**--onbuild**, **--port**, **--shell**, **--stop-signal**, **--user**,
+**--volume**, and **--workingdir**.
+Defaults to false.
+
+Note: You can also override the default value of --add-history by setting the
+BUILDAH\_HISTORY environment variable. `export BUILDAH_HISTORY=true`
+
+**--annotation**, **-a** *annotation*=*annotation*
+
+Add an image *annotation* (e.g. annotation=*annotation*) to the image manifest
+of any images which will be built using the specified container. Can be used multiple times.
+If *annotation* has a trailing `-`, then the *annotation* is removed from the config.
+If the *annotation* is set to "-" then all annotations are removed from the config.
+
+**--arch** *architecture*
+
+Set the target *architecture* for any images which will be built using the
+specified container. By default, if the container was based on an image, that
+image's target architecture is kept, otherwise the host's architecture is
+recorded.
+
+**--author** *author*
+
+Set contact information for the *author* for any images which will be built
+using the specified container.
+
+**--cmd** *command*
+
+Set the default *command* to run for containers based on any images which will
+be built using the specified container. When used in combination with an
+*entry point*, this specifies the default parameters for the *entry point*.
+
+**--comment** *comment*
+
+Set the image-level comment for any images which will be built using the
+specified container.
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--created-by** *created*
+
+Set the description of how the topmost layer was *created* for any images which
+will be created using the specified container.
+
+**--domainname** *domain*
+
+Set the domainname to set when running containers based on any images built
+using the specified container.
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--entrypoint** *"command"* | *'["command", "arg1", ...]'*
+
+Set the *entry point* for containers based on any images which will be built
+using the specified container. buildah supports two formats for entrypoint. It
+can be specified as a simple string, or as an array of commands.
+
+Note: When the entrypoint is specified as a string, container runtimes will
+ignore the `cmd` value of the container image. However if you use the array
+form, then the cmd will be appended onto the end of the entrypoint cmd and be
+executed together.
+
+**--env**, **-e** *env[=value]*
+
+Add a value (e.g. env=*value*) to the environment for containers based on any
+images which will be built using the specified container. Can be used multiple times.
+If *env* is named but neither `=` nor a `value` is specified, then the value
+will be taken from the current process environment.
+If *env* has a trailing `-`, then the *env* is removed from the config.
+If the *env* is set to "-" then all environment variables are removed from the config.
+
+**--healthcheck** *command*
+
+Specify a command which should be run to check if a container is running correctly.
+
+Values can be *NONE*, "*CMD* ..." (run the specified command directly), or
+"*CMD-SHELL* ..." (run the specified command using the system's shell), or the
+empty value (remove a previously-set value and related settings).
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--healthcheck-interval** *interval*
+
+Specify how often the command specified using the *--healthcheck* option should
+be run.
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--healthcheck-retries** *count*
+
+Specify how many times the command specified using the *--healthcheck* option
+can fail before the container is considered to be unhealthy.
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--healthcheck-start-period** *interval*
+
+Specify how much time can elapse after a container has started before a failure
+to run the command specified using the *--healthcheck* option should be treated
+as an indication that the container is failing. During this time period,
+failures will be attributed to the container not yet having fully started, and
+will not be counted as errors. After the command succeeds, or the time period
+has elapsed, failures will be counted as errors.
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--healthcheck-timeout** *interval*
+
+Specify how long to wait after starting the command specified using the
+*--healthcheck* option to wait for the command to return its exit status. If
+the command has not returned within this time, it should be considered to have
+failed.
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--history-comment** *comment*
+
+Sets a comment on the topmost layer in any images which will be created
+using the specified container.
+
+**--hostname** *host*
+
+Set the hostname to set when running containers based on any images built using
+the specified container.
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--label**, **-l** *label*=*value*
+
+Add an image *label* (e.g. label=*value*) to the image configuration of any
+images which will be built using the specified container. Can be used multiple times.
+If *label* has a trailing `-`, then the *label* is removed from the config.
+If the *label* is set to "-" then all labels are removed from the config.
+
+**--onbuild** *onbuild command*
+
+Add an ONBUILD command to the image. ONBUILD commands are automatically run
+when images are built based on the image you are creating.
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--os** *operating system*
+
+Set the target *operating system* for any images which will be built using
+the specified container. By default, if the container was based on an image,
+its OS is kept, otherwise the host's OS's name is recorded.
+
+**--os-feature** *feature*
+
+Set the name of a required operating system *feature* for any images which will
+be built using the specified container. By default, if the container was based
+on an image, the base image's required OS feature list is kept, if it specified
+one. This option is typically only meaningful when the image's OS is Windows.
+
+If *feature* has a trailing `-`, then the *feature* is removed from the set of
+required features which will be listed in the image. If the *feature* is set
+to "-" then the entire features list is removed from the config.
+
+**--os-version** *version*
+
+Set the exact required operating system *version* for any images which will be
+built using the specified container. By default, if the container was based on
+an image, the base image's required OS version is kept, if it specified one.
+This option is typically only meaningful when the image's OS is Windows, and is
+typically set in Windows base images, so using this option is usually
+unnecessary.
+
+**--port**, **-p** *port/protocol*
+
+Add a *port* to expose when running containers based on any images which
+will be built using the specified container. Can be used multiple times.
+To specify whether the port listens on TCP or UDP, use "port/protocol".
+The default is TCP if the protocol is not specified. To expose the port on both TCP and UDP,
+specify the port option multiple times. If *port* has a trailing `-` and is already set,
+then the *port* is removed from the configuration. If the port is set to `-` then all exposed
+ports settings are removed from the configuration.
+
+**--shell** *shell*
+
+Set the default *shell* to run inside of the container image.
+The shell instruction allows the default shell used for the shell form of commands to be overridden. The default shell for Linux containers is "/bin/sh -c".
+
+Note: this setting is not present in the OCIv1 image format, so it is discarded when writing images using OCIv1 formats.
+
+**--stop-signal** *signal*
+
+Set default *stop signal* for container. This signal will be sent when container is stopped, default is SIGINT.
+
+**--unsetlabel** *label*
+
+Unset the image label, causing the label not to be inherited from the base image.
+
+**--user**, **-u** *user*[:*group*]
+
+Set the default *user* to be used when running containers based on this image.
+The user can be specified as a user name
+or UID, optionally followed by a group name or GID, separated by a colon (':').
+If names are used, the container should include entries for those names in its
+*/etc/passwd* and */etc/group* files.
+
+**--variant** *variant*
+
+Set the target architecture *variant* for any images which will be built using
+the specified container. By default, if the container was based on an image,
+that image's target architecture and variant information is kept, otherwise the
+host's architecture and variant are recorded.
+
+**--volume**, **-v** *volume*
+
+Add a location in the directory tree which should be marked as a *volume* in any images which will be built using the specified container. Can be used multiple times. If *volume* has a trailing `-`, and is already set, then the *volume* is removed from the config.
+If the *volume* is set to "-" then all volumes are removed from the config.
+
+**--workingdir** *directory*
+
+Set the initial working *directory* for containers based on images which will
+be built using the specified container.
+
+## EXAMPLE
+
+buildah config --author='Jane Austen' --workingdir='/etc/mycontainers' containerID
+
+buildah config --entrypoint /entrypoint.sh containerID
+
+buildah config --entrypoint '[ "/entrypoint.sh", "dev" ]' containerID
+
+buildah config --env foo=bar --env PATH=$PATH containerID
+
+buildah config --env foo- containerID
+
+buildah config --label Name=Mycontainer --label Version=1.0 containerID
+
+buildah config --label Name- containerID
+
+buildah config --annotation note=myNote containerID
+
+buildah config --annotation note- containerID
+
+buildah config --volume /usr/myvol containerID
+
+buildah config --volume /usr/myvol- containerID
+
+buildah config --port 1234 --port 8080 containerID
+
+buildah config --port 514/tcp --port 514/udp containerID
+
+buildah config --env 1234=5678 containerID
+
+buildah config --env 1234- containerID
+
+buildah config --os-version 10.0.19042.1645 containerID
+
+buildah config --os-feature win32k containerID
+
+buildah config --os-feature win32k- containerID
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah-containers.1.md b/docs/buildah-containers.1.md
new file mode 100644
index 0000000..e787b5c
--- /dev/null
+++ b/docs/buildah-containers.1.md
@@ -0,0 +1,123 @@
+# buildah-containers "1" "March 2017" "buildah"
+
+## NAME
+buildah\-containers - List the working containers and their base images.
+
+## SYNOPSIS
+**buildah containers** [*options*]
+
+## DESCRIPTION
+Lists containers which appear to be Buildah working containers, their names and
+IDs, and the names and IDs of the images from which they were initialized.
+
+## OPTIONS
+
+**--all**, **-a**
+
+List information about all containers, including those which were not created
+by and are not being used by Buildah. Containers created by Buildah are
+denoted with an '*' in the 'BUILDER' column.
+
+**--filter**, **-f**
+
+Filter output based on conditions provided.
+
+Valid filters are listed below:
+
+| **Filter** | **Description** |
+| --------------- | ------------------------------------------------------------------- |
+| id | [ID] Container's ID |
+| name | [Name] Container's name |
+| ancestor | [ImageName] Image or descendant used to create container |
+
+**--format**
+
+Pretty-print containers using a Go template.
+
+Valid placeholders for the Go template are listed below:
+
+| **Placeholder** | **Description** |
+| --------------- | -----------------------------------------|
+| .ContainerID | Container ID |
+| .Builder | Whether container was created by buildah |
+| .ImageID | Image ID |
+| .ImageName | Image name |
+| .ContainerName | Container name |
+
+**--json**
+
+Output in JSON format.
+
+**--noheading**, **-n**
+
+Omit the table headings from the listing of containers.
+
+**--notruncate**
+
+Do not truncate IDs and image names in the output.
+
+**--quiet**, **-q**
+
+Displays only the container IDs.
+
+## EXAMPLE
+
+buildah containers
+```
+CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
+ccf84de04b80 * 53ce4390f2ad registry.access.redhat.com/ub... ubi8-working-container
+45be1d806fc5 * 16ea53ea7c65 docker.io/library/busybox:latest busybox-working-container
+```
+
+buildah containers --quiet
+```
+ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8
+45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07
+```
+
+buildah containers -q --noheading --notruncate
+```
+ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8
+45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07
+```
+
+buildah containers --json
+```
+[
+ {
+ "id": "ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8",
+ "builder": true,
+ "imageid": "53ce4390f2adb1681eb1a90ec8b48c49c015e0a8d336c197637e7f65e365fa9e",
+ "imagename": "registry.access.redhat.com/ubi8:latest",
+ "containername": "ubi8-working-container"
+ },
+ {
+ "id": "45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07",
+ "builder": true,
+ "imageid": "16ea53ea7c652456803632d67517b78a4f9075a10bfdc4fc6b7b4cbf2bc98497",
+ "imagename": "docker.io/library/busybox:latest",
+ "containername": "busybox-working-container"
+ }
+]
+```
+
+buildah containers --format "{{.ContainerID}} {{.ContainerName}}"
+```
+ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8 ubi8-working-container
+45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07 busybox-working-container
+```
+
+buildah containers --format "Container ID: {{.ContainerID}}"
+```
+Container ID: ccf84de04b80c309ce6586997c79a769033dc4129db903c1882bc24a058438b8
+Container ID: 45be1d806fc533fcfc2beee77e424d87e5990d3ce9214d6b374677d6630bba07
+```
+
+buildah containers --filter ancestor=ubuntu
+```
+CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
+fbfd3505376e * 0ff04b2e7b63 docker.io/library/ubuntu:latest ubuntu-working-container
+```
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah-copy.1.md b/docs/buildah-copy.1.md
new file mode 100644
index 0000000..0981416
--- /dev/null
+++ b/docs/buildah-copy.1.md
@@ -0,0 +1,139 @@
+# buildah-copy "1" "April 2021" "buildah"
+
+## NAME
+buildah\-copy - Copies the contents of a file, URL, or directory into a container's working directory.
+
+## SYNOPSIS
+**buildah copy** *container* *src* [[*src* ...] *dest*]
+
+## DESCRIPTION
+Copies the contents of a file, URL, or a directory to a container's working
+directory or a specified location in the container. If a local directory is
+specified as a source, its *contents* are copied to the destination.
+
+## OPTIONS
+
+**--add-history**
+
+Add an entry to the history which will note the digest of the added content.
+Defaults to false.
+
+Note: You can also override the default value of --add-history by setting the
+BUILDAH\_HISTORY environment variable. `export BUILDAH_HISTORY=true`
+
+**--checksum** *checksum*
+
+Checksum the source content. The value of *checksum* must be a standard
+container digest string. Only supported for HTTP sources.
+
+**--chmod** *permissions*
+
+Sets the access permissions of the destination content. Accepts the numerical format.
+
+**--chown** *owner*:*group*
+
+Sets the user and group ownership of the destination content.
+
+**--contextdir** *directory*
+
+Build context directory. Specifying a context directory causes Buildah to
+chroot into the context directory. This means copying files pointed at
+by symbolic links outside of the chroot will fail.
+
+**--from** *containerOrImage*
+
+Use the root directory of the specified working container or image as the root
+directory when resolving absolute source paths and the path of the context
+directory. If an image needs to be pulled, options recognized by `buildah pull`
+can be used.
+
+**--ignorefile** *file*
+
+Path to an alternative .containerignore (.dockerignore) file. Requires \-\-contextdir be specified.
+
+**--quiet**, **-q**
+
+Refrain from printing a digest of the copied content.
+
+**--retry** *attempts*
+
+Number of times to retry in case of failure when performing pull of images from registry.
+
+Defaults to `3`.
+
+**--retry-delay** *duration*
+
+Duration of delay between retry attempts in case of failure when performing pull of images from registry.
+
+Defaults to `2s`.
+
+## EXAMPLE
+
+buildah copy containerID '/myapp/app.conf' '/myapp/app.conf'
+
+buildah copy --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
+
+buildah copy --chmod 660 containerID '/myapp/app.conf' '/myapp/app.conf'
+
+buildah copy containerID '/home/myuser/myproject.go'
+
+buildah copy containerID '/home/myuser/myfiles.tar' '/tmp'
+
+buildah copy containerID '/tmp/workingdir' '/tmp/workingdir'
+
+buildah copy containerID 'https://github.com/containers/buildah' '/tmp'
+
+buildah copy containerID 'passwd' 'certs.d' /etc
+
+## FILES
+
+### .containerignore/.dockerignore
+
+If the .containerignore/.dockerignore file exists in the context directory,
+`buildah copy` reads its contents. If both exist, then .containerignore is used.
+
+When the `--ignorefile` option is specified Buildah reads it and
+uses it to decide which content to exclude when copying content into the
+working container.
+
+Users can specify a series of Unix shell glob patterns in an ignore file to
+identify files/directories to exclude.
+
+Buildah supports a special wildcard string `**` which matches any number of
+directories (including zero). For example, **/*.go will exclude all files that
+end with .go that are found in all directories.
+
+Example .containerignore/.dockerignore file:
+
+```
+# here are files we want to exclude
+*/*.c
+**/output*
+src
+```
+
+`*/*.c`
+Excludes files and directories whose names end with .c in any top level subdirectory. For example, the source file include/rootless.c.
+
+`**/output*`
+Excludes files and directories starting with `output` from any directory.
+
+`src`
+Excludes files named src and the directory src as well as any content in it.
+
+Lines starting with ! (exclamation mark) can be used to make exceptions to
+exclusions. The following is an example .containerignore/.dockerignore file that uses this
+mechanism:
+```
+*.doc
+!Help.doc
+```
+
+Exclude all doc files except Help.doc when copying content into the container.
+
+This functionality is compatible with the handling of .containerignore files described here:
+
+https://github.com/containers/buildah/blob/main/docs/containerignore.5.md
+
+## SEE ALSO
+buildah(1), containerignore(5)
diff --git a/docs/buildah-from.1.md b/docs/buildah-from.1.md
new file mode 100644
index 0000000..9b368df
--- /dev/null
+++ b/docs/buildah-from.1.md
@@ -0,0 +1,743 @@
+# buildah-from "1" "March 2017" "buildah"
+
+## NAME
+buildah\-from - Creates a new working container, either from scratch or using a specified image as a starting point.
+
+## SYNOPSIS
+**buildah from** [*options*] *image*
+
+## DESCRIPTION
+Creates a working container based upon the specified image name. If the
+supplied image name is "scratch" a new empty container is created. Image names
+use a "transport":"details" format.
+
+Multiple transports are supported:
+
+ **dir:**_path_
+ An existing local directory _path_ containing the manifest, layer tarballs, and signatures in individual files. This is a non-standardized format, primarily useful for debugging or noninvasive image inspection.
+
+ **docker://**_docker-reference_ (Default)
+ An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG\_RUNTIME\_DIR/containers/auth.json`, which is set using `(buildah login)`. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
+ If _docker-reference_ does not include a registry name, *localhost* will be consulted first, followed by any registries named in the registries configuration.
+
+ **docker-archive:**_path_
+ An image is retrieved as a `podman load` formatted file.
+
+ **docker-daemon:**_docker-reference_
+ An image _docker-reference_ stored in the docker daemon's internal storage. _docker-reference_ must include either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).
+
+ **oci:**_path_**:**_tag_
+ An image tag in a directory compliant with "Open Container Image Layout Specification" at _path_.
+
+ **oci-archive:**_path_**:**_tag_
+ An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
+
+### DEPENDENCIES
+
+Buildah resolves the path to the registry to pull from by using the /etc/containers/registries.conf
+file, containers-registries.conf(5). If the `buildah from` command fails with an "image not known" error,
+first verify that the registries.conf file is installed and configured appropriately.
+
+## RETURN VALUE
+The container ID of the container that was created. On error 1 is returned.
+
+## OPTIONS
+
+**--add-host**=[]
+
+Add a custom host-to-IP mapping (host:ip)
+
+Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times.
+
+**--arch**="ARCH"
+
+Set the ARCH of the image to be pulled to the provided value instead of using the architecture of the host. (Examples: arm, arm64, 386, amd64, ppc64le, s390x)
+
+**--authfile** *path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`.
+
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--cap-add**=*CAP\_xxx*
+
+Add the specified capability to the default set of capabilities which will be
+supplied for subsequent *buildah run* invocations which use this container.
+Certain capabilities are granted by default; this option can be used to add
+more.
+
+**--cap-drop**=*CAP\_xxx*
+
+Remove the specified capability from the default set of capabilities which will
+be supplied for subsequent *buildah run* invocations which use this container.
+The CAP\_CHOWN, CAP\_DAC\_OVERRIDE, CAP\_FOWNER, CAP\_FSETID, CAP\_KILL,
+CAP\_NET\_BIND\_SERVICE, CAP\_SETFCAP, CAP\_SETGID, CAP\_SETPCAP,
+and CAP\_SETUID capabilities are granted by default; this option can be used to remove them. The list of default capabilities is managed in containers.conf(5).
+
+If a capability is specified to both the **--cap-add** and **--cap-drop**
+options, it will be dropped, regardless of the order in which the options were
+given.
+
+**--cert-dir** *path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+The default certificates directory is _/etc/containers/certs.d_.
+
+**--cgroup-parent**=""
+
+Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+
+**--cgroupns** *how*
+
+Sets the configuration for cgroup namespaces when the container is subsequently
+used for `buildah run`.
+The configured value can be "" (the empty string) or "private" to indicate
+that a new cgroup namespace should be created, or it can be "host" to indicate
+that the cgroup namespace in which `buildah` itself is being run should be reused.
+
+**--cidfile** *ContainerIDFile*
+
+Write the container ID to the file.
+
+**--cpu-period**=*0*
+
+Limit the CPU CFS (Completely Fair Scheduler) period
+
+Limit the container's CPU usage. This flag tells the kernel to restrict the container's CPU usage to the period you specify.
+
+**--cpu-quota**=*0*
+
+Limit the CPU CFS (Completely Fair Scheduler) quota
+
+Limit the container's CPU usage. By default, containers run with the full
+CPU resource. This flag tells the kernel to restrict the container's CPU usage
+to the quota you specify.
+
+**--cpu-shares**, **-c**=*0*
+
+CPU shares (relative weight)
+
+By default, all containers get the same proportion of CPU cycles. This proportion
+can be modified by changing the container's CPU share weighting relative
+to the weighting of all other running containers.
+
+To modify the proportion from the default of 1024, use the **--cpu-shares**
+flag to set the weighting to 2 or higher.
+
+The proportion will only apply when CPU-intensive processes are running.
+When tasks in one container are idle, other containers can use the
+left-over CPU time. The actual amount of CPU time will vary depending on
+the number of containers running on the system.
+
+For example, consider three containers, one has a cpu-share of 1024 and
+two others have a cpu-share setting of 512. When processes in all three
+containers attempt to use 100% of CPU, the first container would receive
+50% of the total CPU time. If you add a fourth container with a cpu-share
+of 1024, the first container only gets 33% of the CPU. The remaining containers
+receive 16.5%, 16.5% and 33% of the CPU.
+
+On a multi-core system, the shares of CPU time are distributed over all CPU
+cores. Even if a container is limited to less than 100% of CPU time, it can
+use 100% of each individual CPU core.
+
+For example, consider a system with more than three cores. If you start one
+container **{C0}** with **-c=512** running one process, and another container
+**{C1}** with **-c=1024** running two processes, this can result in the following
+division of CPU shares:
+
+ PID container CPU CPU share
+ 100 {C0} 0 100% of CPU0
+ 101 {C1} 1 100% of CPU1
+ 102 {C1} 2 100% of CPU2
+
+**--cpuset-cpus**=""
+
+ CPUs in which to allow execution (0-3, 0,1)
+
+**--cpuset-mems**=""
+
+Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+
+If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
+then processes in your container will only use memory from the first
+two memory nodes.
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--decryption-key** *key[:passphrase]*
+
+The [key[:passphrase]] to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
+
+**--device**=*device*
+
+Add a host device or devices under a directory to the container. The format is `<device-on-host>[:<device-on-container>][:<permissions>]` (e.g. --device=/dev/sdc:/dev/xvdc:rwm)
+
+**--dns**=[]
+
+Set custom DNS servers
+
+This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case the `--dns` flag is necessary for every run.
+
+The special value **none** can be specified to disable creation of /etc/resolv.conf in the container by Buildah. The /etc/resolv.conf file in the image will be used without changes.
+
+**--dns-option**=[]
+
+Set custom DNS options
+
+**--dns-search**=[]
+
+Set custom DNS search domains
+
+**--format**, **-f** *oci* | *docker*
+
+Control the format for the built image's manifest and configuration data.
+Recognized formats include *oci* (OCI image-spec v1.0, the default) and
+*docker* (version 2, using schema format 2 for the manifest).
+
+Note: You can also override the default format by setting the BUILDAH\_FORMAT
+environment variable. `export BUILDAH_FORMAT=docker`
+
+**--group-add**=*group* | *keep-groups*
+
+Assign additional groups to the primary user running within the container
+process.
+
+- `keep-groups` is a special flag that tells Buildah to keep the supplementary
+group access.
+
+Allows container to use the user's supplementary group access. If file systems
+or devices are only accessible by the rootless user's group, this flag tells the
+OCI runtime to pass the group access into the container. Currently only
+available with the `crun` OCI runtime. Note: `keep-groups` is exclusive, other
+groups cannot be specified with this flag.
+
+**--http-proxy**
+
+By default proxy environment variables are passed into the container if set
+for the Buildah process. This can be disabled by setting the `--http-proxy`
+option to `false`. The environment variables passed in include `http_proxy`,
+`https_proxy`, `ftp_proxy`, `no_proxy`, and also the upper case versions of
+those.
+
+Defaults to `true`
+
+**--ipc** *how*
+
+Sets the configuration for IPC namespaces when the container is subsequently
+used for `buildah run`.
+The configured value can be "" (the empty string) or "container" to indicate
+that a new IPC namespace should be created, or it can be "host" to indicate
+that the IPC namespace in which `Buildah` itself is being run should be reused,
+or it can be the path to an IPC namespace which is already in use by
+another process.
+
+**--isolation** *type*
+
+Controls what type of isolation is used for running processes under `buildah
+run`. Recognized types include *oci* (OCI-compatible runtime, the default),
+*rootless* (OCI-compatible runtime invoked using a modified
+configuration, with *--no-new-keyring* added to its *create* invocation,
+reusing the host's network and UTS namespaces, and creating private IPC, PID,
+mount, and user namespaces; the default for unprivileged users), and *chroot*
+(an internal wrapper that leans more toward chroot(1) than container
+technology, reusing the host's control group, network, IPC, and PID namespaces,
+and creating private mount and UTS namespaces, and creating user namespaces
+only when they're required for ID mapping).
+
+Note: You can also override the default isolation type by setting the
+BUILDAH\_ISOLATION environment variable. `export BUILDAH_ISOLATION=oci`
+
+**--memory**, **-m**=""
+
+Memory limit (format: <number>[<unit>], where unit = b, k, m or g)
+
+Allows you to constrain the memory available to a container. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited. The actual limit may be rounded up to a multiple of the operating
+system's page size (the value would be very large, that's millions of trillions).
+
+**--memory-swap**="LIMIT"
+
+A limit value equal to memory plus swap. Must be used with the **-m**
+(**--memory**) flag. The swap `LIMIT` should always be larger than **-m**
+(**--memory**) value. By default, the swap `LIMIT` will be set to double
+the value of --memory.
+
+The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes),
+`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a
+unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
+
+**--name** *name*
+
+A *name* for the working container
+
+**--network**=*mode*, **--net**=*mode*
+
+Sets the configuration for network namespaces when the container is subsequently
+used for `buildah run`.
+
+Valid _mode_ values are:
+
+- **none**: no networking. Invalid if using **--dns**, **--dns-opt**, or **--dns-search**;
+- **host**: use the host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure;
+- **ns:**_path_: path to a network namespace to join;
+- **private**: create a new namespace for the container (default)
+- **\<network name|ID\>**: Join the network with the given name or ID, e.g. use `--network mynet` to join the network with the name mynet. Only supported for rootful users.
+- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. This is the default for rootless containers. It is possible to specify these additional options, they can also be set with `network_cmd_options` in containers.conf:
+ - **allow_host_loopback=true|false**: Allow slirp4netns to reach the host loopback IP (default is 10.0.2.2 or the second IP from slirp4netns cidr subnet when changed, see the cidr option below). The default is false.
+ - **mtu=MTU**: Specify the MTU to use for this network. (Default is `65520`).
+ - **cidr=CIDR**: Specify ip range to use for this network. (Default is `10.0.2.0/24`).
+ - **enable_ipv6=true|false**: Enable IPv6. Default is true. (Required for `outbound_addr6`).
+ - **outbound_addr=INTERFACE**: Specify the outbound interface slirp binds to (ipv4 traffic only).
+ - **outbound_addr=IPv4**: Specify the outbound ipv4 address slirp binds to.
+ - **outbound_addr6=INTERFACE**: Specify the outbound interface slirp binds to (ipv6 traffic only).
+ - **outbound_addr6=IPv6**: Specify the outbound ipv6 address slirp binds to.
+- **pasta[:OPTIONS,...]**: use **pasta**(1) to create a user-mode networking
+ stack. \
+ This is only supported in rootless mode. \
+ By default, IPv4 and IPv6 addresses and routes, as well as the pod interface
+ name, are copied from the host. If port forwarding isn't configured, ports
+ are forwarded dynamically as services are bound on either side (init
+ namespace or container namespace). Port forwarding preserves the original
+ source IP address. Options described in pasta(1) can be specified as
+ comma-separated arguments. \
+ In terms of pasta(1) options, **--config-net** is given by default, in
+ order to configure networking when the container is started, and
+ **--no-map-gw** is also assumed by default, to avoid direct access from
+ container to host using the gateway address. The latter can be overridden
+ by passing **--map-gw** in the pasta-specific options (despite not being an
+ actual pasta(1) option). \
+ Also, **-t none** and **-u none** are passed to disable
+ automatic port forwarding based on bound ports. Similarly, **-T none** and
+ **-U none** are given to disable the same functionality from container to
+ host. \
+ Some examples:
+ - **pasta:--map-gw**: Allow the container to directly reach the host using the
+ gateway address.
+ - **pasta:--mtu,1500**: Specify a 1500 bytes MTU for the _tap_ interface in
+ the container.
+ - **pasta:--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,-m,1500,--no-ndp,--no-dhcpv6,--no-dhcp**,
+ equivalent to default slirp4netns(1) options: disable IPv6, assign
+ `10.0.2.0/24` to the `tap0` interface in the container, with gateway
+    `10.0.2.2`, enable DNS forwarder reachable at `10.0.2.3`, set MTU to 1500
+ bytes, disable NDP, DHCPv6 and DHCP support.
+ - **pasta:-I,tap0,--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,--no-ndp,--no-dhcpv6,--no-dhcp**,
+ equivalent to default slirp4netns(1) options with Podman overrides: same as
+ above, but leave the MTU to 65520 bytes
+ - **pasta:-t,auto,-u,auto,-T,auto,-U,auto**: enable automatic port forwarding
+ based on observed bound ports from both host and container sides
+ - **pasta:-T,5201**: enable forwarding of TCP port 5201 from container to
+ host, using the loopback interface instead of the tap interface for improved
+ performance
+
+**--os**="OS"
+
+Set the OS of the image to be pulled to the provided value instead of using the current operating system of the host.
+
+**--pid** *how*
+
+Sets the configuration for PID namespaces when the container is subsequently
+used for `buildah run`.
+The configured value can be "" (the empty string) or "container" to indicate
+that a new PID namespace should be created, or it can be "host" to indicate
+that the PID namespace in which `Buildah` itself is being run should be reused,
+or it can be the path to a PID namespace which is already in use by another
+process.
+
+**--platform**="OS/ARCH[/VARIANT]"
+
+Set the OS/ARCH of the image to be pulled
+to the provided value instead of using the current operating system and
+architecture of the host (for example `linux/arm`).
+
+OS/ARCH pairs are those used by the Go Programming Language. In several cases
+the ARCH value for a platform differs from one produced by other tools such as
+the `arch` command. Valid OS and architecture name combinations are listed as
+values for $GOOS and $GOARCH at https://golang.org/doc/install/source#environment,
+and can also be found by running `go tool dist list`.
+
+While `buildah from` is happy to pull an image for any platform that exists,
+`buildah run` will not be able to run binaries provided by that image without
+the help of emulation provided by packages like `qemu-user-static`.
+
+**NOTE:** The `--platform` option may not be used in combination with the `--arch`, `--os`, or `--variant` options.
+
+**--pull**
+
+When the flag is enabled or set explicitly to `true` (with *--pull=true*), attempt to pull the latest image from the registries
+listed in registries.conf if a local image does not exist or the image is newer
+than the one in storage. Raise an error if the image is not in any listed
+registry and is not present locally.
+
+If the flag is disabled (with *--pull=false*), do not pull the image from the
+registry, use only the local version. Raise an error if the image is not
+present locally.
+
+If the pull flag is set to `always` (with *--pull=always*),
+pull the image from the first registry it is found in as listed in registries.conf.
+Raise an error if not found in the registries, even if the image is present locally.
+
+If the pull flag is set to `never` (with *--pull=never*),
+Do not pull the image from the registry, use only the local version. Raise an error
+if the image is not present locally.
+
+Defaults to *true*.
+
+**--quiet**, **-q**
+
+If an image needs to be pulled from the registry, suppress progress output.
+
+**--retry** *attempts*
+
+Number of times to retry in case of failure when performing pull of images from registry.
+
+Defaults to `3`.
+
+**--retry-delay** *duration*
+
+Duration of delay between retry attempts in case of failure when performing pull of images from registry.
+
+Defaults to `2s`.
+
+**--security-opt**=[]
+
+Security Options
+
+ "label=user:USER" : Set the label user for the container
+ "label=role:ROLE" : Set the label role for the container
+ "label=type:TYPE" : Set the label type for the container
+ "label=level:LEVEL" : Set the label level for the container
+ "label=disable" : Turn off label confinement for the container
+ "no-new-privileges" : Not supported
+
+ "seccomp=unconfined" : Turn off seccomp confinement for the container
+  "seccomp=profile.json" : White listed syscalls seccomp Json file to be used as a seccomp filter
+
+ "apparmor=unconfined" : Turn off apparmor confinement for the container
+ "apparmor=your-profile" : Set the apparmor confinement profile for the container
+
+**--shm-size**=""
+
+Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
+Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes).
+If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+**--ulimit** *type*=*soft-limit*[:*hard-limit*]
+
+Specifies resource limits to apply to processes launched during `buildah run`.
+This option can be specified multiple times. Recognized resource types
+include:
+ "core": maximum core dump size (ulimit -c)
+ "cpu": maximum CPU time (ulimit -t)
+ "data": maximum size of a process's data segment (ulimit -d)
+ "fsize": maximum size of new files (ulimit -f)
+ "locks": maximum number of file locks (ulimit -x)
+ "memlock": maximum amount of locked memory (ulimit -l)
+ "msgqueue": maximum amount of data in message queues (ulimit -q)
+ "nice": niceness adjustment (nice -n, ulimit -e)
+ "nofile": maximum number of open files (ulimit -n)
+ "nofile": maximum number of open files (1048576); when run by root
+ "nproc": maximum number of processes (ulimit -u)
+ "nproc": maximum number of processes (1048576); when run by root
+ "rss": maximum size of a process's resident set (ulimit -m)
+ "rtprio": maximum real-time scheduling priority (ulimit -r)
+ "rttime": maximum amount of real-time execution between blocking syscalls
+ "sigpending": maximum number of pending signals (ulimit -i)
+ "stack": maximum stack size (ulimit -s)
+
+**--userns** *how*
+
+Sets the configuration for user namespaces when the container is subsequently
+used for `buildah run`.
+The configured value can be "" (the empty string) or "container" to indicate
+that a new user namespace should be created, it can be "host" to indicate that
+the user namespace in which `Buildah` itself is being run should be reused, or
+it can be the path to a user namespace which is already in use by another
+process.
+
+**--userns-gid-map** *mapping*
+
+Directly specifies a GID mapping which should be used to set ownership, at the
+filesystem level, on the working container's contents.
+Commands run when handling `RUN` instructions will default to being run in
+their own user namespaces, configured using the UID and GID maps.
+
+Entries in this map take the form of one or more colon-separated triples of a starting
+in-container GID, a corresponding starting host-level GID, and the number of
+consecutive IDs which the map entry represents.
+
+This option overrides the *remap-gids* setting in the *options* section of
+/etc/containers/storage.conf.
+
+If this option is not specified, but a global --userns-gid-map setting is
+supplied, settings from the global option will be used.
+
+**--userns-gid-map-group** *mapping*
+
+Directly specifies a GID mapping which should be used to set ownership, at the
+filesystem level, on the container's contents.
+Commands run using `buildah run` will default to being run in their own user
+namespaces, configured using the UID and GID maps.
+
+Entries in this map take the form of one or more colon-separated triples of a starting
+in-container GID, a corresponding starting host-level GID, and the number of
+consecutive IDs which the map entry represents.
+
+This option overrides the *remap-gids* setting in the *options* section of
+/etc/containers/storage.conf.
+
+If this option is not specified, but a global --userns-gid-map setting is
+supplied, settings from the global option will be used.
+
+If none of --userns-uid-map-user, --userns-gid-map-group, or --userns-gid-map
+are specified, but --userns-uid-map is specified, the GID map will be set to
+use the same numeric values as the UID map.
+
+**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless user namespace in the container, rather than being relative to the host as it would be when run rootful.
+
+**--userns-gid-map-group** *group*
+
+Specifies that a GID mapping which should be used to set ownership, at the
+filesystem level, on the container's contents, can be found in entries in the
+`/etc/subgid` file which correspond to the specified group.
+Commands run using `buildah run` will default to being run in their own user
+namespaces, configured using the UID and GID maps.
+If --userns-uid-map-user is specified, but --userns-gid-map-group is not
+specified, `Buildah` will assume that the specified user name is also a
+suitable group name to use as the default setting for this option.
+
+**--userns-uid-map** *mapping*
+
+Directly specifies a UID mapping which should be used to set ownership, at the
+filesystem level, on the working container's contents.
+Commands run when handling `RUN` instructions will default to being run in
+their own user namespaces, configured using the UID and GID maps.
+
+Entries in this map take the form of one or more colon-separated triples of a starting
+in-container UID, a corresponding starting host-level UID, and the number of
+consecutive IDs which the map entry represents.
+
+This option overrides the *remap-uids* setting in the *options* section of
+/etc/containers/storage.conf.
+
+If this option is not specified, but a global --userns-uid-map setting is
+supplied, settings from the global option will be used.
+
+**--userns-uid-map-user** *mapping*
+
+Directly specifies a UID mapping which should be used to set ownership, at the
+filesystem level, on the container's contents.
+Commands run using `buildah run` will default to being run in their own user
+namespaces, configured using the UID and GID maps.
+
+Entries in this map take the form of one or more colon-separated triples of a starting
+in-container UID, a corresponding starting host-level UID, and the number of
+consecutive IDs which the map entry represents.
+
+This option overrides the *remap-uids* setting in the *options* section of
+/etc/containers/storage.conf.
+
+If this option is not specified, but a global --userns-uid-map setting is
+supplied, settings from the global option will be used.
+
+If none of --userns-uid-map-user, --userns-gid-map-group, or --userns-uid-map
+are specified, but --userns-gid-map is specified, the UID map will be set to
+use the same numeric values as the GID map.
+
+**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless user namespace in the container, rather than being relative to the host as it would be when run rootful.
+
+**--userns-uid-map-user** *user*
+
+Specifies that a UID mapping which should be used to set ownership, at the
+filesystem level, on the container's contents, can be found in entries in the
+`/etc/subuid` file which correspond to the specified user.
+Commands run using `buildah run` will default to being run in their own user
+namespaces, configured using the UID and GID maps.
+If --userns-gid-map-group is specified, but --userns-uid-map-user is not
+specified, `Buildah` will assume that the specified group name is also a
+suitable user name to use as the default setting for this option.
+
+**--uts** *how*
+
+Sets the configuration for UTS namespaces when the container is subsequently
+used for `buildah run`.
+The configured value can be "" (the empty string) or "container" to indicate
+that a new UTS namespace should be created, or it can be "host" to indicate
+that the UTS namespace in which `Buildah` itself is being run should be reused,
+or it can be the path to a UTS namespace which is already in use by another
+process.
+
+**--variant**=""
+
+Set the architecture variant of the image to be pulled.
+
+**--volume**, **-v**[=*[HOST-DIR:CONTAINER-DIR[:OPTIONS]]*]
+
+ Create a bind mount. If you specify ` -v /HOST-DIR:/CONTAINER-DIR`, Buildah
+ bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Buildah
+ container. The `OPTIONS` are a comma delimited list and can be: <sup>[[1]](#Footnote1)</sup>
+
+ * [rw|ro]
+ * [U]
+ * [z|Z|O]
+ * [`[r]shared`|`[r]slave`|`[r]private`|`[r]unbindable`]
+
+The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR`
+must be an absolute path as well. Buildah bind-mounts the `HOST-DIR` to the
+path you specify. For example, if you supply `/foo` as the host path,
+Buildah copies the contents of `/foo` to the container filesystem on the host
+and bind mounts that into the container.
+
+You can specify multiple **-v** options to mount one or more mounts to a
+container.
+
+ `Write Protected Volume Mounts`
+
+You can add the `:ro` or `:rw` suffix to a volume to mount it read-only or
+read-write mode, respectively. By default, the volumes are mounted read-write.
+See examples.
+
+ `Chowning Volume Mounts`
+
+By default, Buildah does not change the owner and group of source volume directories mounted into containers. If a container is created in a new user namespace, the UID and GID in the container may correspond to another UID and GID on the host.
+
+The `:U` suffix tells Buildah to use the correct host UID and GID based on the UID and GID within the container, to change the owner and group of the source volume.
+
+ `Labeling Volume Mounts`
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a container. Without a label, the security system might
+prevent the processes running inside the container from using the content. By
+default, Buildah does not change the labels set by the OS.
+
+To change a label in the container context, you can add either of two suffixes
+`:z` or `:Z` to the volume mount. These suffixes tell Buildah to relabel file
+objects on the shared volumes. The `z` option tells Buildah that two containers
+share the volume content. As a result, Buildah labels the content with a shared
+content label. Shared volume labels allow all containers to read/write content.
+The `Z` option tells Buildah to label the content with a private unshared label.
+Only the current container can use a private volume.
+
+ `Overlay Volume Mounts`
+
+ The `:O` flag tells Buildah to mount the directory from the host as a temporary storage using the Overlay file system. The `RUN` command containers are allowed to modify contents within the mountpoint and are stored in the container storage in a separate directory. In Overlay FS terms the source directory will be the lower, and the container storage directory will be the upper. Modifications to the mount point are destroyed when the `RUN` command finishes executing, similar to a tmpfs mount point.
+
+ Any subsequent execution of `RUN` commands sees the original source directory content, any changes from previous RUN commands no longer exist.
+
+ One use case of the `overlay` mount is sharing the package cache from the host into the container to allow speeding up builds.
+
+ Note:
+
+ - The `O` flag is not allowed to be specified with the `Z` or `z` flags. Content mounted into the container is labeled with the private label.
+ On SELinux systems, labels in the source directory need to be readable by the container label. If not, SELinux container separation must be disabled for the container to work.
+ - Modification of the directory volume mounted into the container with an overlay mount can cause unexpected failures. It is recommended that you do not modify the directory until the container finishes running.
+
+By default bind mounted volumes are `private`. That means any mounts done
+inside container will not be visible on the host and vice versa. This behavior can
+be changed by specifying a volume mount propagation property.
+
+When the mount propagation policy is set to `shared`, any mounts completed inside
+the container on that volume will be visible to both the host and container. When
+the mount propagation policy is set to `slave`, one way mount propagation is enabled
+and any mounts completed on the host for that volume will be visible only inside of the container.
+To control the mount propagation property of the volume use the `:[r]shared`,
+`:[r]slave`, `:[r]private` or `:[r]unbindable` propagation flag. The propagation property can
+be specified only for bind mounted volumes and not for internal volumes or
+named volumes. For mount propagation to work on the source mount point (the mount point
+where source dir is mounted on) it has to have the right propagation properties. For
+shared volumes, the source mount point has to be shared. And for slave volumes,
+the source mount has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
+
+Use `df <source-dir>` to determine the source mount and then use
+`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to determine propagation
+properties of source mount, if `findmnt` utility is not available, the source mount point
+can be determined by looking at the mount entry in `/proc/self/mountinfo`. Look
+at `optional fields` and see if any propagation properties are specified.
+`shared:X` means the mount is `shared`, `master:X` means the mount is `slave` and if
+nothing is there that means the mount is `private`. <sup>[[1]](#Footnote1)</sup>
+
+To change propagation properties of a mount point use the `mount` command. For
+example, to bind mount the source directory `/foo` do
+`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This
+will convert /foo into a `shared` mount point. The propagation properties of the source
+mount can be changed directly. For instance if `/` is the source mount for
+`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount.
+
+## EXAMPLE
+
+buildah from --pull imagename
+
+buildah from --pull docker://myregistry.example.com/imagename
+
+buildah from docker-daemon:imagename:imagetag
+
+buildah from --name mycontainer docker-archive:filename
+
+buildah from oci-archive:filename
+
+buildah from --name mycontainer dir:directoryname
+
+buildah from --pull-always --name "mycontainer" myregistry.example.com/imagename
+
+buildah from --tls-verify=false myregistry/myrepository/imagename:imagetag
+
+buildah from --creds=myusername:mypassword --cert-dir ~/auth myregistry/myrepository/imagename:imagetag
+
+buildah from --authfile=/tmp/auths/myauths.json myregistry/myrepository/imagename:imagetag
+
+buildah from --memory 40m --cpu-shares 2 --cpuset-cpus 0,2 --security-opt label=level:s0:c100,c200 myregistry/myrepository/imagename:imagetag
+
+buildah from --ulimit nofile=1024:1028 --cgroup-parent /path/to/cgroup/parent myregistry/myrepository/imagename:imagetag
+
+buildah from --volume /home/test:/myvol:ro,Z myregistry/myrepository/imagename:imagetag
+
+buildah from -v /home/test:/myvol:z,U myregistry/myrepository/imagename:imagetag
+
+buildah from -v /var/lib/yum:/var/lib/yum:O myregistry/myrepository/imagename:imagetag
+
+buildah from --arch=arm --variant v7 myregistry/myrepository/imagename:imagetag
+
+## ENVIRONMENT
+
+**BUILD\_REGISTRY\_SOURCES**
+
+BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
+lists of registry names under the keys `insecureRegistries`,
+`blockedRegistries`, and `allowedRegistries`.
+
+When pulling an image from a registry, if the name of the registry matches any
+of the items in the `blockedRegistries` list, the image pull attempt is denied.
+If there are registries in the `allowedRegistries` list, and the registry's
+name is not in the list, the pull attempt is denied.
+
+**TMPDIR**
+The TMPDIR environment variable allows the user to specify where temporary files
+are stored while pulling and pushing images. Defaults to '/var/tmp'.
+
+## FILES
+
+**registries.conf** (`/etc/containers/registries.conf`)
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+**policy.json** (`/etc/containers/policy.json`)
+
+Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images.
+
+## SEE ALSO
+buildah(1), buildah-pull(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), containers.conf(5)
+
+## FOOTNOTES
+<a name="Footnote1">1</a>: The Buildah project is committed to inclusivity, a core value of open source. The `master` and `slave` mount propagation terminology used here is problematic and divisive, and should be changed. However, these terms are currently used within the Linux kernel and must be used as-is at this time. When the kernel maintainers rectify this usage, Buildah will follow suit immediately.
diff --git a/docs/buildah-images.1.md b/docs/buildah-images.1.md
new file mode 100644
index 0000000..fe9ec71
--- /dev/null
+++ b/docs/buildah-images.1.md
@@ -0,0 +1,137 @@
+# buildah-images "1" "March 2017" "buildah"
+
+## NAME
+buildah\-images - List images in local storage.
+
+## SYNOPSIS
+**buildah images** [*options*] [*image*]
+
+## DESCRIPTION
+Displays locally stored images, their names, sizes, created date and their IDs.
+The created date is displayed in the time locale of the local machine.
+
+## OPTIONS
+
+**--all**, **-a**
+
+Show all images, including intermediate images from a build.
+
+**--digests**
+
+Show the image digests.
+
+**--filter**, **-f**=[]
+
+Filter output based on conditions provided (default []).
+
+ Filters:
+
+ **after,since=image**
+ Filter on images created since the given image.
+
+ **before=image**
+ Filter on images created before the given image.
+
+ **dangling=true|false**
+ Show dangling images. An image is considered to be dangling if it has no associated names and tags.
+
+ **id=id**
+ Show image with this specific ID.
+
+ **intermediate=true|false**
+ Show intermediate images. An image is considered to be an intermediate image if it is dangling and has no children.
+
+ **label=key[=value]**
+ Filter by images labels key and/or value.
+
+ **readonly=true|false**
+ Show only read only images or Read/Write images. The default is to show both. Read/Only images can be configured by modifying the "additionalimagestores" in the /etc/containers/storage.conf file.
+
+ **reference=reference**
+ Show images matching the specified reference. Wildcards are supported (e.g., "reference=*fedora:3*").
+
+**--format**="TEMPLATE"
+
+Pretty-print images using a Go template.
+
+Valid placeholders for the Go template are listed below:
+
+| **Placeholder** | **Description** |
+| --------------- | -----------------------------------------|
+| .Created | Creation date in epoch time |
+| .CreatedAt | Creation date Pretty Formatted |
+| .CreatedAtRaw | Creation date in raw format |
+| .Digest | Image Digest |
+| .ID | Image ID |
+| .Name | Image Name |
+| .ReadOnly | Indicates if image came from a R/O store |
+| .Size | Image Size |
+| .Tag | Image Tag |
+
+**--history**
+
+Display the image name history.
+
+**--json**
+
+Display the output in JSON format.
+
+**--no-trunc**
+
+Do not truncate output.
+
+**--noheading**, **-n**
+
+Omit the table headings from the listing of images.
+
+**--quiet**, **-q**
+
+Displays only the image IDs.
+
+## EXAMPLE
+
+buildah images
+
+buildah images fedora:latest
+
+buildah images --json
+
+buildah images --quiet
+
+buildah images -q --noheading --no-trunc
+
+buildah images --quiet fedora:latest
+
+buildah images --filter dangling=true
+
+buildah images --format "ImageID: {{.ID}}"
+
+```
+$ buildah images
+REPOSITORY TAG IMAGE ID CREATED SIZE
+registry.access.redhat.com/ubi8 latest 53ce4390f2ad 3 weeks ago 233 MB
+docker.io/library/busybox latest 16ea53ea7c65 3 weeks ago 1.46 MB
+quay.io/libpod/testimage 20210610 9f9ec7f2fdef 4 months ago 7.99 MB
+```
+
+```
+# buildah images -a
+IMAGE NAME IMAGE TAG IMAGE ID CREATED AT SIZE
+registry.access.redhat.com/ubi8 latest 53ce4390f2ad 3 weeks ago 233 MB
+<none> <none> 8c6e16890c2b Jun 13, 2018 15:52 4.42 MB
+localhost/test latest c0cfe75da054 Jun 13, 2018 15:52 4.42 MB
+```
+
+```
+# buildah images --format '{{.ID}} {{.CreatedAtRaw}}'
+3f53bb00af943dfdf815650be70c0fa7b426e56a66f5e3362b47a129d57d5991 2018-12-20 19:21:30.122610396 -0500 EST
+8e09da8f6701d7cde1526d79e3123b0f1109b78d925dfe9f9bac6d59d702a390 2019-01-08 09:22:52.330623532 -0500 EST
+```
+
+```
+# buildah images --format '{{.ID}} {{.Name}} {{.Digest}} {{.CreatedAt}} {{.Size}} {{.CreatedAtRaw}}'
+3f53bb00af943dfdf815650be70c0fa7b426e56a66f5e3362b47a129d57d5991 docker.io/library/alpine sha256:3d2e482b82608d153a374df3357c0291589a61cc194ec4a9ca2381073a17f58e Dec 20, 2018 19:21 4.67 MB 2018-12-20 19:21:30.122610396 -0500 EST
+8e09da8f6701d7cde1526d79e3123b0f1109b78d925dfe9f9bac6d59d702a390 <none> sha256:894532ec56e0205ce68ca7230b00c18aa3c8ee39fcdb310615c60e813057229c Jan 8, 2019 09:22 4.67 MB 2019-01-08 09:22:52.330623532 -0500 EST
+```
+## SEE ALSO
+buildah(1), containers-storage.conf(5)
diff --git a/docs/buildah-info.1.md b/docs/buildah-info.1.md
new file mode 100644
index 0000000..d7483e7
--- /dev/null
+++ b/docs/buildah-info.1.md
@@ -0,0 +1,73 @@
+# buildah-info "1" "November 2018" "Buildah"
+
+## NAME
+buildah\-info - Display Buildah system information.
+
+## SYNOPSIS
+**buildah info** [*options*]
+
+## DESCRIPTION
+The information displayed pertains to the host and current storage statistics which is useful when reporting issues.
+
+## OPTIONS
+
+**--debug**, **-d**
+
+Show additional information.
+
+**--format** *template*
+
+Use *template* as a Go template when formatting the output.
+
+## EXAMPLE
+Run buildah info response:
+```
+$ buildah info
+{
+ "host": {
+ "Distribution": {
+ "distribution": "ubuntu",
+ "version": "18.04"
+ },
+ "MemTotal": 16702980096,
+ "MemFree": 309428224,
+ "SwapFree": 2146693120,
+ "SwapTotal": 2147479552,
+ "arch": "amd64",
+ "cpus": 4,
+ "hostname": "localhost.localdomain",
+ "kernel": "4.15.0-36-generic",
+ "os": "linux",
+ "rootless": false,
+ "uptime": "91h 30m 59.9s (Approximately 3.79 days)"
+ },
+ "store": {
+ "ContainerStore": {
+ "number": 2
+ },
+ "GraphDriverName": "overlay",
+ "GraphOptions": [
+ "overlay.override_kernel_check=true"
+ ],
+ "GraphRoot": "/var/lib/containers/storage",
+ "GraphStatus": {
+ "Backing Filesystem": "extfs",
+ "Native Overlay Diff": "true",
+ "Supports d_type": "true"
+ },
+ "ImageStore": {
+ "number": 1
+ },
+ "RunRoot": "/run/containers/storage"
+ }
+}
+```
+
+Run buildah info and retrieve only the store information:
+```
+$ buildah info --format={{".store"}}
+map[GraphOptions:[overlay.override_kernel_check=true] GraphStatus:map[Backing Filesystem:extfs Supports d_type:true Native Overlay Diff:true] ImageStore:map[number:1] ContainerStore:map[number:2] GraphRoot:/var/lib/containers/storage RunRoot:/run/containers/storage GraphDriverName:overlay]
+```
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah-inspect.1.md b/docs/buildah-inspect.1.md
new file mode 100644
index 0000000..6b43642
--- /dev/null
+++ b/docs/buildah-inspect.1.md
@@ -0,0 +1,39 @@
+# buildah-inspect "1" "May 2017" "buildah"
+
+## NAME
+buildah\-inspect - Display information about working containers or images or manifest lists.
+
+## SYNOPSIS
+**buildah inspect** [*options*] [**--**] *object*
+
+## DESCRIPTION
+Prints the low-level information on Buildah object(s) (e.g. container, images, manifest lists) identified by name or ID. By default, this will render all results in a
+JSON array. If the container, image, or manifest lists have the same name, this will return container JSON for an unspecified type. If a format is specified,
+the given template will be executed for each result.
+
+## OPTIONS
+
+**--format**, **-f** *template*
+
+Use *template* as a Go template when formatting the output.
+
+Users of this option should be familiar with the [*text/template*
+package](https://golang.org/pkg/text/template/) in the Go standard library, and
+of internals of Buildah's implementation.
+
+**--type**, **-t** **container** | **image** | **manifest**
+
+Specify whether *object* is a container, image or a manifest list.
+
+## EXAMPLE
+
+buildah inspect containerID
+
+buildah inspect --type container containerID
+
+buildah inspect --type image imageID
+
+buildah inspect --format '{{.OCIv1.Config.Env}}' alpine
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah-login.1.md b/docs/buildah-login.1.md
new file mode 100644
index 0000000..6075e06
--- /dev/null
+++ b/docs/buildah-login.1.md
@@ -0,0 +1,114 @@
+# buildah-login "1" "Apr 2019" "buildah"
+
+## NAME
+buildah\-login - Login to a container registry
+
+## SYNOPSIS
+**buildah login** [*options*] *registry*
+
+## DESCRIPTION
+**buildah login** logs into a specified registry server with the correct username
+and password. **buildah login** reads in the username and password from STDIN.
+The username and password can also be set using the **username** and **password** flags.
+The path of the authentication file can be specified by the user by setting the **authfile**
+flag. The default path used is **${XDG\_RUNTIME_DIR}/containers/auth.json**. If XDG_RUNTIME_DIR
+is not set, the default is /run/containers/$UID/auth.json.
+
+**buildah [GLOBAL OPTIONS]**
+
+**buildah login [GLOBAL OPTIONS]**
+
+**buildah login [OPTIONS] REGISTRY [GLOBAL OPTIONS]**
+
+## OPTIONS
+
+**--authfile**
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`.
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--cert-dir** *path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+The default certificates directory is _/etc/containers/certs.d_.
+
+**--compat-auth-file**=*path*
+
+Instead of updating the default credentials file, update the one at *path*, and use a Docker-compatible format.
+
+**--get-login**
+
+Return the logged-in user for the registry. Return error if no login is found.
+
+**--help**, **-h**
+
+Print usage statement
+
+**--password**, **-p**
+
+Password for registry
+
+**--password-stdin**
+
+Take the password from stdin
+
+**--tls-verify**
+
+Require HTTPS and verification of certificates when talking to container registries (default: true). If explicitly set to true,
+then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
+TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf.
+TLS verification cannot be used when talking to an insecure registry.
+
+**--username**, **-u**
+
+Username for registry
+
+**--verbose**, **-v**
+
+print detailed information about credential store
+
+## EXAMPLES
+
+```
+$ buildah login quay.io
+Username: qiwanredhat
+Password:
+Login Succeeded!
+```
+
+```
+$ buildah login -u testuser -p testpassword localhost:5000
+Login Succeeded!
+```
+
+```
+$ buildah login --authfile ./auth.json quay.io
+Username: qiwanredhat
+Password:
+Login Succeeded!
+```
+
+```
+$ buildah login --tls-verify=false -u test -p test localhost:5000
+Login Succeeded!
+```
+
+```
+$ buildah login --cert-dir /etc/containers/certs.d/ -u foo -p bar localhost:5000
+Login Succeeded!
+```
+
+```
+$ buildah login -u testuser --password-stdin < pw.txt quay.io
+Login Succeeded!
+```
+
+```
+$ echo $testpassword | buildah login -u testuser --password-stdin quay.io
+Login Succeeded!
+```
+
+## SEE ALSO
+buildah(1), buildah-logout(1)
diff --git a/docs/buildah-logout.1.md b/docs/buildah-logout.1.md
new file mode 100644
index 0000000..9b00e20
--- /dev/null
+++ b/docs/buildah-logout.1.md
@@ -0,0 +1,60 @@
+# buildah-logout "1" "Apr 2019" "buildah"
+
+## NAME
+buildah\-logout - Logout of a container registry
+
+## SYNOPSIS
+**buildah logout** [*options*] *registry*
+
+## DESCRIPTION
+**buildah logout** logs out of a specified registry server by deleting the cached credentials
+stored in the **auth.json** file. The path of the authentication file can be overridden by the user by setting the **authfile** flag.
+The default path used is **${XDG\_RUNTIME_DIR}/containers/auth.json**. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json.
+All the cached credentials can be removed by setting the **all** flag.
+
+**buildah [GLOBAL OPTIONS]**
+
+**buildah logout [GLOBAL OPTIONS]**
+
+**buildah logout [OPTIONS] REGISTRY [GLOBAL OPTIONS]**
+
+## OPTIONS
+
+**--all**, **-a**
+
+Remove the cached credentials for all registries in the auth file
+
+**--authfile**
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json.
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--compat-auth-file**=*path*
+
+Instead of updating the default credentials file, update the one at *path*, and use a Docker-compatible format.
+
+**--help**, **-h**
+
+Print usage statement
+
+## EXAMPLES
+
+```
+$ buildah logout quay.io
+Removed login credentials for quay.io
+```
+
+```
+$ buildah logout --authfile authdir/myauths.json quay.io
+Removed login credentials for quay.io
+```
+
+```
+$ buildah logout --all
+Remove login credentials for all registries
+```
+
+## SEE ALSO
+buildah(1), buildah-login(1)
diff --git a/docs/buildah-manifest-add.1.md b/docs/buildah-manifest-add.1.md
new file mode 100644
index 0000000..b9fe7c9
--- /dev/null
+++ b/docs/buildah-manifest-add.1.md
@@ -0,0 +1,109 @@
+# buildah-manifest-add "1" "September 2019" "buildah"
+
+## NAME
+
+buildah\-manifest\-add - Add an image to a manifest list or image index.
+
+## SYNOPSIS
+
+**buildah manifest add** [options...] *listNameOrIndexName* *imageName*
+
+## DESCRIPTION
+
+Adds the specified image to the specified manifest list or image index.
+
+## RETURN VALUE
+
+The list image's ID and the digest of the image's manifest.
+
+## OPTIONS
+
+**--all**
+
+If the image which should be added to the list or index is itself a list or
+index, add all of the contents to the local list. By default, only one image
+from such a list or index will be added to the list or index. Combining
+*--all* with any of the other options described below is NOT recommended.
+
+**--annotation** *annotation=value*
+
+Set an annotation on the entry for the newly-added image.
+
+**--arch**
+
+Override the architecture which the list or index records as a requirement for
+the image. If *imageName* refers to a manifest list or image index, the
+architecture information will be retrieved from it. Otherwise, it will be
+retrieved from the image's configuration information.
+
+**--authfile** *path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`.
+
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--cert-dir** *path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+The default certificates directory is _/etc/containers/certs.d_.
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--features**
+
+Specify the features list which the list or index records as requirements for
+the image. This option is rarely used.
+
+**--os**
+
+Override the OS which the list or index records as a requirement for the image.
+If *imageName* refers to a manifest list or image index, the OS information
+will be retrieved from it. Otherwise, it will be retrieved from the image's
+configuration information.
+
+**--os-features**
+
+Specify the OS features list which the list or index records as requirements
+for the image. This option is rarely used.
+
+**--os-version**
+
+Specify the OS version which the list or index records as a requirement for the
+image. This option is rarely used.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+**--variant**
+
+Specify the variant which the list or index records for the image. This option
+is typically used to distinguish between multiple entries which share the same
+architecture value, but which expect different versions of its instruction set.
+
+## EXAMPLE
+
+```
+buildah manifest add mylist:v1.11 docker://fedora
+506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7
+```
+
+```
+buildah manifest add --all mylist:v1.11 docker://fedora
+506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7
+```
+
+```
+buildah manifest add --arch arm64 --variant v8 mylist:v1.11 docker://fedora@sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
+506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
+```
+
+## SEE ALSO
+buildah(1), buildah-login(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1), docker-login(1)
diff --git a/docs/buildah-manifest-annotate.1.md b/docs/buildah-manifest-annotate.1.md
new file mode 100644
index 0000000..86e9341
--- /dev/null
+++ b/docs/buildah-manifest-annotate.1.md
@@ -0,0 +1,66 @@
+# buildah-manifest-annotate "1" "September 2019" "buildah"
+
+## NAME
+
+buildah\-manifest\-annotate - Add and update information about an image to a manifest list or image index.
+
+## SYNOPSIS
+
+**buildah manifest annotate** [options...] *listNameOrIndexName* *imageManifestDigest*
+
+## DESCRIPTION
+
+Adds or updates information about an image included in a manifest list or image index.
+
+## RETURN VALUE
+
+The list image's ID and the digest of the image's manifest.
+
+## OPTIONS
+
+**--annotation** *annotation=value*
+
+Set an annotation on the entry for the specified image.
+
+**--arch**
+
+Override the architecture which the list or index records as a requirement for
+the image. This is usually automatically retrieved from the image's
+configuration information, so it is rarely necessary to use this option.
+
+**--features**
+
+Specify the features list which the list or index records as requirements for
+the image. This option is rarely used.
+
+**--os**
+
+Override the OS which the list or index records as a requirement for the image.
+This is usually automatically retrieved from the image's configuration
+information, so it is rarely necessary to use this option.
+
+**--os-features**
+
+Specify the OS features list which the list or index records as requirements
+for the image. This option is rarely used.
+
+**--os-version**
+
+Specify the OS version which the list or index records as a requirement for the
+image. This option is rarely used.
+
+**--variant**
+
+Specify the variant which the list or index records for the image. This option
+is typically used to distinguish between multiple entries which share the same
+architecture value, but which expect different versions of its instruction set.
+
+## EXAMPLE
+
+```
+buildah manifest annotate --arch arm64 --variant v8 mylist:v1.11 sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
+506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b
+```
+
+## SEE ALSO
+buildah(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1)
diff --git a/docs/buildah-manifest-create.1.md b/docs/buildah-manifest-create.1.md
new file mode 100644
index 0000000..67ad77d
--- /dev/null
+++ b/docs/buildah-manifest-create.1.md
@@ -0,0 +1,62 @@
+# buildah-manifest-create "1" "August 2022" "buildah"
+
+## NAME
+
+buildah\-manifest\-create - Create a manifest list or image index.
+
+## SYNOPSIS
+
+**buildah manifest create** *listNameOrIndexName* [*imageName* ...]
+
+## DESCRIPTION
+
+Creates a new manifest list and stores it as an image in local storage using
+the specified name.
+
+If additional images are specified, they are added to the newly-created list or
+index.
+
+## RETURN VALUE
+
+The randomly-generated image ID of the newly-created list or index. The image
+can be deleted using the *buildah rmi* command.
+
+## OPTIONS
+
+**--all**
+
+If any of the images which should be added to the new list or index are
+themselves lists or indexes, add all of their contents. By default, only one
+image from such a list will be added to the newly-created list or index.
+
+**--amend**
+
+If a manifest list named *listNameOrIndexName* already exists, modify the
+preexisting list instead of exiting with an error. The contents of
+*listNameOrIndexName* are not modified if no *imageName*s are given.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+## EXAMPLE
+
+```
+buildah manifest create mylist:v1.11
+941c1259e4b85bebf23580a044e4838aa3c1e627528422c9bf9262ff1661fca9
+buildah manifest create --amend mylist:v1.11
+941c1259e4b85bebf23580a044e4838aa3c1e627528422c9bf9262ff1661fca9
+```
+
+```
+buildah manifest create mylist:v1.11 docker://fedora
+941c1259e4b85bebf23580a044e4838aa3c1e627528422c9bf9262ff1661fca9
+```
+
+```
+buildah manifest create --all mylist:v1.11 docker://fedora
+941c1259e4b85bebf23580a044e4838aa3c1e627528422c9bf9262ff1661fca9
+```
+
+## SEE ALSO
+buildah(1), buildah-manifest(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1)
diff --git a/docs/buildah-manifest-exists.1.md b/docs/buildah-manifest-exists.1.md
new file mode 100644
index 0000000..8492d05
--- /dev/null
+++ b/docs/buildah-manifest-exists.1.md
@@ -0,0 +1,40 @@
+% buildah-manifest-exists(1)
+
+## NAME
+buildah\-manifest\-exists - Check if the given manifest list exists in local storage
+
+## SYNOPSIS
+**buildah manifest exists** *manifest*
+
+## DESCRIPTION
+**buildah manifest exists** checks if a manifest list exists in local storage. Buildah will
+return an exit code of `0` when the manifest list is found. A `1` will be returned otherwise.
+An exit code of `125` indicates there was another issue.
+
+
+## OPTIONS
+
+#### **--help**, **-h**
+
+Print usage statement.
+
+## EXAMPLE
+
+Check if a manifest list called `list1` exists (the manifest list does actually exist).
+```
+$ buildah manifest exists list1
+$ echo $?
+0
+$
+```
+
+Check if a manifest list called `mylist` exists (the manifest list does not actually exist).
+```
+$ buildah manifest exists mylist
+$ echo $?
+1
+$
+```
+
+## SEE ALSO
+**[buildah(1)](buildah.1.md)**, **[buildah-manifest(1)](buildah-manifest.1.md)**
diff --git a/docs/buildah-manifest-inspect.1.md b/docs/buildah-manifest-inspect.1.md
new file mode 100644
index 0000000..8bdf3d3
--- /dev/null
+++ b/docs/buildah-manifest-inspect.1.md
@@ -0,0 +1,37 @@
+# buildah-manifest-inspect "1" "September 2019" "buildah"
+
+## NAME
+
+buildah\-manifest\-inspect - Display a manifest list or image index.
+
+## SYNOPSIS
+
+**buildah manifest inspect** *listNameOrIndexName*
+
+## DESCRIPTION
+
+Displays the manifest list or image index stored using the specified image name.
+
+## RETURN VALUE
+
+A formatted JSON representation of the manifest list or image index.
+
+## OPTIONS
+
+**--authfile** *path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `buildah login`.
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+## EXAMPLE
+
+```
+buildah manifest inspect mylist:v1.11
+```
+
+## SEE ALSO
+buildah(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-push(1), buildah-rmi(1)
diff --git a/docs/buildah-manifest-push.1.md b/docs/buildah-manifest-push.1.md
new file mode 100644
index 0000000..8b68405
--- /dev/null
+++ b/docs/buildah-manifest-push.1.md
@@ -0,0 +1,101 @@
+# buildah-manifest-push "1" "September 2019" "buildah"
+
+## NAME
+
+buildah\-manifest\-push - Push a manifest list or image index to a registry.
+
+## SYNOPSIS
+
+**buildah manifest push** [options...] *listNameOrIndexName* *transport:details*
+
+## DESCRIPTION
+
+Pushes a manifest list or image index to a registry.
+
+## RETURN VALUE
+
+The list image's ID and the digest of the image's manifest.
+
+## OPTIONS
+
+**--add-compression** *compression*
+
+Makes sure that requested compression variant for each platform is added to the manifest list keeping original instance
+intact in the same manifest list. Supported values are (`gzip`, `zstd` and `zstd:chunked`)
+
+Note: This is different from `--compression-format`, which replaces the instance with one using the
+specified compression, while `--add-compression` makes sure that each instance has its compression
+variant added to the manifest list without modifying the original instance.
+
+**--all**
+
+Push the images mentioned in the manifest list or image index, in addition to
+the list or index itself.
+
+**--authfile** *path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `buildah login`.
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+
+**--cert-dir** *path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+The default certificates directory is _/etc/containers/certs.d_.
+
+**--compression-format** *format*
+
+Specifies the compression format to use. Supported values are: `gzip`, `zstd` and `zstd:chunked`.
+
+**--compression-level** *level*
+
+Specify the compression level used with the compression.
+
+Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--digestfile** *Digestfile*
+
+After copying the image, write the digest of the resulting image to the file.
+
+**--force-compression**
+
+If set, push uses the specified compression algorithm even if the destination contains a differently-compressed variant already.
+Defaults to `true` if `--compression-format` is explicitly specified on the command-line, `false` otherwise.
+
+**--format**, **-f**
+
+Manifest list type (oci or v2s2) to use when pushing the list (default is oci).
+
+**--quiet**, **-q**
+
+Don't output progress information when pushing lists.
+
+**--remove-signatures**
+
+Don't copy signatures when pushing images.
+
+**--rm**
+
+Delete the manifest list or image index from local storage if pushing succeeds.
+
+**--sign-by** *fingerprint*
+
+Sign the pushed images using the GPG key that matches the specified fingerprint.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+## EXAMPLE
+
+```
+buildah manifest push mylist:v1.11 registry.example.org/mylist:v1.11
+```
+
+## SEE ALSO
+buildah(1), buildah-login(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-rmi(1), docker-login(1)
diff --git a/docs/buildah-manifest-remove.1.md b/docs/buildah-manifest-remove.1.md
new file mode 100644
index 0000000..f131a33
--- /dev/null
+++ b/docs/buildah-manifest-remove.1.md
@@ -0,0 +1,27 @@
+# buildah-manifest-remove "1" "September 2019" "buildah"
+
+## NAME
+
+buildah\-manifest\-remove - Remove an image from a manifest list or image index.
+
+## SYNOPSIS
+
+**buildah manifest remove** *listNameOrIndexName* *imageManifestDigest*
+
+## DESCRIPTION
+
+Removes the image with the specified digest from the specified manifest list or image index.
+
+## RETURN VALUE
+
+The list image's ID and the digest of the removed image's manifest.
+
+## EXAMPLE
+
+```
+buildah manifest remove mylist:v1.11 sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7
+506d8f4bb54931ea03a7e70173a0ed6302e3fb92dfadb3955ba5c17812e95c51: sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7
+```
+
+## SEE ALSO
+buildah(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1)
diff --git a/docs/buildah-manifest-rm.1.md b/docs/buildah-manifest-rm.1.md
new file mode 100644
index 0000000..c122fda
--- /dev/null
+++ b/docs/buildah-manifest-rm.1.md
@@ -0,0 +1,25 @@
+# buildah-manifest-rm "1" "April 2021" "buildah"
+
+## NAME
+buildah\-manifest\-rm - Removes one or more manifest lists.
+
+## SYNOPSIS
+**buildah manifest rm** [*listNameOrIndexName* ...]
+
+## DESCRIPTION
+Removes one or more locally stored manifest lists.
+
+## EXAMPLE
+
+buildah manifest rm <list>
+
+buildah manifest-rm listID1 listID2
+
+**storage.conf** (`/etc/containers/storage.conf`)
+
+storage.conf is the storage configuration file for all tools using containers/storage
+
+The storage configuration file specifies all of the available container storage options for tools using shared container storage.
+
+## SEE ALSO
+buildah(1), containers-storage.conf(5), buildah-manifest(1)
diff --git a/docs/buildah-manifest.1.md b/docs/buildah-manifest.1.md
new file mode 100644
index 0000000..a0c6c64
--- /dev/null
+++ b/docs/buildah-manifest.1.md
@@ -0,0 +1,76 @@
+# buildah-manifest "1" "September 2019" "buildah"
+
+## NAME
+buildah-manifest - Create and manipulate manifest lists and image indexes.
+
+## SYNOPSIS
+buildah manifest COMMAND [OPTIONS] [ARG...]
+
+## DESCRIPTION
+The `buildah manifest` command provides subcommands which can be used to:
+
+ * Create a working Docker manifest list or OCI image index.
+ * Add an entry to a manifest list or image index for a specified image.
+ * Add or update information about an entry in a manifest list or image index.
+ * Delete a working container or an image.
+ * Push a manifest list or image index to a registry or other location.
+
+## SUBCOMMANDS
+
+| Command | Man Page | Description |
+| ------- | -------------------------------------------------------------- | --------------------------------------------------------------------------- |
+| add | [buildah-manifest-add(1)](buildah-manifest-add.1.md) | Add an image to a manifest list or image index. |
+| annotate | [buildah-manifest-annotate(1)](buildah-manifest-annotate.1.md) | Add or update information about an image in a manifest list or image index. |
+| create | [buildah-manifest-create(1)](buildah-manifest-create.1.md) | Create a manifest list or image index. |
+| exists | [buildah-manifest-exists(1)](buildah-manifest-exists.1.md) | Check if a manifest list exists in local storage. |
+| inspect | [buildah-manifest-inspect(1)](buildah-manifest-inspect.1.md) | Display the contents of a manifest list or image index. |
+| push | [buildah-manifest-push(1)](buildah-manifest-push.1.md) | Push a manifest list or image index to a registry or other location. |
+| remove | [buildah-manifest-remove(1)](buildah-manifest-remove.1.md) | Remove an image from a manifest list or image index. |
+| rm | [buildah-manifest-rm(1)](buildah-manifest-rm.1.md) | Remove manifest list from local storage. |
+
+
+## EXAMPLES
+
+### Building a multi-arch manifest list from a Containerfile
+
+Assuming the `Containerfile` uses `RUN` instructions, the host needs
+a way to execute non-native binaries. Configuring this is beyond
+the scope of this example. Building a multi-arch manifest list
+`shazam` in parallel across 4-threads can be done like this:
+
+ $ platarch=linux/amd64,linux/ppc64le,linux/arm64,linux/s390x
+ $ buildah build --jobs=4 --platform=$platarch --manifest shazam .
+
+**Note:** The `--jobs` argument is optional, and the `-t` or `--tag`
+option should *not* be used.
+
+### Assembling a multi-arch manifest from separately built images
+
+Assuming `example.com/example/shazam:$arch` images are built separately
+on other hosts and pushed to the `example.com` registry. They may
+be combined into a manifest list, and pushed using a simple loop:
+
+ $ REPO=example.com/example/shazam
+ $ buildah manifest create $REPO:latest
+ $ for IMGTAG in amd64 s390x ppc64le arm64; do \
+      buildah manifest add $REPO:latest docker://$REPO:$IMGTAG; \
+ done
+ $ buildah manifest push --all $REPO:latest
+
+**Note:** The `add` instruction argument order is `<manifest>` then `<image>`.
+Also, the `--all` push option is required to ensure all contents are
+pushed, not just the native platform/arch.
+
+### Removing and tagging a manifest list before pushing
+
+Special care is needed when removing and pushing manifest lists, as opposed
+to the contents. You almost always want to use the `manifest rm` and
+`manifest push --all` subcommands. For example, a rename and push could
+be performed like this:
+
+ $ buildah tag localhost/shazam example.com/example/shazam
+ $ buildah manifest rm localhost/shazam
+ $ buildah manifest push --all example.com/example/shazam
+
+## SEE ALSO
+buildah(1), buildah-manifest-create(1), buildah-manifest-add(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-manifest-rm(1)
diff --git a/docs/buildah-mkcw.1.md b/docs/buildah-mkcw.1.md
new file mode 100644
index 0000000..2733327
--- /dev/null
+++ b/docs/buildah-mkcw.1.md
@@ -0,0 +1,78 @@
+# buildah-mkcw "1" "July 2023" "buildah"
+
+## NAME
+buildah\-mkcw - Convert a conventional container image into a confidential workload image.
+
+## SYNOPSIS
+**buildah mkcw** [*options*] *source* *destination*
+
+## DESCRIPTION
+Converts the contents of a container image into a new container image which is
+suitable for use in a trusted execution environment (TEE), typically run using
+krun (i.e., crun built with the libkrun feature enabled and invoked as *krun*).
+Instead of the conventional contents, the root filesystem of the created image
+will contain an encrypted disk image and configuration information for krun.
+
+## source
+A container image, stored locally or in a registry
+
+## destination
+A container image, stored locally or in a registry
+
+## OPTIONS
+
+**--attestation-url**, **-u** *url*
+The location of a key broker / attestation server.
+If a value is specified, the new image's workload ID, along with the passphrase
+used to encrypt the disk image, will be registered with the server, and the
+server's location will be stored in the container image.
+At run-time, krun is expected to contact the server to retrieve the passphrase
+using the workload ID, which is also stored in the container image.
+If no value is specified, a *passphrase* value *must* be specified.
+
+**--base-image**, **-b** *image*
+An alternate image to use as the base for the output image. By default,
+the *scratch* non-image is used.
+
+**--cpus**, **-c** *number*
+The number of virtual CPUs which the image expects to be run with at run-time.
+If not specified, a default value will be supplied.
+
+**--firmware-library**, **-f** *file*
+The location of the libkrunfw-sev shared library. If not specified, `buildah`
+checks for its presence in a number of hard-coded locations.
+
+**--memory**, **-m** *number*
+The amount of memory which the image expects to be run with at run-time, as a
+number of megabytes. If not specified, a default value will be supplied.
+
+**--passphrase**, **-p** *text*
+The passphrase to use to encrypt the disk image which will be included in the
+container image.
+If no value is specified, but an *--attestation-url* value is specified, a
+randomly-generated passphrase will be used.
+The authors recommend setting an *--attestation-url* but not a *--passphrase*.
+
+**--slop**, **-s** *{percentage%|sizeKB|sizeMB|sizeGB}*
+Extra space to allocate for the disk image compared to the size of the
+container image's contents, expressed either as a percentage (..%) or a size
+value (bytes, or larger units if suffixes like KB or MB are present), or a sum
+of two or more such specifications. If not specified, `buildah` guesses that
+25% more space than the contents will be enough, but this option is provided in
+case its guess is wrong. If the specified or computed size is less than 10
+megabytes, it will be increased to 10 megabytes.
+
+**--type**, **-t** {SEV|SNP}
+The type of trusted execution environment (TEE) which the image should be
+marked for use with. Accepted values are "SEV" (AMD Secure Encrypted
+Virtualization - Encrypted State) and "SNP" (AMD Secure Encrypted
+Virtualization - Secure Nested Paging). If not specified, defaults to "SNP".
+
+**--workload-id**, **-w** *id*
+A workload identifier which will be recorded in the container image, to be used
+at run-time for retrieving the passphrase which was used to encrypt the disk
+image. If not specified, a semi-random value will be derived from the base
+image's image ID.
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah-mount.1.md b/docs/buildah-mount.1.md
new file mode 100644
index 0000000..7d5031b
--- /dev/null
+++ b/docs/buildah-mount.1.md
@@ -0,0 +1,66 @@
+# buildah-mount "1" "March 2017" "buildah"
+
+## NAME
+buildah\-mount - Mount a working container's root filesystem.
+
+## SYNOPSIS
+**buildah mount** [*container* ...]
+
+## DESCRIPTION
+Mounts the specified container's root file system in a location which can be
+accessed from the host, and returns its location.
+
+If the mount command is invoked without any arguments, the tool will list all of the currently mounted containers.
+
+When running in rootless mode, mount runs in a different namespace so
+that the mounted volume might not be accessible from the host when
+using a driver different than `vfs`. To be able to access the file
+system mounted, you might need to create the mount namespace
+separately as part of `buildah unshare`. In the environment created
+with `buildah unshare` you can then use `buildah mount` and have
+access to the mounted file system.
+
+## RETURN VALUE
+The location of the mounted file system. On error an empty string and errno is
+returned.
+
+## OPTIONS
+
+**--json**
+
+Output in JSON format.
+
+## EXAMPLE
+
+```
+buildah mount working-container
+/var/lib/containers/storage/overlay2/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged
+```
+
+```
+buildah mount
+working-container /var/lib/containers/storage/overlay2/f3ac502d97b5681989dff84dfedc8354239bcecbdc2692f9a639f4e080a02364/merged
+fedora-working-container /var/lib/containers/storage/overlay2/0ff7d7ca68bed1ace424f9df154d2dd7b5a125c19d887f17653cbcd5b6e30ba1/merged
+```
+
+```
+buildah mount working-container fedora-working-container ubi8-working-container
+working-container /var/lib/containers/storage/overlay/f8cac5cce73e5102ab321cc5b57c0824035b5cb82b6822e3c86ebaff69fefa9c/merged
+fedora-working-container /var/lib/containers/storage/overlay/c3ec418be5bda5b72dca74c4d397e05829fe62ecd577dd7518b5f7fc1ca5f491/merged
+ubi8-working-container /var/lib/containers/storage/overlay/03a071f206f70f4fcae5379bd5126be86b5352dc2a0c3449cd6fca01b77ea868/merged
+```
+
+If running in rootless mode, you need to do a buildah unshare first to use
+the mount point.
+```
+$ buildah unshare
+# buildah mount working-container
+/var/lib/containers/storage/overlay/f8cac5cce73e5102ab321cc5b57c0824035b5cb82b6822e3c86ebaff69fefa9c/merged
+# cp foobar /var/lib/containers/storage/overlay/f8cac5cce73e5102ab321cc5b57c0824035b5cb82b6822e3c86ebaff69fefa9c/merged
+# buildah unmount working-container
+# exit
+$ buildah commit working-container newimage
+```
+
+## SEE ALSO
+buildah(1), buildah-unshare(1), buildah-umount(1)
diff --git a/docs/buildah-prune.1.md b/docs/buildah-prune.1.md
new file mode 100644
index 0000000..5c5cfb3
--- /dev/null
+++ b/docs/buildah-prune.1.md
@@ -0,0 +1,33 @@
+# buildah-prune "1" "Jan 2023" "buildah"
+
+## NAME
+
+buildah\-prune - Cleanup intermediate images as well as build and mount cache.
+
+## SYNOPSIS
+
+**buildah prune**
+
+## DESCRIPTION
+
+Cleanup intermediate images as well as build and mount cache.
+
+## OPTIONS
+
+**--all**, **-a**
+
+All local images will be removed from the system that do not have containers using the image as a reference image.
+
+**--force**, **-f**
+
+This option will cause Buildah to remove all containers that are using the image before removing the image from the system.
+
+## EXAMPLE
+
+buildah prune
+
+buildah prune --force
+
+## SEE ALSO
+
+buildah(1), containers-registries.conf(5), containers-storage.conf(5)
diff --git a/docs/buildah-pull.1.md b/docs/buildah-pull.1.md
new file mode 100644
index 0000000..b05907e
--- /dev/null
+++ b/docs/buildah-pull.1.md
@@ -0,0 +1,162 @@
+# buildah-pull "1" "July 2018" "buildah"
+
+## NAME
+buildah\-pull - Pull an image from a registry.
+
+## SYNOPSIS
+**buildah pull** [*options*] *image*
+
+## DESCRIPTION
+Pulls an image based upon the specified input. It supports all transports from `containers-transports(5)` (see examples below). If no transport is specified, the input is subject to short-name resolution (see `containers-registries.conf(5)`) and the `docker` (i.e., container registry) transport is used.
+
+### DEPENDENCIES
+
+Buildah resolves the path to the registry to pull from by using the /etc/containers/registries.conf
+file, containers-registries.conf(5). If the `buildah pull` command fails with an "image not known" error,
+first verify that the registries.conf file is installed and configured appropriately.
+
+## RETURN VALUE
+The image ID of the image that was pulled. On error 1 is returned.
+
+## OPTIONS
+
+**--all-tags**, **-a**
+
+All tagged images in the repository will be pulled.
+
+**--arch**="ARCH"
+
+Set the ARCH of the image to be pulled to the provided value instead of using the architecture of the host. (Examples: arm, arm64, 386, amd64, ppc64le, s390x)
+
+**--authfile** *path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`.
+
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--cert-dir** *path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+The default certificates directory is _/etc/containers/certs.d_.
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--decryption-key** *key[:passphrase]*
+
+The [key[:passphrase]] to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
+
+**--os**="OS"
+
+Set the OS of the image to be pulled instead of using the current operating system of the host.
+
+**--platform**="OS/ARCH[/VARIANT]"
+
+Set the OS/ARCH of the image to be pulled
+to the provided value instead of using the current operating system and
+architecture of the host (for example `linux/arm`).
+
+OS/ARCH pairs are those used by the Go Programming Language. In several cases
+the ARCH value for a platform differs from one produced by other tools such as
+the `arch` command. Valid OS and architecture name combinations are listed as
+values for $GOOS and $GOARCH at https://golang.org/doc/install/source#environment,
+and can also be found by running `go tool dist list`.
+
+**NOTE:** The `--platform` option may not be used in combination with the `--arch`, `--os`, or `--variant` options.
+
+**--policy**=**always**|**missing**|**never**|**newer**
+
+Pull image policy. The default is **missing**.
+
+- **always**: Always pull the image and throw an error if the pull fails.
+- **missing**: Pull the image only if it could not be found in the local containers storage. Throw an error if no image could be found and the pull fails.
+- **never**: Never pull the image but use the one from the local containers storage. Throw an error if no image could be found.
+- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found.
+
+**--quiet**, **-q**
+
+If an image needs to be pulled from the registry, suppress progress output.
+
+**--remove-signatures**
+
+Don't copy signatures when pulling images.
+
+**--retry** *attempts*
+
+Number of times to retry in case of failure when performing pull of images from registry.
+
+Defaults to `3`.
+
+**--retry-delay** *duration*
+
+Duration of delay between retry attempts in case of failure when performing pull of images from registry.
+
+Defaults to `2s`.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+**--variant**=""
+
+Set the architecture variant of the image to be pulled.
+
+## EXAMPLE
+
+buildah pull imagename
+
+buildah pull docker://myregistry.example.com/imagename
+
+buildah pull docker-daemon:imagename:imagetag
+
+buildah pull docker-archive:filename
+
+buildah pull oci-archive:filename
+
+buildah pull dir:directoryname
+
+buildah pull --tls-verify=false myregistry/myrepository/imagename:imagetag
+
+buildah pull --creds=myusername:mypassword --cert-dir ~/auth myregistry/myrepository/imagename:imagetag
+
+buildah pull --authfile=/tmp/auths/myauths.json myregistry/myrepository/imagename:imagetag
+
+buildah pull --arch=aarch64 myregistry/myrepository/imagename:imagetag
+
+buildah pull --arch=arm --variant=v7 myregistry/myrepository/imagename:imagetag
+
+## ENVIRONMENT
+
+**BUILD\_REGISTRY\_SOURCES**
+
+BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
+lists of registry names under the keys `insecureRegistries`,
+`blockedRegistries`, and `allowedRegistries`.
+
+When pulling an image from a registry, if the name of the registry matches any
+of the items in the `blockedRegistries` list, the image pull attempt is denied.
+If there are registries in the `allowedRegistries` list, and the registry's
+name is not in the list, the pull attempt is denied.
+
+**TMPDIR**
+The TMPDIR environment variable allows the user to specify where temporary files
+are stored while pulling and pushing images. Defaults to '/var/tmp'.
+
+## FILES
+
+**registries.conf** (`/etc/containers/registries.conf`)
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+**policy.json** (`/etc/containers/policy.json`)
+
+Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images.
+
+## SEE ALSO
+buildah(1), buildah-from(1), buildah-login(1), docker-login(1), containers-policy.json(5), containers-registries.conf(5), containers-transports(5)
diff --git a/docs/buildah-push.1.md b/docs/buildah-push.1.md
new file mode 100644
index 0000000..9b173a6
--- /dev/null
+++ b/docs/buildah-push.1.md
@@ -0,0 +1,184 @@
+# buildah-push "1" "June 2017" "buildah"
+
+## NAME
+buildah\-push - Push an image, manifest list or image index from local storage to elsewhere.
+
+## SYNOPSIS
+**buildah push** [*options*] *image* [*destination*]
+
+## DESCRIPTION
+Pushes an image from local storage to a specified destination, decompressing
+and recompressing layers as needed.
+
+## imageID
+Image stored in local container/storage
+
+## DESTINATION
+
+ DESTINATION is the location the container image is pushed to. It supports all transports from `containers-transports(5)` (see examples below). If no transport is specified, the `docker` (i.e., container registry) transport is used.
+
+## OPTIONS
+
+**--all**
+
+If specified image is a manifest list or image index, push the images in addition to
+the list or index itself.
+
+**--authfile** *path*
+
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`.
+
+If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
+
+Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
+environment variable. `export REGISTRY_AUTH_FILE=path`
+
+**--cert-dir** *path*
+
+Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
+The default certificates directory is _/etc/containers/certs.d_.
+
+**--compression-format** *format*
+
+Specifies the compression format to use. Supported values are: `gzip`, `zstd` and `zstd:chunked`.
+
+**--compression-level** *level*
+
+Specify the compression level used with the compression.
+
+Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--digestfile** *Digestfile*
+
+After copying the image, write the digest of the resulting image to the file.
+
+**--disable-compression**, **-D**
+
+Don't compress copies of filesystem layers which will be pushed.
+
+**--encrypt-layer** *layer(s)*
+
+Layer(s) to encrypt: 0-indexed layer indices with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer). If not defined, will encrypt all layers if encryption-key flag is specified.
+
+**--encryption-key** *key*
+
+The [protocol:keyfile] specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
+
+**--force-compression**
+
+If set, push uses the specified compression algorithm even if the destination contains a differently-compressed variant already.
+Defaults to `true` if `--compression-format` is explicitly specified on the command-line, `false` otherwise.
+
+**--format**, **-f**
+
+Manifest Type (oci, v2s2, or v2s1) to use when pushing an image. (default is manifest type of the source image, with fallbacks)
+
+**--quiet**, **-q**
+
+When writing the output image, suppress progress output.
+
+**--remove-signatures**
+
+Don't copy signatures when pushing images.
+
+**--retry** *attempts*
+
+Number of times to retry in case of failure when performing push of images to registry.
+
+Defaults to `3`.
+
+**--retry-delay** *duration*
+
+Duration of delay between retry attempts in case of failure when performing push of images to registry.
+
+Defaults to `2s`.
+
+**--rm**
+
+When pushing a manifest list or image index, delete them from local storage if pushing succeeds.
+
+**--sign-by** *fingerprint*
+
+Sign the pushed image using the GPG key that matches the specified fingerprint.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
+
+## EXAMPLE
+
+This example pushes the image specified by the imageID to a local directory in docker format.
+
+ `# buildah push imageID dir:/path/to/image`
+
+This example pushes the image specified by the imageID to a local directory in oci format.
+
+ `# buildah push imageID oci:/path/to/layout:image:tag`
+
+This example pushes the image specified by the imageID to a tar archive in oci format.
+
+ `# buildah push imageID oci-archive:/path/to/archive:image:tag`
+
+This example pushes the image specified by the imageID to a container registry named registry.example.com.
+
+ `# buildah push imageID docker://registry.example.com/repository:tag`
+
+This example pushes the image specified by the imageID to a container registry named registry.example.com and saves the digest in the specified digestfile.
+
+ `# buildah push --digestfile=/tmp/mydigest imageID docker://registry.example.com/repository:tag`
+
+This example works like **docker push**, assuming *registry.example.com/my_image* is a local image.
+
+ `# buildah push registry.example.com/my_image`
+
+This example pushes the image specified by the imageID to a private container registry named registry.example.com with authentication from /tmp/auths/myauths.json.
+
+ `# buildah push --authfile /tmp/auths/myauths.json imageID docker://registry.example.com/repository:tag`
+
+This example pushes the image specified by the imageID and puts it into the local docker container store.
+
+ `# buildah push imageID docker-daemon:image:tag`
+
+This example pushes the image specified by the imageID and puts it into the registry on the localhost while turning off tls verification.
+ `# buildah push --tls-verify=false imageID localhost:5000/my-imageID`
+
+This example pushes the image specified by the imageID and puts it into the registry on the localhost using credentials and certificates for authentication.
+ `# buildah push --cert-dir ~/auth --tls-verify=true --creds=username:password imageID localhost:5000/my-imageID`
+
+## ENVIRONMENT
+
+**BUILD\_REGISTRY\_SOURCES**
+
+BUILD\_REGISTRY\_SOURCES, if set, is treated as a JSON object which contains
+lists of registry names under the keys `insecureRegistries`,
+`blockedRegistries`, and `allowedRegistries`.
+
+When pushing an image to a registry, if the portion of the destination image
+name that corresponds to a registry is compared to the items in the
+`blockedRegistries` list, and if it matches any of them, the push attempt is
+denied. If there are registries in the `allowedRegistries` list, and the
+portion of the name that corresponds to the registry is not in the list, the
+push attempt is denied.
+
+**TMPDIR**
+The TMPDIR environment variable allows the user to specify where temporary files
+are stored while pulling and pushing images. Defaults to '/var/tmp'.
+
+## FILES
+
+**registries.conf** (`/etc/containers/registries.conf`)
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+**policy.json** (`/etc/containers/policy.json`)
+
+Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for images, and whether or not the tool should trust the images.
+
+## SEE ALSO
+buildah(1), buildah-login(1), containers-policy.json(5), docker-login(1), containers-registries.conf(5), buildah-manifest(1)
diff --git a/docs/buildah-rename.1.md b/docs/buildah-rename.1.md
new file mode 100644
index 0000000..e103715
--- /dev/null
+++ b/docs/buildah-rename.1.md
@@ -0,0 +1,19 @@
+# buildah-rename "1" "July 2018" "buildah"
+
+## NAME
+buildah\-rename - Rename a local container.
+
+## SYNOPSIS
+**buildah rename** *container* *new-name*
+
+## DESCRIPTION
+Rename a local container.
+
+## EXAMPLE
+
+buildah rename containerName NewName
+
+buildah rename containerID NewName
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah-rm.1.md b/docs/buildah-rm.1.md
new file mode 100644
index 0000000..876e5fa
--- /dev/null
+++ b/docs/buildah-rm.1.md
@@ -0,0 +1,27 @@
+# buildah-rm "1" "March 2017" "buildah"
+
+## NAME
+buildah\-rm - Removes one or more working containers.
+
+## SYNOPSIS
+**buildah rm** [*container* ...]
+
+## DESCRIPTION
+Removes one or more working containers, unmounting them if necessary.
+
+## OPTIONS
+
+**--all**, **-a**
+
+All Buildah containers will be removed. Buildah containers are denoted with an '*' in the 'BUILDER' column listed by the command 'buildah containers'. A container name or id cannot be provided when this option is used.
+
+## EXAMPLE
+
+buildah rm containerID
+
+buildah rm containerID1 containerID2 containerID3
+
+buildah rm --all
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah-rmi.1.md b/docs/buildah-rmi.1.md
new file mode 100644
index 0000000..5746e49
--- /dev/null
+++ b/docs/buildah-rmi.1.md
@@ -0,0 +1,77 @@
+# buildah-rmi "1" "March 2017" "buildah"
+
+## NAME
+
+buildah\-rmi - Removes one or more images.
+
+## SYNOPSIS
+
+**buildah rmi** [*image* ...]
+
+## DESCRIPTION
+
+Removes one or more locally stored images.
+Passing an argument _image_ deletes it, along with any of its dangling (untagged) parent images.
+
+## LIMITATIONS
+
+* If the image was pushed to a directory path using the 'dir:' transport,
+ the rmi command can not remove the image. Instead, standard file system
+ commands should be used.
+
+* If _imageID_ is a name, but does not include a registry name, buildah will
+ attempt to find and remove the named image using the registry name _localhost_,
+ if no such image is found, it will search for the intended image by attempting
+ to expand the given name using the names of registries provided in the system's
+ registries configuration file, registries.conf.
+
+* If the _imageID_ refers to a *manifest list* or *image index*, this command
+ will ***not*** do what you expect! This command will remove the images
+ associated with the *manifest list* or *index* (not the manifest list/image index
+ itself). To remove that, use the `buildah manifest rm` subcommand instead.
+
+## OPTIONS
+
+**--all**, **-a**
+
+All local images will be removed from the system that do not have containers using the image as a reference image.
+An image name or id cannot be provided when this option is used. Read/Only images configured by modifying the "additionalimagestores" in the /etc/containers/storage.conf file, can not be removed.
+
+**--force**, **-f**
+
+This option will cause Buildah to remove all containers that are using the image before removing the image from the system.
+
+**--prune**, **-p**
+
+All local images will be removed from the system that do not have a tag and do not have a child image pointing to them.
+An image name or id cannot be provided when this option is used.
+
+## EXAMPLE
+
+buildah rmi imageID
+
+buildah rmi --all
+
+buildah rmi --all --force
+
+buildah rmi --prune
+
+buildah rmi --force imageID
+
+buildah rmi imageID1 imageID2 imageID3
+
+## FILES
+
+**registries.conf** (`/etc/containers/registries.conf`)
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+**storage.conf** (`/etc/containers/storage.conf`)
+
+storage.conf is the storage configuration file for all tools using containers/storage
+
+The storage configuration file specifies all of the available container storage options for tools using shared container storage.
+
+## SEE ALSO
+
+buildah(1), containers-registries.conf(5), containers-storage.conf(5)
diff --git a/docs/buildah-run.1.md b/docs/buildah-run.1.md
new file mode 100644
index 0000000..d96ddef
--- /dev/null
+++ b/docs/buildah-run.1.md
@@ -0,0 +1,403 @@
+# buildah-run "1" "March 2017" "buildah"
+
+## NAME
+buildah\-run - Run a command inside of the container.
+
+## SYNOPSIS
+**buildah run** [*options*] [**--**] *container* *command*
+
+## DESCRIPTION
+Launches a container and runs the specified command in that container using the
+container's root filesystem as a root filesystem, using configuration settings
+inherited from the container's image or as specified using previous calls to
+the *buildah config* command. To execute *buildah run* within an
+interactive shell, specify the --tty option.
+
+## OPTIONS
+
+**--add-history**
+
+Add an entry to the history which will note what command is being invoked.
+Defaults to false.
+
+Note: You can also override the default value of --add-history by setting the
+BUILDAH\_HISTORY environment variable. `export BUILDAH_HISTORY=true`
+
+**--cap-add**=*CAP\_xxx*
+
+Add the specified capability to the set of capabilities which will be granted
+to the specified command.
+Certain capabilities are granted by default; this option can be used to add
+more beyond the defaults, which may have been modified by **--cap-add** and
+**--cap-drop** options used with the *buildah from* invocation which created
+the container.
+
+**--cap-drop**=*CAP\_xxx*
+
+Drop the specified capability from the set of capabilities which will be granted
+to the specified command.
+The CAP\_CHOWN, CAP\_DAC\_OVERRIDE, CAP\_FOWNER,
+CAP\_FSETID, CAP\_KILL, CAP\_NET\_BIND\_SERVICE, CAP\_SETFCAP,
+CAP\_SETGID, CAP\_SETPCAP, and CAP\_SETUID capabilities are
+granted by default; this option can be used to remove them from the defaults,
+which may have been modified by **--cap-add** and **--cap-drop** options used
+with the *buildah from* invocation which created the container. The list of default capabilities is managed in containers.conf(5).
+
+If a capability is specified to both the **--cap-add** and **--cap-drop**
+options, it will be dropped, regardless of the order in which the options were
+given.
+
+**--cgroupns** *how*
+
+Sets the configuration for the cgroup namespaces for the container.
+The configured value can be "" (the empty string) or "private" to indicate
+that a new cgroup namespace should be created, or it can be "host" to indicate
+that the cgroup namespace in which `buildah` itself is being run should be reused.
+
+**--contextdir** *directory*
+
+Allows setting context directory for current RUN invocation. Specifying a context
+directory causes RUN context to consider context directory as root directory for
+specified source in `--mount` of type 'bind'.
+
+**--env**, **-e** *env=value*
+
+Temporarily add a value (e.g. env=*value*) to the environment for the running
+process. Unlike `buildah config --env`, the environment will not persist to
+later calls to `buildah run` or to the built image. Can be used multiple times.
+
+**--hostname**
+
+Set the hostname inside of the running container.
+
+**--ipc** *how*
+
+Sets the configuration for the IPC namespaces for the container.
+The configured value can be "" (the empty string) or "private" to indicate
+that a new IPC namespace should be created, or it can be "host" to indicate
+that the IPC namespace in which `buildah` itself is being run should be reused,
+or it can be the path to an IPC namespace which is already in use by another
+process.
+
+**--isolation** *type*
+
+Controls what type of isolation is used for running the process. Recognized
+types include *oci* (OCI-compatible runtime, the default), *rootless*
+(OCI-compatible runtime invoked using a modified configuration, with
+*--no-new-keyring* added to its *create* invocation, reusing the host's network
+and UTS namespaces, and creating private IPC, PID, mount, and user namespaces;
+the default for unprivileged users), and *chroot* (an internal wrapper that
+leans more toward chroot(1) than container technology, reusing the host's
+control group, network, IPC, and PID namespaces, and creating private mount and
+UTS namespaces, and creating user namespaces only when they're required for ID
+mapping).
+
+Note: You can also override the default isolation type by setting the
+BUILDAH\_ISOLATION environment variable. `export BUILDAH_ISOLATION=oci`
+
+**--mount**=*type=TYPE,TYPE-SPECIFIC-OPTION[,...]*
+
+Attach a filesystem mount to the container
+
+Current supported mount TYPES are bind, cache, secret and tmpfs. <sup>[[1]](#Footnote1)</sup>
+
+ e.g.
+
+ type=bind,source=/path/on/host,destination=/path/in/container
+
+ type=tmpfs,tmpfs-size=512M,destination=/path/in/container
+
+ type=cache,target=/path/in/container
+
+ Common Options:
+
+ · src, source: mount source spec for bind and volume. Mandatory for bind. If `from` is specified, `src` is the subpath in the `from` field.
+
+ · dst, destination, target: mount destination spec.
+
+ · ro, read-only: true or false (default).
+
+ Options specific to bind:
+
+ · bind-propagation: shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2).
+
+       · bind-nonrecursive: do not setup a recursive bind mount. By default it is recursive.
+
+ · from: stage or image name for the root of the source. Defaults to the build context.
+
+ · z: Set shared SELinux label on mounted destination. Use if SELinux is enabled on host machine.
+
+ · Z: Set private SELinux label on mounted destination. Use if SELinux is enabled on host machine.
+
+ Options specific to tmpfs:
+
+ · tmpfs-size: Size of the tmpfs mount in bytes. Unlimited by default in Linux.
+
+ · tmpfs-mode: File mode of the tmpfs in octal. (e.g. 700 or 0700.) Defaults to 1777 in Linux.
+
+ · tmpcopyup: Path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
+
+ Options specific to secret:
+
+ · id: the identifier for the secret passed into the `buildah bud --secret` or `podman build --secret` command.
+
+ Options specific to cache:
+
+ · id: Create a separate cache directory for a particular id.
+
+ · mode: File mode for new cache directory in octal. Default 0755.
+
+ · ro, readonly: read only cache if set.
+
+ · uid: uid for cache directory.
+
+ · gid: gid for cache directory.
+
+ · from: stage name for the root of the source. Defaults to host cache directory.
+
+ · z: Set shared SELinux label on mounted destination. Enabled by default if SELinux is enabled on the host machine.
+
+ · Z: Set private SELinux label on mounted destination. Use if SELinux is enabled on host machine.
+
+**--network**, **--net**=*mode*
+
+Sets the configuration for the network namespace for the container.
+
+Valid _mode_ values are:
+
+- **none**: no networking. Invalid if using **--dns**, **--dns-opt**, or **--dns-search**;
+- **host**: use the host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure;
+- **ns:**_path_: path to a network namespace to join;
+- **private**: create a new namespace for the container (default)
+- **\<network name|ID\>**: Join the network with the given name or ID, e.g. use `--network mynet` to join the network with the name mynet. Only supported for rootful users.
+- **slirp4netns[:OPTIONS,...]**: use **slirp4netns**(1) to create a user network stack. This is the default for rootless containers. It is possible to specify these additional options, they can also be set with `network_cmd_options` in containers.conf:
+ - **allow_host_loopback=true|false**: Allow slirp4netns to reach the host loopback IP (default is 10.0.2.2 or the second IP from slirp4netns cidr subnet when changed, see the cidr option below). The default is false.
+ - **mtu=MTU**: Specify the MTU to use for this network. (Default is `65520`).
+ - **cidr=CIDR**: Specify ip range to use for this network. (Default is `10.0.2.0/24`).
+ - **enable_ipv6=true|false**: Enable IPv6. Default is true. (Required for `outbound_addr6`).
+ - **outbound_addr=INTERFACE**: Specify the outbound interface slirp binds to (ipv4 traffic only).
+ - **outbound_addr=IPv4**: Specify the outbound ipv4 address slirp binds to.
+ - **outbound_addr6=INTERFACE**: Specify the outbound interface slirp binds to (ipv6 traffic only).
+ - **outbound_addr6=IPv6**: Specify the outbound ipv6 address slirp binds to.
+- **pasta[:OPTIONS,...]**: use **pasta**(1) to create a user-mode networking
+ stack. \
+ This is only supported in rootless mode. \
+ By default, IPv4 and IPv6 addresses and routes, as well as the pod interface
+ name, are copied from the host. If port forwarding isn't configured, ports
+ are forwarded dynamically as services are bound on either side (init
+ namespace or container namespace). Port forwarding preserves the original
+ source IP address. Options described in pasta(1) can be specified as
+ comma-separated arguments. \
+ In terms of pasta(1) options, **--config-net** is given by default, in
+ order to configure networking when the container is started, and
+ **--no-map-gw** is also assumed by default, to avoid direct access from
+ container to host using the gateway address. The latter can be overridden
+ by passing **--map-gw** in the pasta-specific options (despite not being an
+ actual pasta(1) option). \
+ Also, **-t none** and **-u none** are passed to disable
+ automatic port forwarding based on bound ports. Similarly, **-T none** and
+ **-U none** are given to disable the same functionality from container to
+ host. \
+ Some examples:
+ - **pasta:--map-gw**: Allow the container to directly reach the host using the
+ gateway address.
+ - **pasta:--mtu,1500**: Specify a 1500 bytes MTU for the _tap_ interface in
+ the container.
+ - **pasta:--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,-m,1500,--no-ndp,--no-dhcpv6,--no-dhcp**,
+ equivalent to default slirp4netns(1) options: disable IPv6, assign
+ `10.0.2.0/24` to the `tap0` interface in the container, with gateway
+    `10.0.2.2`, enable DNS forwarder reachable at `10.0.2.3`, set MTU to 1500
+ bytes, disable NDP, DHCPv6 and DHCP support.
+ - **pasta:-I,tap0,--ipv4-only,-a,10.0.2.0,-n,24,-g,10.0.2.2,--dns-forward,10.0.2.3,--no-ndp,--no-dhcpv6,--no-dhcp**,
+ equivalent to default slirp4netns(1) options with Podman overrides: same as
+ above, but leave the MTU to 65520 bytes
+ - **pasta:-t,auto,-u,auto,-T,auto,-U,auto**: enable automatic port forwarding
+ based on observed bound ports from both host and container sides
+ - **pasta:-T,5201**: enable forwarding of TCP port 5201 from container to
+ host, using the loopback interface instead of the tap interface for improved
+ performance
+
+**--no-hostname**
+
+Do not create the _/etc/hostname_ file in the container for RUN instructions.
+
+By default, Buildah manages the _/etc/hostname_ file, adding the container's own hostname. When the **--no-hostname** option is set, the image's _/etc/hostname_ will be preserved unmodified if it exists.
+
+**--no-hosts**
+
+Do not create the _/etc/hosts_ file in the container for RUN instructions.
+
+By default, Buildah manages _/etc/hosts_, adding the container's own IP address.
+**--no-hosts** disables this, and the image's _/etc/hosts_ will be preserved unmodified.
+
+**--no-pivot**
+
+Do not use pivot root to jail process inside rootfs. This should be used
+whenever the rootfs is on top of a ramdisk.
+
+Note: You can make this option the default by setting the BUILDAH\_NOPIVOT
+environment variable. `export BUILDAH_NOPIVOT=true`
+
+**--pid** *how*
+
+Sets the configuration for the PID namespace for the container.
+The configured value can be "" (the empty string) or "private" to indicate
+that a new PID namespace should be created, or it can be "host" to indicate
+that the PID namespace in which `buildah` itself is being run should be reused,
+or it can be the path to a PID namespace which is already in use by another
+process.
+
+**--runtime** *path*
+
+The *path* to an alternate OCI-compatible runtime. Default is `runc`, or `crun` when machine is configured to use cgroups V2.
+
+Note: You can also override the default runtime by setting the BUILDAH\_RUNTIME
+environment variable. `export BUILDAH_RUNTIME=/usr/bin/crun`
+
+**--runtime-flag** *flag*
+
+Adds global flags for the container runtime. To list the supported flags, please
+consult the manpages of the selected container runtime.
+Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
+to buildah run, the option given would be `--runtime-flag log-format=json`.
+
+**--tty**, **--terminal**, **-t**
+
+By default a pseudo-TTY is allocated only when buildah's standard input is
+attached to a pseudo-TTY. Setting the `--tty` option to `true` will cause a
+pseudo-TTY to be allocated inside the container connecting the user's "terminal"
+with the stdin and stdout stream of the container. Setting the `--tty` option to
+`false` will prevent the pseudo-TTY from being allocated.
+
+**--user** *user*[:*group*]
+
+Set the *user* to be used for running the command in the container.
+The user can be specified as a user name
+or UID, optionally followed by a group name or GID, separated by a colon (':').
+If names are used, the container should include entries for those names in its
+*/etc/passwd* and */etc/group* files.
+
+**--uts** *how*
+
+Sets the configuration for the UTS namespace for the container.
+The configured value can be "" (the empty string) or "private" to indicate
+that a new UTS namespace should be created, or it can be "host" to indicate
+that the UTS namespace in which `buildah` itself is being run should be reused,
+or it can be the path to a UTS namespace which is already in use by another
+process.
+
+**--volume**, **-v** *source*:*destination*:*options*
+
+Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Buildah
+bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Buildah
+container. The `OPTIONS` are a comma delimited list and can be: <sup>[[1]](#Footnote1)</sup>
+
+ * [rw|ro]
+ * [U]
+ * [z|Z]
+ * [`[r]shared`|`[r]slave`|`[r]private`]
+
+The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR`
+must be an absolute path as well. Buildah bind-mounts the `HOST-DIR` to the
+path you specify. For example, if you supply `/foo` as the host path,
+Buildah copies the contents of `/foo` to the container filesystem on the host
+and bind mounts that into the container.
+
+You can specify multiple **-v** options to mount one or more mounts to a
+container.
+
+ `Write Protected Volume Mounts`
+
+You can add the `:ro` or `:rw` suffix to a volume to mount it read-only or
+read-write mode, respectively. By default, the volumes are mounted read-write.
+See examples.
+
+ `Chowning Volume Mounts`
+
+By default, Buildah does not change the owner and group of source volume directories mounted into containers. If a container is created in a new user namespace, the UID and GID in the container may correspond to another UID and GID on the host.
+
+The `:U` suffix tells Buildah to use the correct host UID and GID based on the UID and GID within the container, to change the owner and group of the source volume.
+
+ `Labeling Volume Mounts`
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a container. Without a label, the security system might
+prevent the processes running inside the container from using the content. By
+default, Buildah does not change the labels set by the OS.
+
+To change a label in the container context, you can add either of two suffixes
+`:z` or `:Z` to the volume mount. These suffixes tell Buildah to relabel file
+objects on the shared volumes. The `z` option tells Buildah that two containers
+share the volume content. As a result, Buildah labels the content with a shared
+content label. Shared volume labels allow all containers to read/write content.
+The `Z` option tells Buildah to label the content with a private unshared label.
+Only the current container can use a private volume.
+
+By default bind mounted volumes are `private`. That means any mounts done
+inside container will not be visible on the host and vice versa. This behavior can
+be changed by specifying a volume mount propagation property.
+
+When the mount propagation policy is set to `shared`, any mounts completed inside
+the container on that volume will be visible to both the host and container. When
+the mount propagation policy is set to `slave`, one way mount propagation is enabled
+and any mounts completed on the host for that volume will be visible only inside of the container.
+To control the mount propagation property of the volume use the `:[r]shared`,
+`:[r]slave` or `:[r]private` propagation flag. The propagation property can
+be specified only for bind mounted volumes and not for internal volumes or
+named volumes. For mount propagation to work on the source mount point (the mount point
+where source dir is mounted on) it has to have the right propagation properties. For
+shared volumes, the source mount point has to be shared. And for slave volumes,
+the source mount has to be either shared or slave. <sup>[[1]](#Footnote1)</sup>
+
+Use `df <source-dir>` to determine the source mount and then use
+`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to determine propagation
+properties of source mount, if `findmnt` utility is not available, the source mount point
+can be determined by looking at the mount entry in `/proc/self/mountinfo`. Look
+at `optional fields` and see if any propagation properties are specified.
+`shared:X` means the mount is `shared`, `master:X` means the mount is `slave` and if
+nothing is there that means the mount is `private`. <sup>[[1]](#Footnote1)</sup>
+
+To change propagation properties of a mount point use the `mount` command. For
+example, to bind mount the source directory `/foo` do
+`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This
+will convert /foo into a `shared` mount point. The propagation properties of the source
+mount can be changed directly. For instance if `/` is the source mount for
+`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount.
+
+**--workingdir** *directory*
+
+Temporarily set the working *directory* for the running process. Unlike
+`buildah config --workingdir`, the workingdir will not persist to later
+calls to `buildah run` or the built image.
+
+
+NOTE: End parsing of options with the `--` option, so that other
+options can be passed to the command inside of the container.
+
+## EXAMPLE
+
+buildah run containerID -- ps -auxw
+
+buildah run --hostname myhost containerID -- ps -auxw
+
+buildah run containerID -- sh -c 'echo $PATH'
+
+buildah run --runtime-flag log-format=json containerID /bin/bash
+
+buildah run --runtime-flag debug containerID /bin/bash
+
+buildah run --tty containerID /bin/bash
+
+buildah run --tty=false containerID ls /
+
+buildah run --volume /path/on/host:/path/in/container:ro,z containerID sh
+
+buildah run -v /path/on/host:/path/in/container:z,U containerID sh
+
+buildah run --mount type=bind,src=/tmp/on/host,dst=/in/container,ro containerID sh
+
+## SEE ALSO
+buildah(1), buildah-from(1), buildah-config(1), namespaces(7), pid\_namespaces(7), crun(1), runc(8), containers.conf(5)
+
+## FOOTNOTES
+<a name="Footnote1">1</a>: The Buildah project is committed to inclusivity, a core value of open source. The `master` and `slave` mount propagation terminology used here is problematic and divisive, and should be changed. However, these terms are currently used within the Linux kernel and must be used as-is at this time. When the kernel maintainers rectify this usage, Buildah will follow suit immediately.
diff --git a/docs/buildah-source-add.1.md b/docs/buildah-source-add.1.md
new file mode 100644
index 0000000..e830cc9
--- /dev/null
+++ b/docs/buildah-source-add.1.md
@@ -0,0 +1,21 @@
+# buildah-source-add "1" "March 2021" "buildah"
+
+## NAME
+buildah\-source\-add - Add a source artifact to a source image
+
+## SYNOPSIS
+**buildah source add** [*options*] *path* *artifact*
+
+## DESCRIPTION
+Add a source artifact to a source image. The artifact will be added as a
+gzip-compressed tar ball. Add attempts to auto-tar and auto-compress only if
+necessary.
+
+Note that the buildah-source command and all its subcommands are experimental
+and may be subject to future changes.
+
+## OPTIONS
+
+**--annotation** *key=value*
+
+Add an annotation to the layer descriptor in the source-image manifest. The input format is `key=value`.
diff --git a/docs/buildah-source-create.1.md b/docs/buildah-source-create.1.md
new file mode 100644
index 0000000..524e68f
--- /dev/null
+++ b/docs/buildah-source-create.1.md
@@ -0,0 +1,24 @@
+# buildah-source-create "1" "March 2021" "buildah"
+
+## NAME
+buildah\-source\-create - Create and initialize a source image
+
+## SYNOPSIS
+**buildah source create** [*options*] *path*
+
+## DESCRIPTION
+Create and initialize a source image. A source image is an OCI artifact; an
+OCI image with a custom config media type.
+
+Note that the buildah-source command and all its subcommands are experimental
+and may be subject to future changes.
+
+## OPTIONS
+
+**--author** *author*
+
+Set the author of the source image mentioned in the config. By default, no author is set.
+
+**--time-stamp** *bool-value*
+
+Set the created time stamp in the image config. By default, the time stamp is set.
diff --git a/docs/buildah-source-pull.1.md b/docs/buildah-source-pull.1.md
new file mode 100644
index 0000000..1c89868
--- /dev/null
+++ b/docs/buildah-source-pull.1.md
@@ -0,0 +1,32 @@
+# buildah-source-pull "1" "March 2021" "buildah"
+
+## NAME
+buildah\-source\-pull - Pull a source image from a registry to a specified path
+
+## SYNOPSIS
+**buildah source pull** [*options*] *registry* *path*
+
+## DESCRIPTION
+Pull a source image from a registry to a specified path. The pull operation
+will fail if the image does not comply with a source-image OCI artifact.
+
+Note that the buildah-source command and all its subcommands are experimental
+and may be subject to future changes.
+
+## OPTIONS
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--quiet**, **-q**
+
+Suppress the progress output when pulling a source image.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container
+registries (defaults to true). TLS verification cannot be used when talking to
+an insecure registry.
diff --git a/docs/buildah-source-push.1.md b/docs/buildah-source-push.1.md
new file mode 100644
index 0000000..e9303d0
--- /dev/null
+++ b/docs/buildah-source-push.1.md
@@ -0,0 +1,31 @@
+# buildah-source-push "1" "March 2021" "buildah"
+
+## NAME
+buildah\-source\-push - Push a source image from a specified path to a registry.
+
+## SYNOPSIS
+**buildah source push** [*options*] *path* *registry*
+
+## DESCRIPTION
+Push a source image from a specified path to a registry.
+
+Note that the buildah-source command and all its subcommands are experimental
+and may be subject to future changes.
+
+## OPTIONS
+
+**--creds** *creds*
+
+The [username[:password]] to use to authenticate with the registry if required.
+If one or both values are not supplied, a command line prompt will appear and the
+value can be entered. The password is entered without echo.
+
+**--quiet**, **-q**
+
+Suppress the progress output when pushing a source image.
+
+**--tls-verify** *bool-value*
+
+Require HTTPS and verification of certificates when talking to container
+registries (defaults to true). TLS verification cannot be used when talking to
+an insecure registry.
diff --git a/docs/buildah-source.1.md b/docs/buildah-source.1.md
new file mode 100644
index 0000000..6fa5ff2
--- /dev/null
+++ b/docs/buildah-source.1.md
@@ -0,0 +1,31 @@
+# buildah-source "1" "March 2021" "buildah"
+
+## NAME
+buildah\-source - Create, push, pull and manage source images and associated source artifacts
+
+## SYNOPSIS
+**buildah source** *subcommand*
+
+## DESCRIPTION
+Create, push, pull and manage source images and associated source artifacts. A
+source image contains all source artifacts an ordinary OCI image has been built
+with. Those artifacts can be any kind of source artifact, such as source RPMs,
+an entire source tree or text files.
+
+Note that the buildah-source command and all its subcommands are experimental
+and may be subject to future changes.
+
+## COMMANDS
+
+| Command | Man Page | Description |
+| -------- | ------------------------------------------------------ | ---------------------------------------------------------- |
+| add | [buildah-source-add(1)](buildah-source-add.1.md) | Add a source artifact to a source image. |
+| create | [buildah-source-create(1)](buildah-source-create.1.md) | Create and initialize a source image. |
+| pull | [buildah-source-pull(1)](buildah-source-pull.1.md) | Pull a source image from a registry to a specified path. |
+| push | [buildah-source-push(1)](buildah-source-push.1.md) | Push a source image from a specified path to a registry. |
+
+## SEE ALSO
+buildah(1)
+
+## HISTORY
+June 2021, Originally compiled by Valentin Rothberg <vrothber@redhat.com>
diff --git a/docs/buildah-tag.1.md b/docs/buildah-tag.1.md
new file mode 100644
index 0000000..b8b4427
--- /dev/null
+++ b/docs/buildah-tag.1.md
@@ -0,0 +1,19 @@
+# buildah-tag "1" "May 2017" "buildah"
+
+## NAME
+buildah\-tag - Add additional names to local images.
+
+## SYNOPSIS
+**buildah tag** *name* *new-name* ...
+
+## DESCRIPTION
+Adds additional names to locally-stored images.
+
+## EXAMPLE
+
+buildah tag imageName firstNewName
+
+buildah tag imageName firstNewName SecondNewName
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah-umount.1.md b/docs/buildah-umount.1.md
new file mode 100644
index 0000000..28a3ad2
--- /dev/null
+++ b/docs/buildah-umount.1.md
@@ -0,0 +1,27 @@
+# buildah-umount "1" "March 2017" "buildah"
+
+## NAME
+buildah\-umount - Unmount the root file system on the specified working containers.
+
+## SYNOPSIS
+**buildah umount** [*options*] [*container* ...]
+
+## DESCRIPTION
+Unmounts the root file system on the specified working containers.
+
+## OPTIONS
+
+**--all**, **-a**
+
+All of the currently mounted containers will be unmounted.
+
+## EXAMPLE
+
+buildah umount containerID
+
+buildah umount containerID1 containerID2 containerID3
+
+buildah umount --all
+
+## SEE ALSO
+buildah(1), buildah-mount(1)
diff --git a/docs/buildah-unshare.1.md b/docs/buildah-unshare.1.md
new file mode 100644
index 0000000..d426cc6
--- /dev/null
+++ b/docs/buildah-unshare.1.md
@@ -0,0 +1,63 @@
+# buildah-unshare "1" "June 2018" "buildah"
+
+## NAME
+buildah\-unshare - Run a command inside of a modified user namespace.
+
+## SYNOPSIS
+**buildah unshare** [*options*] [**--**] [*command*]
+
+## DESCRIPTION
+Launches a process (by default, *$SHELL*) in a new user namespace. The user
+namespace is configured so that the invoking user's UID and primary GID appear
+to be UID 0 and GID 0, respectively. Any ranges which match that user and
+group in /etc/subuid and /etc/subgid are also mapped in as themselves with the
+help of the *newuidmap(1)* and *newgidmap(1)* helpers.
+
+buildah unshare is useful for troubleshooting unprivileged operations and for
+manually clearing storage and other data related to images and containers.
+
+It is also useful if you want to use the `buildah mount` command. If an unprivileged user wants to mount and work with a container, then they need to execute
+buildah unshare. Executing `buildah mount` fails for unprivileged users unless the user is running inside a `buildah unshare` session.
+
+## OPTIONS
+
+**--mount**, **-m** [*VARIABLE=]containerNameOrID*
+
+Mount the *containerNameOrID* container while running *command*, and set the
+environment variable *VARIABLE* to the path of the mountpoint. If *VARIABLE*
+is not specified, it defaults to *containerNameOrID*, which may not be a valid
+name for an environment variable.
+
+## EXAMPLE
+
+buildah unshare id
+
+buildah unshare pwd
+
+buildah unshare cat /proc/self/uid\_map /proc/self/gid\_map
+
+buildah unshare rm -fr $HOME/.local/share/containers/storage /run/user/\`id -u\`/run
+
+buildah unshare --mount containerID sh -c 'cat ${containerID}/etc/os-release'
+
+If you want to use buildah with a mount command then you can create a script that looks something like:
+
+```
+cat > buildah-script.sh << _EOF
+#!/bin/sh
+ctr=$(buildah from scratch)
+mnt=$(buildah mount $ctr)
+dnf -y install --installroot=$mnt PACKAGES
+dnf -y clean all --installroot=$mnt
+buildah config --entrypoint="/bin/PACKAGE" --env "FOO=BAR" $ctr
+buildah commit $ctr imagename
+buildah unmount $ctr
+_EOF
+```
+Then execute it with:
+```
+buildah unshare buildah-script.sh
+```
+
+## SEE ALSO
+buildah(1), buildah-mount(1), namespaces(7), newuidmap(1), newgidmap(1), user\_namespaces(7)
diff --git a/docs/buildah-version.1.md b/docs/buildah-version.1.md
new file mode 100644
index 0000000..be298cf
--- /dev/null
+++ b/docs/buildah-version.1.md
@@ -0,0 +1,31 @@
+# buildah-version "1" "June 2017" "Buildah"
+
+## NAME
+buildah\-version - Display the Buildah Version Information.
+
+## SYNOPSIS
+**buildah version** [*options*]
+
+## DESCRIPTION
+Shows the following information: Version, Go Version, Image Spec, Runtime Spec, CNI Spec, libcni Version, Git Commit, Build Time, OS, and Architecture.
+
+## OPTIONS
+
+**--help, -h**
+
+Print usage statement
+
+**--json**
+
+Output in JSON format.
+
+## EXAMPLE
+
+buildah version
+
+buildah version --help
+
+buildah version -h
+
+## SEE ALSO
+buildah(1)
diff --git a/docs/buildah.1.md b/docs/buildah.1.md
new file mode 100644
index 0000000..1f8bbc5
--- /dev/null
+++ b/docs/buildah.1.md
@@ -0,0 +1,207 @@
+# buildah "1" "March 2017" "buildah"
+
+## NAME
+Buildah - A command line tool that facilitates building OCI container images.
+
+## SYNOPSIS
+buildah [OPTIONS] COMMAND [ARG...]
+
+
+## DESCRIPTION
+The Buildah package provides a command line tool which can be used to:
+
+ * Create a working container, either from scratch or using an image as a starting point.
+ * Mount a working container's root filesystem for manipulation.
+ * Unmount a working container's root filesystem.
+ * Use the updated contents of a container's root filesystem as a filesystem layer to create a new image.
+ * Delete a working container or an image.
+ * Rename a local container.
+
+
+## OPTIONS
+
+**--cgroup-manager**=*manager*
+
+The CGroup manager to use for container cgroups. Supported values are cgroupfs or systemd. Default is systemd unless overridden in the containers.conf file.
+
+Note: Setting this flag can cause certain commands to break when called on containers previously created by the other CGroup manager type.
+Note: CGroup manager is not supported in rootless mode when using CGroups Version V1.
+
+**--log-level** **value**
+
+The log level to be used. Either "trace", "debug", "info", "warn", "error", "fatal", or "panic", defaulting to "warn".
+
+**--help, -h**
+
+Show help
+
+**--registries-conf** *path*
+
+Pathname of the configuration file which specifies which container registries should be
+consulted when completing image names which do not include a registry or domain
+portion. It is not recommended that this option be used, as the default
+behavior of using the system-wide configuration
+(*/etc/containers/registries.conf*) is most often preferred.
+
+**--registries-conf-dir** *path*
+
+Pathname of the directory which contains configuration snippets which specify
+registries which should be consulted when completing image names which do not
+include a registry or domain portion. It is not recommended that this option
+be used, as the default behavior of using the system-wide configuration
+(*/etc/containers/registries.d*) is most often preferred.
+
+**--root** **value**
+
+Storage root dir (default: "/var/lib/containers/storage" for UID 0, "$HOME/.local/share/containers/storage" for other users)
+Default root dir is configured in /etc/containers/storage.conf
+
+**--runroot** **value**
+
+Storage state dir (default: "/run/containers/storage" for UID 0, "/run/user/$UID" for other users)
+Default state dir is configured in /etc/containers/storage.conf
+
+**--short-name-alias-conf** *path*
+
+Pathname of the file which contains cached mappings between short image names
+and their corresponding fully-qualified names. It is used for mapping from
+names of images specified using short names like "ubi8" which don't
+include a registry component and a corresponding fully-specified name which
+includes a registry and any other components, such as
+"registry.access.redhat.com/ubi8". It is not recommended that this option be
+used, as the default behavior of using the system-wide cache
+(*/var/cache/containers/short-name-aliases.conf*) or per-user cache
+(*$HOME/.cache/containers/short-name-aliases.conf*) to supplement system-wide
+defaults is most often preferred.
+
+**--storage-driver** **value**
+
+Storage driver. The default storage driver for UID 0 is configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode), and is *vfs* for other users. The `STORAGE_DRIVER` environment variable overrides the default. The --storage-driver specified driver overrides all.
+
+Examples: "overlay", "vfs"
+
+Overriding this option will cause the *storage-opt* settings in /etc/containers/storage.conf to be ignored. The user must
+specify additional options via the `--storage-opt` flag.
+
+**--storage-opt** **value**
+
+Storage driver option, Default storage driver options are configured in /etc/containers/storage.conf (`$HOME/.config/containers/storage.conf` in rootless mode). The `STORAGE_OPTS` environment variable overrides the default. The --storage-opt specified options overrides all.
+
+**--userns-gid-map** *mapping*
+
+Directly specifies a GID mapping which should be used to set ownership, at the
+filesystem level, on the working container's contents.
+Commands run when handling `RUN` instructions will default to being run in
+their own user namespaces, configured using the UID and GID maps.
+
+Entries in this map take the form of one or more colon-separated triples of a starting
+in-container GID, a corresponding starting host-level GID, and the number of
+consecutive IDs which the map entry represents.
+
+This option overrides the *remap-gids* setting in the *options* section of
+/etc/containers/storage.conf.
+
+If this option is not specified, but a global --userns-gid-map setting is
+supplied, settings from the global option will be used.
+
+If none of --userns-uid-map-user, --userns-gid-map-group, or --userns-gid-map
+are specified, but --userns-uid-map is specified, the GID map will be set to
+use the same numeric values as the UID map.
+
+**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless usernamespace in the container, rather than being relative to the host as it would be when run rootful.
+
+**--userns-uid-map** *mapping*
+
+Directly specifies a UID mapping which should be used to set ownership, at the
+filesystem level, on the working container's contents.
+Commands run when handling `RUN` instructions will default to being run in
+their own user namespaces, configured using the UID and GID maps.
+
+Entries in this map take the form of one or more colon-separated triples of a starting
+in-container UID, a corresponding starting host-level UID, and the number of
+consecutive IDs which the map entry represents.
+
+This option overrides the *remap-uids* setting in the *options* section of
+/etc/containers/storage.conf.
+
+If this option is not specified, but a global --userns-uid-map setting is
+supplied, settings from the global option will be used.
+
+If none of --userns-uid-map-user, --userns-gid-map-group, or --userns-uid-map
+are specified, but --userns-gid-map is specified, the UID map will be set to
+use the same numeric values as the GID map.
+
+**NOTE:** When this option is specified by a rootless user, the specified mappings are relative to the rootless usernamespace in the container, rather than being relative to the host as it would be when run rootful.
+
+**--version**, **-v**
+
+Print the version
+
+## Environment Variables
+
+Buildah can set up environment variables from the env entry in the [engine] table in the containers.conf(5). These variables can be overridden by passing environment variables before the `buildah` commands.
+
+## COMMANDS
+
+| Command | Man Page | Description |
+| ---------- | ------------------------------------------------ | ---------------------------------------------------------------------------------------------------- |
+| add | [buildah-add(1)](buildah-add.1.md) | Add the contents of a file, URL, or a directory to the container. |
+| build | [buildah-build(1)](buildah-build.1.md) | Builds an OCI image using instructions in one or more Containerfiles. |
+| commit | [buildah-commit(1)](buildah-commit.1.md) | Create an image from a working container. |
+| config | [buildah-config(1)](buildah-config.1.md) | Update image configuration settings. |
+| containers | [buildah-containers(1)](buildah-containers.1.md) | List the working containers and their base images. |
+| copy | [buildah-copy(1)](buildah-copy.1.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
+| from | [buildah-from(1)](buildah-from.1.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
+| images | [buildah-images(1)](buildah-images.1.md) | List images in local storage. |
+| info | [buildah-info(1)](buildah-info.1.md) | Display Buildah system information. |
+| inspect | [buildah-inspect(1)](buildah-inspect.1.md) | Inspects the configuration of a container or image |
+| login | [buildah-login(1)](buildah-login.1.md) | Login to a container registry. |
+| logout | [buildah-logout(1)](buildah-logout.1.md) | Logout of a container registry |
+| manifest | [buildah-manifest(1)](buildah-manifest.1.md) | Create and manipulate manifest lists and image indexes. |
+| mkcw       | [buildah-mkcw(1)](buildah-mkcw.1.md)             | Convert a conventional container image into a confidential workload image. |
+| mount | [buildah-mount(1)](buildah-mount.1.md) | Mount the working container's root filesystem. |
+| prune | [buildah-prune(1)](buildah-prune.1.md) | Cleanup intermediate images as well as build and mount cache. |
+| pull | [buildah-pull(1)](buildah-pull.1.md) | Pull an image from the specified location. |
+| push | [buildah-push(1)](buildah-push.1.md) | Push an image from local storage to elsewhere. |
+| rename | [buildah-rename(1)](buildah-rename.1.md) | Rename a local container. |
+| rm | [buildah-rm(1)](buildah-rm.1.md) | Removes one or more working containers. |
+| rmi | [buildah-rmi(1)](buildah-rmi.1.md) | Removes one or more images. |
+| run | [buildah-run(1)](buildah-run.1.md) | Run a command inside of the container. |
+| source | [buildah-source(1)](buildah-source.1.md) | Create, push, pull and manage source images and associated source artifacts. |
+| tag | [buildah-tag(1)](buildah-tag.1.md) | Add an additional name to a local image. |
+| umount | [buildah-umount(1)](buildah-umount.1.md) | Unmount a working container's root file system. |
+| unshare | [buildah-unshare(1)](buildah-unshare.1.md) | Launch a command in a user namespace with modified ID mappings. |
+| version | [buildah-version(1)](buildah-version.1.md) | Display the Buildah Version Information |
+
+
+## Files
+
+**storage.conf** (`/etc/containers/storage.conf`)
+
+storage.conf is the storage configuration file for all tools using containers/storage
+
+The storage configuration file specifies all of the available container storage options for tools using shared container storage.
+
+**mounts.conf** (`/usr/share/containers/mounts.conf` and optionally `/etc/containers/mounts.conf`)
+
+The mounts.conf files specify volume mount files or directories that are automatically mounted inside containers when executing the `buildah run` or `buildah build` commands. Container processes can then use this content. The volume mount content does not get committed to the final image.
+
+Usually these directories are used for passing secrets or credentials required by the package software to access remote package repositories.
+
+For example, if a mounts.conf file contains the line "`/usr/share/rhel/secrets:/run/secrets`", the content of the `/usr/share/rhel/secrets` directory is mounted on `/run/secrets` inside the container. This mountpoint allows Red Hat Enterprise Linux subscriptions from the host to be used within the container. It is also possible to omit the destination if it's equal to the source path. For example, specifying `/var/lib/secrets` will mount the directory into the same container destination path `/var/lib/secrets`.
+
+Note this is not a volume mount. The content of the volumes is copied into container storage, not bind mounted directly from the host.
+
+**registries.conf** (`/etc/containers/registries.conf`)
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+**registries.d** (`/etc/containers/registries.d`)
+
+Directory which contains configuration snippets which specify registries which should be consulted when completing image names which do not include a registry or domain portion.
+
+## SEE ALSO
+containers.conf(5), containers-mounts.conf(5), newuidmap(1), newgidmap(1), containers-registries.conf(5), containers-storage.conf(5)
+
+## HISTORY
+December 2017, Originally compiled by Tom Sweeney <tsweeney@redhat.com>
diff --git a/docs/cni-examples/100-buildah-bridge.conf b/docs/cni-examples/100-buildah-bridge.conf
new file mode 100644
index 0000000..20633ff
--- /dev/null
+++ b/docs/cni-examples/100-buildah-bridge.conf
@@ -0,0 +1,17 @@
+{
+ "cniVersion": "0.3.1",
+ "name": "buildah-bridge",
+ "type": "bridge",
+ "bridge": "cni0",
+ "isGateway": true,
+ "ipMasq": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.88.0.0/16",
+ "routes": [
+ {
+ "dst": "0.0.0.0/0"
+ }
+ ]
+ }
+}
diff --git a/docs/cni-examples/200-loopback.conf b/docs/cni-examples/200-loopback.conf
new file mode 100644
index 0000000..023376f
--- /dev/null
+++ b/docs/cni-examples/200-loopback.conf
@@ -0,0 +1,5 @@
+{
+ "cniVersion": "0.3.0",
+ "name": "loopback",
+ "type": "loopback"
+}
diff --git a/docs/cni-examples/README.md b/docs/cni-examples/README.md
new file mode 100644
index 0000000..27a0a28
--- /dev/null
+++ b/docs/cni-examples/README.md
@@ -0,0 +1,37 @@
+When [buildah](https://github.com/containers/buildah)'s `buildah run`
+command is used, or when `buildah build` needs to handle a
+`RUN` instruction, the processes which `buildah` starts are run in their own
+network namespace unless the `--network=host` option is used.
+
+When a network namespace is first created, it contains no network interfaces
+and is essentially disconnected from any networks that the host can access.
+
+In order to configure network interfaces and network access for those network
+namespaces, `buildah` uses the
+[CNI](https://github.com/containernetworking/cni) library, which in turn uses
+plugins ([CNI plugins](https://github.com/containernetworking/plugins), and
+possibly others).
+
+Which plugins get used, and how, is controlled using configuration files, which
+`buildah` scans `/etc/cni/net.d` to find. By default, `buildah` expects to
+find plugins in `/opt/cni/bin`.
+
+This directory contains sample configuration files for the `loopback` and
+`bridge` plugins from the [CNI
+plugins](https://github.com/containernetworking/plugins) repository. To
+install those plugins, try running:
+
+```
+ git clone https://github.com/containernetworking/plugins
+ ( cd ./plugins; ./build.sh )
+ mkdir -p /opt/cni/bin
+ install -v ./plugins/bin/* /opt/cni/bin
+```
+
+If you've already installed a CNI configuration (for example, for
+[CRI-O](https://github.com/kubernetes-sigs/cri-o)), it'll probably just
+work, but to install these sample configuration files:
+```
+ mkdir -p /etc/cni/net.d
+ install -v -m644 *.conf /etc/cni/net.d/
+```
diff --git a/docs/containertools/README.md b/docs/containertools/README.md
new file mode 100644
index 0000000..8b6fd3f
--- /dev/null
+++ b/docs/containertools/README.md
@@ -0,0 +1,113 @@
+# Container Tools Guide
+
+## Introduction
+
+The purpose of this guide is to list a number of related Open-source projects that are available
+on [GitHub.com](https://github.com) that operate on
+[Open Container Initiative](https://www.opencontainers.org/) (OCI) images and containers. This
+guide will give a high level explanation of the related container tools and will explain a bit
+on how they interact amongst each other.
+
+The tools are:
+
+* [Buildah](https://github.com/containers/buildah)
+* [CRI-O](https://github.com/kubernetes-sigs/cri-o)
+* [Podman](https://github.com/containers/podman)
+* [Skopeo](https://github.com/containers/skopeo)
+
+## Buildah
+
+The Buildah project provides a command line tool that can be used to create an OCI or traditional Docker
+image format image and to then build a working container from the image. The container can be mounted
+and modified and then an image can be saved based on the updated container.
+
+## CRI-O
+
+CRI-O [Website](http://cri-o.io/)
+
+The CRI-O project provides an integration path between OCI conformant runtimes and kubelet.
+Specifically, it implements the Kubelet
+[Container Runtime Interface (CRI)](https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md)
+using OCI conformant runtimes. The scope of CRI-O is tied to the scope of the CRI.
+
+At a high level CRI-O supports multiple image formats including the existing Docker image format,
+multiple means to download images including trust & image verification, container image and lifecycle
+management, monitoring, logging and resource isolation as required by CRI.
+
+## Podman
+
+[Podman](https://github.com/containers/podman) allows for full management of a container's lifecycle from creation
+through removal. It supports multiple image formats including both the Docker and OCI image formats. Support for
+pods is provided allowing pods to manage groups of containers together. Podman also supports trust and image
+verification when pulling images along with resource isolation of containers and pods.
+
+## Skopeo
+
+Skopeo is a command line tool that performs a variety of operations on container images and image repositories.
+Skopeo can work on either OCI or Docker images. Skopeo can be used to copy images from and to various
+container storage mechanisms including container registries. Skopeo also allows you to inspect an image
+showing its layers without requiring that the image be pulled. Skopeo also allows you to delete an image
+from a repository. When required by the repository, Skopeo can pass appropriate certificates and credentials
+for authentication.
+
+
+## Buildah and Podman relationship
+
+Buildah and Podman are two complementary Open-source projects that are available on
+most Linux platforms and both projects reside at [GitHub.com](https://github.com)
+with Buildah [here](https://github.com/containers/buildah) and
+Podman [here](https://github.com/containers/podman). Both Buildah and Podman are
+command line tools that work on OCI images and containers. The two projects
+differentiate in their specialization.
+
+Buildah specializes in building OCI images. Buildah's commands replicate all
+of the commands that are found in a Dockerfile. Buildah’s goal is also to
+provide a lower level coreutils interface to build images, allowing people to build
+containers without requiring a Dockerfile. The intent with Buildah is to allow other
+scripting languages to build container images, without requiring a daemon.
+
+Podman specializes in all of the commands and functions that help you to maintain and modify
+OCI images, such as pulling and tagging. It also allows you to create, run, and maintain containers
+created from those images.
+
+A major difference between Podman and Buildah is their concept of a container. Podman
+allows users to create "traditional containers" where the intent of these containers is
+to be long lived. While Buildah containers are really just created to allow content
+to be added back to the container image. An easy way to think of it is the
+`buildah run` command emulates the RUN command in a Dockerfile while the `podman run`
+command emulates the `docker run` command in functionality. Because of this you
+cannot see Podman containers from within Buildah or vice versa.
+
+In short Buildah is an efficient way to create OCI images while Podman allows
+you to manage and maintain those images and containers in a production environment using
+familiar container cli commands.
+
+Some of the commands between the projects overlap:
+
+* build
+The `podman build` and `buildah build` commands have significant overlap as Podman borrows large pieces of the `buildah build` implementation.
+
+* run
+The `buildah run` and `podman run` commands are similar but different. As explained above Podman and Buildah have a different concept of a container. An easy way to think of it is the `buildah run` command emulates the RUN command in a Dockerfile while the `podman run` command emulates the `docker run` command in functionality. As Buildah and Podman have somewhat different concepts of containers, you can not see Podman containers from within Buildah or vice versa.
+
+* pull, push
+These commands are basically the same between the two and either could be used.
+
+* commit
+Commit works differently because of the differences in `containers`. You cannot commit a Podman container from Buildah nor a Buildah container from Podman.
+
+* tag, rmi, images
+These commands are basically the same between the two and either could be used.
+
+* rm
+This command appears to be equivalent on the surface, but they differ due to the underlying storage differences of containers
+between the two projects. Given that, Buildah containers can not be removed with a Podman command and Podman containers
+can not be removed with a Buildah command.
+
+* mount
+Mount command is similar for both in that you can mount the container image and modify content in it, which will be saved to an image when you commit.
+
+In short Buildah is an efficient way to create OCI images while Podman allows
+you to manage and maintain those images and containers in a production environment using
+familiar container cli commands.
+
diff --git a/docs/links/buildah-bud.1 b/docs/links/buildah-bud.1
new file mode 100644
index 0000000..5f450eb
--- /dev/null
+++ b/docs/links/buildah-bud.1
@@ -0,0 +1 @@
+.so man1/buildah-build.1
diff --git a/docs/release-announcements/README.md b/docs/release-announcements/README.md
new file mode 100644
index 0000000..d9893e2
--- /dev/null
+++ b/docs/release-announcements/README.md
@@ -0,0 +1,27 @@
+![buildah logo](../../logos/buildah-logo_large.png)
+
+# Buildah Release Announcements
+
+
+**[Buildah v1.3 RA](v1.3.md) - August 7, 2018**
+
+Features: Dockerfile handling improvements, added the `buildah pull` command, added the `buildah rename` command, updated ulimits settings, added isolation control and other enhancements and bug fixes.
+
+**[Buildah v1.2 RA](v1.2.md) - July 14, 2018**
+
+Features: Added ability to control image layers when building an image, CVE fixes, the initial support for user namespace handling and other enhancements and bug fixes.
+
+**[Buildah v1.1 RA](v1.1.md) - June 12, 2018**
+
+Features: OnBuild support for Dockerfiles, label support for the `buildah bud` command and other enhancements and bug fixes.
+
+
+**[Buildah Alpha v0.16 RA](v0.16.md) - April 2, 2018**
+
+Features: SHELL command support in Dockerfiles, added support for three transports for `buildah from`, added the ability to pull compressed docker-archive files and other enhancements and bug fixes.
+
+**[Buildah Alpha v0.12 RA](v0.12.md) - February 21, 2018**
+
+Features: Set the default certificate directory to /etc/containers/certs.d, improved lookups for a variety of image name formats, added pruning capability to the rmi command, provided authentication to `buildah bud` and other enhancements and bug fixes.
+
+## Buildah == Simplicity
diff --git a/docs/release-announcements/v0.12.md b/docs/release-announcements/v0.12.md
new file mode 100644
index 0000000..ee4f30e
--- /dev/null
+++ b/docs/release-announcements/v0.12.md
@@ -0,0 +1,32 @@
+# Buildah Alpha version 0.12 Release Announcement
+
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+We're pleased to announce the release of Buildah Alpha version 0.12 on both Fedora 26 and Fedora 27. As always, the latest Buildah can also be acquired from [GitHub](https://github.com/containers/buildah) for any other Linux distribution.
+
+The Buildah project has been building some steam over the past several weeks, welcoming several new contributors to the mix, launching new functionality and creating a number of improvements and bug fixes. The major highlights for this release are:
+
+* Added better handling of error messages for Unknown Dockerfile instructions.
+* Set the default certificate directory to /etc/containers/certs.d.
+* Vendored in the latest containers/image and containers/Storage packages.
+* The build-using-dockerfile (bud) command now sets the image's 'author' field to the value provided by MAINTAINER in the Dockerfile.
+* Return exit code 1 when 'buildah rmi' fails.
+* Improve lookups of a variety of image name formats.
+* Adds the --format and --filter parameters to the 'buildah containers' command.
+* Adds the --prune,-p option to the 'buildah rmi' command allowing dangling images to be pruned.
+* Adds the --authfile parameter to the 'buildah commit' command.
+* Fix the --runtime-flag for the 'buildah run' and 'buildah bud' commands when global flags are used.
+* The format parameter now overrides the quiet parameter for 'buildah images'.
+* Provide authentication parameters to the build-using-docker-file (bud) command.
+* Directory permissions are no longer overwritten when using the --chown parameter.
+* HTML character output to the terminal is no longer escaped.
+* The container name is now always set to the image's name.
+* The username or password are prompted for if they are not supplied with the --creds parameter.
+* Return a better error message when bad credentials are used to pull a private image.
+* Plus several small bug fixes.
+
+If you haven’t yet, install Buildah from the Fedora repository and give it a spin. We’re betting you'll find it’s an easy and quick way to build containers in your environment without a daemon being involved!
+
+If you haven't joined our community yet, don't wait any longer! Come join us in [GitHub](https://github.com/containers/buildah), where Open Source communities live.
+
+**Buildah == Simplicity**
diff --git a/docs/release-announcements/v0.16.md b/docs/release-announcements/v0.16.md
new file mode 100644
index 0000000..50e7fb0
--- /dev/null
+++ b/docs/release-announcements/v0.16.md
@@ -0,0 +1,45 @@
+# Buildah Alpha version 0.16 Release Announcement
+
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+We're pleased to announce the release of Buildah Alpha version 0.16 which is now available from GitHub for any Linux distro. We will be shipping this release on Fedora, CentOS and Ubuntu in the near future.
+
+The Buildah project has continued to grow over the past several weeks, welcoming several new contributors to the mix, launching new functionality and creating a number of improvements and bug fixes.
+
+## The major highlights for this release are:
+
+ * Add support for the SHELL command in Dockerfiles.
+ * Change the time displayed by the image command to the locale time.
+ * Allow the --cmd parameter for the `buildah config` command to have commands as values. I.e. `buildah config --cmd “--help” {containerID}`.
+ * Documentations added for the mounts.conf file. The mounts config allows you to mount content from the host into the container to be used during the build procedure, but does not get committed to the image.
+ * Fixed a number of man pages to format correctly.
+ * The `buildah from` command now supports pulling images using the following three transports: docker-archive, oci-archive, and dir, as well as normal container registries and the docker daemon.
+ * If the user overrides the storage driver, the options will not be used from the default storage.conf file.
+ * Show the Config and Manifest as a JSON string in `buildah inspect` even when the --format parameter is not set.
+ * Adds feature to pull compressed docker-archive files.
+ * Vendor in latest containers/image
+ * docker-archive generates docker legacy compatible images.
+ * Ensure the layer IDs in legacy docker/tarfile metadata are unique.
+ * docker-archive: repeated layers are symlinked in the tar file.
+ * sysregistries: remove all trailing slashes.
+ * Improve docker/* error messages.
+ * Fix failure to make auth directory.
+ * Create a new slice in Schema1.UpdateLayerInfos.
+ * Drop unused storageImageDestination.{image,systemContext}.
+ * Load a *storage.Image only once in storageImageSource.
+ * Support gzip for docker-archive files.
+ * Remove .tar extension from blob and config file names.
+ * ostree, src: support copy of compressed layers.
+ * ostree: re-pull layer if it misses uncompressed_digest|uncompressed_size.
+ * image: fix docker schema v1 -> OCI conversion.
+ * Add /etc/containers/certs.d as default certs directory.
+ * Plus several small bug fixes.
+
+## Try it Out.
+
+If you haven’t yet, install Buildah from the Fedora repo or GitHub and give it a spin. We’re betting you'll find it’s an easy and quick way to build containers in your environment without a daemon being involved!
+
+For those of you who contributed to this release, thank you very much for your contributions! If you haven't joined our community yet, don't wait any longer! Come join us in GitHub, where Open Source communities live.
+
+## Buildah == Simplicity
+
diff --git a/docs/release-announcements/v1.1.md b/docs/release-announcements/v1.1.md
new file mode 100644
index 0000000..008d560
--- /dev/null
+++ b/docs/release-announcements/v1.1.md
@@ -0,0 +1,82 @@
+# Buildah version 1.1 Release Announcement
+
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+We're pleased to announce the release of Buildah version 1.1 which is now available from GitHub for any Linux distro. We are shipping this release on Fedora, RHEL 7, CentOS and Ubuntu in the near future.
+
+The Buildah project has continued to grow over the past several weeks, welcoming several new contributors to the mix, launching new functionality and creating a number of improvements and bug fixes.
+
+## The major highlights for this release are:
+
+ * Drop capabilities if running container processes as non root
+ * Print Warning message if cmd will not be used based on entrypoint
+ * Add OnBuild support for Dockerfiles
+ * Add support for buildah bud --label
+ * Report errors on bad transports specification when pushing images
+ * Add registry errors for pull
+ * Give better messages to users when image can not be found
+ * Add environment variable to buildah --format
+ * Accept json array input for config entrypoint
+ * buildah bud now requires a context directory or URL
+ * buildah bud picks up ENV from base image
+ * Run: set supplemental group IDs
+ * Use CNI to configure container networks
+ * add/secrets/commit: Use mappings when setting permissions on added content
+ * Add CLI options for specifying namespace and cgroup setup
+ * Always set mappings when using user namespaces
+ * Read UID/GID mapping information from containers and images
+ * build-using-dockerfile: add --annotation
+ * Implement --squash for build-using-dockerfile and commit
+ * Manage "Run" containers more closely
+ * Handle /etc/hosts and /etc/resolv.conf properly in container
+ * Make "run --terminal" and "run -t" aliases for "run --tty"
+ * buildah push/from can push and pull images with no reference
+ * Attempt to download file from url, if fails assume Dockerfile
+ * builder-inspect: fix format option
+ * buildah-from: add effective value to mount propagation
+ * Documentation Changes
+ * Change freenode chan to buildah
+ * Add example CNI configurations
+ * Touch Up tutorial for run changes
+ * Update 01-intro.md tutorial
+ * Update troubleshooting with new run workaround
+ * Add console syntax highlighting to troubleshooting page
+ * Touchup man page short options across man pages
+ * Demo Changes
+ * Added demo dir and a demo.
+ * Added Docker compatibility demo
+ * Added a bud demo and tidied up
+ * Update buildah scratch demo to support el7
+ * Quick fix on demo readme
+ * Test Changes
+ * Add tests for namespace control flags
+ * CI tests and minor fix for cache related noop flags
+ * Add cpu-shares short flag (-c) and cpu-shares CI tests
+ * Add buildah bud CI tests for ENV variables
+ * Additional bud CI tests
+ * Run integration tests under travis_wait in Travis
+ * Re-enable rpm .spec version check and new commit test
+ * Update to F28 and new run format in baseline test
+ * Add context dir to bud command in baseline test
+ * add test to inspect
+ * Test with Go 1.10, too
+ * rmi, rm: add test
+ * add mount test
+ * Fix SELinux test errors when SELinux is enabled
+ * Vendor changes
+ * Vendor in latest container/storage for devicemapper support
+ * Vendor github.com/onsi/ginkgo and github.com/onsi/gomega
+ * Vendor github.com/containernetworking/cni v0.6.0
+ * Update github.com/containers/storage
+ * Update github.com/containers/libpod
+ * Vendor in latest containers/image
+ * Plus a number of smaller fixes.
+
+## Try it Out.
+
+If you haven’t yet, install Buildah from the Fedora repo or GitHub and give it a spin. We’re betting you'll find it’s an easy and quick way to build containers in your environment without a daemon being involved!
+
+For those of you who contributed to this release, thank you very much for your contributions! If you haven't joined our community yet, don't wait any longer! Come join us in GitHub, where Open Source communities live.
+
+## Buildah == Simplicity
+
diff --git a/docs/release-announcements/v1.2.md b/docs/release-announcements/v1.2.md
new file mode 100644
index 0000000..9fe2298
--- /dev/null
+++ b/docs/release-announcements/v1.2.md
@@ -0,0 +1,81 @@
+# Buildah version 1.2 Release Announcement
+
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+We're pleased to announce the release of Buildah version 1.2 which is now available from GitHub for any Linux distro. We are shipping this release on Fedora, RHEL 7, CentOS and Ubuntu in the near future.
+
+The Buildah project has continued to grow over the past several weeks, welcoming several new contributors to the mix. The highlights of this release are the added ability to control image layers when building an image, CVE fixes, the initial support for user namespace handling and several other enhancements and bug fixes.
+
+## The major highlights for this release are:
+
+### Allow the user to control the layers of the image when the image is built with the ‘buildah bud’ command.
+
+A container is composed of a final readable/writeable layer and when the layers are cached, a number of intermediate read only layers. The read only layers are created with each step in the Dockerfile and the final readable/writeable layer contains the intermediate layers. Prior to these changes Buildah did not cache these intermediate read only layers.
+
+This release has a new environment variable ‘BUILDAH_LAYERS’ and a new ‘buildah bud’ --layers parameter. When either is set to true, the image layers are cached during the ‘buildah bud’ processing and not discarded. The disadvantage to retaining layers is the space that they use. The advantage to retaining them is if you make a change to your Dockerfile, only the layers for that change and the ones following it will need to be regenerated.
+
+The --nocache parameter has also been added to the ‘buildah bud’ command. When this parameter is set to true the ‘buildah bud’ command ignores any existing layers and creates all of the image layers anew.
+
+### Added initial user namespace support.
+
+To isolate the container’s processes from running as root on the host machine, user namespaces are used by container technologies. This allows the administrator to configure the container’s processes that must run as root to remap the user to a less privileged user on the container’s host machine. This remapping is handled in part by settings in the /etc/subuid and /etc/subgid files on the host machine.
+
+The changes for this release do not yet provide full support for user namespaces, but do set up the options to control the mapping with the --userns-uid-map and --userns-gid-map options. Changes have also been made to prevent the container from modifying the /etc/hosts or /etc/resolv.conf files on the host.
+
+Also with this release if a user with a uid that’s not equal to zero creates a container, a namespace is now created based on the user’s uid and gid and the container will be reexec’d using that namespace. In addition, the storage driver, storage root directory and storage state directory will all be created under alternate locations. Please reference the buildah (1) man page for more details. Further information will be published in upcoming blogs and additional changes are in progress to provide full support of user namespaces in future versions of Buildah.
+
+### CVE security issues with /proc/acpi and /proc/keys have been addressed.
+
+The /proc/acpi and /proc/keys were added to the list of blocked kernel files. This prevents the container from manipulating these files on the container’s host.
+
+## Release Changes
+ * Added the ability to remove or retain image layers for ‘buildah bud’:
+ * Add --layers and --no-cache options to 'buildah bud'.
+ * Add --rm and --force-rm options to 'buildah bud'.
+ * Fixed the buildah bud --layers option.
+ * Added environment variable BUILDAH_LAYERS to control image layers creation.
+ * Added environment variable BUILDAH_RUNTIME to setup alternate runtimes.
+ * build-using-dockerfile: let -t include transports again.
+ * Block the use of /proc/acpi and /proc/keys from inside containers. These address potential CVE Security issues.
+ * Add --cidfile option to 'buildah from`.
+ * Add a --loglevel option to build-with-dockerfile.
+ * Begin supporting specification of user namespace for container separation:
+ * Allow --userns-uid-map/--userns-gid-map to be global options.
+ * If unprivileged, reexec in a user namespace.
+ * Force ownership of /etc/hosts and /etc/resolv.conf to 0:0.
+ * Recognize committing to second storage locations with 'buildah commit'.
+ * Add the --all option to 'buildah unmount' to unmount all mounted containers.
+ * When doing multiple mounts, output all pertinent errors, not just the last error.
+ * Implement basic recognition of the "--isolation" option for 'buildah from' and 'buildah run'.
+ * Fix ARGS parsing for run commands.
+ * When building a container the HTTP User-Agent is set to the Buildah version.
+ * Makefile: add the uninstall command.
+ * Support multiple inputs to 'buildah mount'.
+ * Use the right formatting when adding entries to /etc/hosts.
+ * A number of minor performance improvements for 'buildah run' and 'buildah bud'.
+ * Change RunOptions.Stdin/Stdout/Stderr to just be Reader/Writers.
+ * Use conversion code from containers/image instead of converting configs manually.
+ * Do not ignore any parsing errors during initialization.
+ * Explicitly handle "from scratch" images in Builder.initConfig.
+ * Fix parsing of OCI images.
+ * Don't ignore v2s1 history if docker_version is not set.
+ * Add --all,-a flags to 'buildah images'.
+ * Remove tty check from buildah images --format.
+ * Fix usage information for 'buildah images'.
+ * Documentation changes:
+ * Add registries.conf link to a few man pages.
+ * Add information about the configuration files to the install documentation.
+ * Follow man-pages(7) suggestions for SYNOPSIS in all man pages.
+ * Minor update to buildah config documentation for entrypoint.
+ * ONBUILD tutorial created.
+ * Touch up images man page.
+ * Plus a number of smaller fixes.
+
+## Try it Out.
+
+If you haven’t yet, install Buildah from the Fedora repo or GitHub and give it a spin. We’re betting you'll find it’s an easy and quick way to build containers in your environment without a daemon being involved!
+
+For those of you who contributed to this release, thank you very much for your contributions! If you haven't joined our community yet, don't wait any longer! Come join us in GitHub, where Open Source communities live.
+
+## Buildah == Simplicity
+
diff --git a/docs/release-announcements/v1.3.md b/docs/release-announcements/v1.3.md
new file mode 100644
index 0000000..9ef4a68
--- /dev/null
+++ b/docs/release-announcements/v1.3.md
@@ -0,0 +1,60 @@
+# Buildah version 1.3 Release Announcement
+
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+We're pleased to announce the release of Buildah version 1.3 which is now available from GitHub for any Linux distro. We are shipping this release on Fedora, RHEL 7, CentOS, openSUSE and Ubuntu in the near future.
+
+The Buildah project has continued to grow over the past several weeks, welcoming several new contributors to the mix. The highlights of this release are Dockerfile handling improvements, added the `buildah pull` command, added the `buildah rename` command, updated ulimits settings, added isolation control and several other enhancements and bug fixes.
+
+## The major highlights for this release are:
+
+* Dockerfiles with a ‘.in’ suffix are preprocessed during the build process.
+
+CPP is now used by the ‘buildah bud’ command to preprocess any Dockerfile that has the ‘.in’ suffix. This allows Dockerfiles to be decomposed and make them reusable via CPP’s #include directive. Notice that those Dockerfiles can still be used by other tools by manually running cpp -E on them. Stay tuned for an upcoming blog with an example. (Many thanks to Valentin Rothberg for providing this functionality.)
+
+* Dockerfile input can come from stdin.
+
+If you use a dash ‘-’ as the argument to the `buildah bud --file` parameter, Dockerfile contents will be read from stdin.
+
+* Created a pull and rename command.
+
+The new `buildah pull` command pulls an image without creating a container like the `buildah from` command does. The new `buildah rename` command renames a container.
+
+* Ulimits settings now match the settings we add to the Docker unit file.
+
+The maximum number of processes and the number of open files that Buildah will handle now match the same number that Docker handles.
+
+* Added the ability to select the type of isolation to be used.
+
+By setting the new BUILDAH_ISOLATION environment variable or by using the new --isolation parameter found in the bud, from and run commands, one can select the type of isolation to use for running processes as part of the RUN instruction. Recognized types include oci, rootless and chroot. For more details, please refer to the `buildah bud`, `buildah from` and `buildah run` man pages. These new isolations are being added to run buildah inside locked down containers.
+
+## Release Changes
+* preprocess ".in" suffixed Dockerfiles.
+* Allow Dockerfile content to come from stdin.
+* Create buildah pull command.
+* Create buildah rename command.
+* Set the default ulimits to match Docker.
+* Set BUILDAH_ISOLATION=rootless when running unprivileged.
+* Add and implement IsolationOCIRootless.
+* Add a value for IsolationOCIRootless.
+* Fix rmi to remove intermediate images associated with an image.
+* Switch to github.com/containers/image/pkg/sysregistriesv2.
+* unshare: error message missed the pid.
+* bud should not search just the context directory for Dockerfile.
+* Add support for multiple Short options.
+* Fixed volume cache issue with buildah bud --layers.
+* Allow ping command without NET_RAW Capabilities.
+* usernamespace: assign additional IDs sequentially.
+* Remove default dev/pts which allows Buildah to be run as non-root.
+ * Documentation changes:
+ * Fix the the in buildah-config man page.
+* Updated the following packages to newer versions: containers/image, containers/storage, runc, and urfave/cli.
+* Plus a number of smaller fixes.
+
+## Try it Out.
+
+If you haven’t yet, install Buildah from the Fedora repo or GitHub and give it a spin. We’re betting you'll find it’s an easy and quick way to build containers in your environment without a daemon being involved!
+
+For those of you who contributed to this release, thank you very much for your contributions! If you haven't joined our community yet, don't wait any longer! Come join us in GitHub, where Open Source communities live.
+
+## Buildah == Simplicity
diff --git a/docs/release-announcements/v1.4.md b/docs/release-announcements/v1.4.md
new file mode 100644
index 0000000..6960571
--- /dev/null
+++ b/docs/release-announcements/v1.4.md
@@ -0,0 +1,82 @@
+# Buildah version 1.4 Release Announcement
+
+![buildah logo](https://buildah.io/images/buildah.png)
+
+We're pleased to announce the release of Buildah version 1.4 which is now available from GitHub for any Linux distro. We are shipping this release on Fedora, RHEL 7, CentOS, openSUSE and Ubuntu in the near future.
+
+The Buildah project has continued to grow over the past several weeks, welcoming several new contributors to the mix. The highlights of this release are fixes for "rootless" users, improvements in symbolic link and chroot handling in Dockerfiles, the addition of a `pull` command, better error messaging for OCI containers and several other enhancements and bug fixes.
+
+## The major highlights for this release are:
+
+* Issues with a rootless user cleaned up.
+ - A variety of issues were cleaned up in this space. They include:
+ * Additional groups were not reset for a rootless user when creating a new user namespace. For example users of the 'docker' group are now able to use the docker-daemon: destination.
+ * Builtin volumes are now owned by the UID/GID of the container.
+ * Removed the --no-pivot functionality as it could cause EPERM issues in a rootless user environment.
+
+* Symbolic links handling for ADD and COPY
+
+ If a symbolic link was used as part of an ADD or COPY command in a Dockerfile, the link itself and not the underlying file(s) were copied. This has been corrected so that the files pointed to by the symbolic link are now copied.
+
+* COPY --chown in a Dockerfile
+
+ The COPY command with the --chown parameter in a Dockerfile is now processed correctly.
+
+* The pull command has been created.
+
+ The `buildah pull` command has been created. It works like the `from` command however it only pulls the image and does not build a container like the `from` command does.
+
+* Non-OCI command handling in a Dockerfile
+
+ If a command that is not OCI compliant is encountered when building an OCI formatted container image, better error messages are now displayed. If a non-OCI formatted container image is desired, the '--format=docker' option should be passed to the bud command.
+
+* We've moved!
+
+ The Buildah project has moved from projectatomic/buildah to containers/buildah in GitHub.com. Come check out our new home!
+
+* buildah.io website created!
+
+ Not really tied to this release, but shortly before this release the buildah.io website was created. There you can find blogs, release announcements, talks and more. Go check it out if you haven't already!
+
+## Release Changes
+
+* Add the isolation option to the `from` command.
+* Change SELinux MCS Label handling to avoid collisions with Podman.
+* Fixed a number of issues with the `bud` --layers option.
+* The `rmi --prune` option no longer accepts an ImageID.
+* Additional groups are not reset for a rootless user when creating a new user namespace.
+* Builtin volumes are now owned by the UID/GID of the container.
+* Better error reporting for image-pulling errors with the `from` and `pull` commands.
+* Allow an empty destination for the `push` command. If empty, the source image parameter is reused for the destination.
+* Missing parent directories for volume mounts.
+* A number of commands have added verification to the flag ordering.
+* Error messages have been cleaned up when options are misordered.
+* Fixed a rare race condition in the `bud` command when pulling and naming the image.
+* Symbolic links in ADD/COPY Dockerfile commands are handled correctly.
+* The `push` command now shows the image digest after it succeeds.
+* The `rename` command verifies that the container name is not already in use.
+* The `containers` command no longer exits early when the --json option is used.
+* Provide better errors when non-OCI commands are in a Dockerfile when building an OCI container image.
+* After renaming a container, the correct name is now always shown in the `containers` command.
+* A number of small changes were made to Buildah images building process to more closely match Docker images.
+* Documented BUILDAH_* environment variables in `buildah bud --help` usage output.
+* Using the ADD command in a Dockerfile is now handled correctly when the --layers option is used with the bud command.
+* After deleting an image the correct image ID is now displayed.
+* COPY --chown in a Dockerfile is processed correctly.
+* The `run` command processing now bind mounts an empty directory for masking when using chroot.
+* Removed --no-pivot for rootless isolation.
+* Removed the stream option from the `bud` command.
+* Added a `pull` command.
+* Added a Docker conformance test suite.
+* Documentation Changes:
+ * Created a Container Tools Guide.
+* Updated the following packages to newer versions: CNI, containers/image, containers/storage, imagebuilder, libpod, runc, and urfave/cli.
+* Plus a number of smaller fixes.
+
+## Try it Out.
+
+If you haven’t yet, install Buildah from one of the Linux repos or GitHub and give it a spin. We’re betting you'll find it’s an easy and quick way to build containers in your environment without a daemon being involved!
+
+For those of you who contributed to this release, thank you very much for your contributions! If you haven't joined our community yet, don't wait any longer! Come join us in GitHub, where Open Source communities live.
+
+## Buildah == Simplicity
diff --git a/docs/release-announcements/v1.5.md b/docs/release-announcements/v1.5.md
new file mode 100644
index 0000000..76ca92f
--- /dev/null
+++ b/docs/release-announcements/v1.5.md
@@ -0,0 +1,81 @@
+# Buildah version 1.5 Release Announcement
+
+![buildah logo](https://buildah.io/images/buildah.png)
+
+We're pleased to announce the release of Buildah version 1.5 which is now available from GitHub for any Linux distro. We are shipping this release on Fedora, RHEL 7, CentOS, openSUSE and Ubuntu in the near future.
+
+The Buildah project has continued to grow over the past several weeks, welcoming several new contributors to the mix. Updates were made to rootless user handling, support was added for a few Dockerfile commands that were missing, a number of performance changes were made to the underlying pull commands, and bugs were fixed.
+
+## The major highlights for this release are:
+
+* A variety of updates were made in rootless user handling:
+ - Let runc auto-detect rootless capability. This will allow for different runtimes to run in rootless mode.
+ - If slirp4netns is available, it is now used to configure the network for the rootless isolation mode.
+ - Support for fuse-overlayfs has been added.
+ - User namespaces are now created only when they are needed.
+
+* Dockerfile handling improvements.
+ - If the ARG command was the first line in the Dockerfile, Buildah did not process it. This has been corrected.
+ - The “FROM {image} AS {reference}” command in a Dockerfile is now supported.
+
+* A number of performance changes have been made to the underlying pull functionality.
+
+* If a directory was passed to the ‘bud’ command’s --file parameter, a panic would occur even if a Dockerfile was in the directory. This has been corrected.
+
+## Release Changes
+
+* rootless: do not specify --rootless to the OCI runtime.
+* rootless: use slirp4netns to setup the network namespace.
+* rootless: only discard network configuration names.
+* run: only set up /etc/hosts or /etc/resolv.conf with network.
+* run: bind mount /etc/hosts and /etc/resolv.conf if not in a volume.
+* Handle directories better in ‘bud --file’.
+* common: support a per-user registries conf file.
+* unshare: do not override the configuration.
+* common: honor the rootless configuration file.
+* unshare: create a new mount namespace.
+* unshare: support libpod rootless pkg.
+* Use libpod GetDefaultStorage to report proper storage config.
+* Allow container storage to manage the SELinux labels.
+* When the value of isolation is provided to the run command, use the value provided instead of the default value.
+* Fix ‘buildah version’ error when build time could not be determined on some systems.
+* Walk symlinks when checking cached images for copied/added files.
+* ReserveSELinuxLabels(): handle wrapped errors from OpenBuilder.
+* Set WorkingDir to empty, not ‘/’ for conformance to Docker.
+* Allow setting --no-pivot default with an env var.
+* Add the --no-pivot flag to the run command.
+* Improve reporting about individual pull failures.
+* Return a "search registries were needed but empty" indication in util.ResolveName.
+* Simplify handling of the "tried to pull an image but found nothing" case.
+* Don't even invoke the pull loop if options.FromImage == "".
+* Eliminate the long-running ref and img variables in resolveImage.
+* In resolveImage, return immediately on success.
+* Fix From As in Dockerfile.
+* Sort CLI flags of buildah bud.
+* unshare: detect when unprivileged userns are disabled.
+* chroot: fix the args check.
+* buildah: use the same logic for XDG_RUNTIME_DIR as podman.
+* Podman --privileged selinux is broken.
+* parse: Modify the return value.
+* parse: modify the verification of the isolation value.
+* Make sure we log or return every error.
+* pullImage(): when completing an image name, try docker://.
+* Enforce "blocked" for registries for the "docker" transport.
+* Correctly set DockerInsecureSkipTLSVerify when pulling images.
+* chroot: set up seccomp and capabilities after supplemental groups.
+* chroot: fix capabilities list setup and application.
+* namespaces.bats: fix handling of uidmap/gidmap options in pairs.
+* chroot: only create user namespaces when we know we need them.
+* Check /proc/sys/user/max_user_namespaces on unshare(NEWUSERNS).
+* bash/buildah: add isolation option to the from command for bash completions.
+* Updated the following packages to newer versions: containers/image, containers/storage, libpod, and opencontainers/selinux.
+* Plus a number of smaller fixes.
+
+## Try it Out.
+
+If you haven’t yet, install Buildah from one of the Linux repos or GitHub and give it a spin. We’re betting you'll find it’s an easy and quick way to build containers in your environment without a daemon being involved!
+
+For those of you who contributed to this release, thank you very much for your contributions! If you haven't joined our community yet, don't wait any longer! Come join us in GitHub, where Open Source communities live.
+
+## Buildah == Simplicity
+
diff --git a/docs/samples/registries.conf b/docs/samples/registries.conf
new file mode 100644
index 0000000..a494070
--- /dev/null
+++ b/docs/samples/registries.conf
@@ -0,0 +1,28 @@
+## containers-registries.conf(5): System Registry Configuration File
+#
+# This is a sample of system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to TOML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries.search', 'registries.insecure',
+# and 'registries.block'.
+
+[registries.search]
+registries = ['quay.io', 'docker.io']
+
+# If you need to access insecure registries, add the registry's fully-qualified name.
+# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
+[registries.insecure]
+registries = []
+
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registry's fully-qualified name.
+#
+# Docker only
+[registries.block]
+registries = []
+
diff --git a/docs/tutorials/01-intro.md b/docs/tutorials/01-intro.md
new file mode 100644
index 0000000..d6c85a0
--- /dev/null
+++ b/docs/tutorials/01-intro.md
@@ -0,0 +1,284 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Buildah Tutorial 1
+## Building OCI container images
+
+The purpose of this tutorial is to demonstrate how Buildah can be used to build container images compliant with the [Open Container Initiative](https://www.opencontainers.org/) (OCI) [image specification](https://github.com/opencontainers/image-spec). Images can be built based on existing images, from scratch, and using Dockerfiles. OCI images built using the Buildah command line tool (CLI) and the underlying OCI based technologies (e.g. [containers/image](https://github.com/containers/image) and [containers/storage](https://github.com/containers/storage)) are portable and can therefore run in a Docker environment.
+
+In brief the `containers/image` project provides mechanisms to copy (push, pull), inspect, and sign container images. The `containers/storage` project provides mechanisms for storing filesystem layers, container images, and containers. Buildah is a CLI that takes advantage of these underlying projects and therefore allows you to build, move, and manage container images and containers.
+
+Buildah works on a number of Linux distributions, but is not supported on Windows or Mac platforms at this time. Buildah specializes mainly in building OCI images while [Podman](https://podman.io) provides a broader set of commands and functions that help you to maintain, modify and run OCI images and containers. For more information on the difference between the projects please refer to the [Buildah and Podman relationship](https://github.com/containers/buildah#buildah-and-podman-relationship) section on the main README.md.
+
+## Configure and Install Buildah
+
+Note that installation instructions below assume you are running a Linux distro that uses `dnf` as its package manager, and have all prerequisites fulfilled. See Buildah's [installation instructions][buildah-install] for a full list of prerequisites, and the `buildah` installation section in the [official Red Hat documentation][rh-repo-docs] for RHEL-specific instructions.
+
+[buildah-install]:../../install.md
+[rh-repo-docs]:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/building_running_and_managing_containers/
+
+First step is to install Buildah. Run as root because you will need to be root for installing the Buildah package:
+
+ $ sudo -s
+
+Then install buildah by running:
+
+ # dnf -y install buildah
+
+## Rootless User Configuration
+
+If you plan to run Buildah as a user without root privileges, i.e. a "rootless user", the administrator of the system might have to do a bit of additional configuration beforehand. The setup required for this is listed on the Podman GitHub site [here](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md). Buildah has the same setup and configuration requirements that Podman does for rootless users.
+
+## Post Installation Verification
+
+After installing Buildah we can see there are no images installed. The `buildah images` command will list all the images:
+
+ # buildah images
+
+We can also see that there are also no working containers by running:
+
+ # buildah containers
+
+When you build a working container from an existing image, Buildah defaults to appending '-working-container' to the image's name to construct a name for the container. The Buildah CLI conveniently returns the name of the new container. You can take advantage of this by assigning the returned value to a shell variable using standard shell assignment:
+
+ # container=$(buildah from fedora)
+
+It is not required to assign the container's name to a shell variable. Running `buildah from fedora` is sufficient. It just helps simplify commands later. To see the name of the container that we stored in the shell variable:
+
+ # echo $container
+
+What can we do with this new container? Let's try running bash:
+
+ # buildah run $container bash
+
+Notice we get a new shell prompt because we are running a bash shell inside of the container. It should be noted that `buildah run` is primarily intended for debugging and running commands as part of the build process. A more full-featured engine like Podman or a container runtime interface service like [CRI-O](https://github.com/kubernetes-sigs/cri-o) is more suited for starting containers in production.
+
+Be sure to `exit` out of the container and let's try running something else:
+
+ # buildah run $container java
+
+Oops. Java is not installed. A message containing something like the following was returned.
+
+    runc create failed: unable to start container process: exec: "java": executable file not found in $PATH
+
+Let's try installing it inside the container using:
+
+ # buildah run $container -- dnf -y install java
+
+The `--` syntax basically tells Buildah: there are no more `buildah run` command options after this point. The options after this point are for the command that's started inside the container. It is required if the command we specify includes command line options which are not meant for Buildah.
+
+Now running `buildah run $container java` will show that Java has been installed. It will return the standard Java `Usage` output.
+
+## Building a container from scratch
+
+One of the advantages of using `buildah` to build OCI compliant container images is that you can easily build a container image from scratch and therefore exclude unnecessary packages from your image. Most final container images for production probably don't need a package manager like `dnf`.
+
+Let's build a container and image from scratch. The special "image" name "scratch" tells Buildah to create an empty container. The container has a small amount of metadata about the container but no real Linux content.
+
+ # newcontainer=$(buildah from scratch)
+
+You can see this new empty container by running:
+
+ # buildah containers
+
+You should see output similar to the following:
+
+ CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
+ 82af3b9a9488 * 3d85fcda5754 docker.io/library/fedora:latest fedora-working-container
+ ac8fa6be0f0a * scratch working-container
+
+Its container name is working-container by default and it's stored in the `$newcontainer` variable. Notice the image name (IMAGE NAME) is "scratch". This is a special value that indicates that the working container wasn't based on an image. When we run:
+
+ # buildah images
+
+We don't see the "scratch" image listed. There is no corresponding scratch image. A container based on "scratch" starts from nothing.
+
+So does this container actually do anything? Let's see.
+
+ # buildah run $newcontainer bash
+
+Nope. This really is empty. The package installer `dnf` is not even inside this container. It's essentially an empty layer on top of the kernel. So what can be done with that? Thankfully there is a `buildah mount` command.
+
+ # scratchmnt=$(buildah mount $newcontainer)
+
+Note: If attempting to mount in rootless mode, the command fails. Mounting a container can only be done in a mount namespace that you own. Create and enter a user namespace and mount namespace by executing the `buildah unshare` command. See buildah-mount(1) man page for more information.
+
+ $ export newcontainer
+ $ buildah unshare
+ # scratchmnt=$(buildah mount $newcontainer)
+
+By echoing `$scratchmnt` we can see the path for the [overlay mount point](https://wiki.archlinux.org/index.php/Overlay_filesystem), which is used as the root file system for the container.
+
+ # echo $scratchmnt
+ /var/lib/containers/storage/overlay/b78d0e11957d15b5d1fe776293bd40a36c28825fb6cf76f407b4d0a95b2a200d/merged
+
+Notice that the overlay mount point is somewhere under `/var/lib/containers/storage` if you started out as root, and under your home directory's `.local/share/containers/storage` directory if you're in rootless mode. (See above on `containers/storage` or for more information see [containers/storage](https://github.com/containers/storage).)
+
+Now that we have a new empty container we can install or remove software packages or simply copy content into that container. So let's install `bash` and `coreutils` so that we can run bash scripts. This could easily be `nginx` or other packages needed for your container.
+
+**NOTE:** the version in the example below (35) relates to a Fedora version which is the Linux platform this example was run on. If you are running dnf on the host to populate the container, the version you specify must be valid for the host or dnf will throw an error. I.e. If you were to run this on a RHEL platform, you'd need to specify `--releasever 8.1` or similar instead of `--releasever 35`. If you want the container to be a particular Linux platform, change `scratch` in the first line of the example to the platform you want, i.e. `# newcontainer=$(buildah from fedora)`, and then you can specify an appropriate version number for that Linux platform.
+
+ # dnf install --installroot $scratchmnt --releasever 35 bash coreutils --setopt install_weak_deps=false -y
+
+Let's try it out (showing the prompt in this example to demonstrate the difference):
+
+ # buildah run $newcontainer sh
+ sh-5.1# cd /usr/bin
+ sh-5.1# ls
+ sh-5.1# exit
+
+Notice we now have a `/usr/bin` directory in the newcontainer's root file system. Let's first copy a simple file from our host into the container. Create a file called runecho.sh which contains the following:
+
+ #!/usr/bin/env bash
+ for i in `seq 0 9`;
+ do
+ echo "This is a new container from ipbabble [" $i "]"
+ done
+
+Change the permissions on the file so that it can be run:
+
+ # chmod +x runecho.sh
+
+With `buildah` files can be copied into the new container. We can then use `buildah run` to run that command within the container by specifying the command. We can also configure the image we'll create from this container to run the command directly when we run it using [Podman](https://github.com/containers/podman) and its `podman run` command. In short the `buildah run` command is equivalent to the "RUN" command in a Dockerfile (it always needs to be told what to run), whereas `podman run` is equivalent to the `docker run` command (it can look at the image's configuration to see what to run). Now let's copy this new command into the container's `/usr/bin` directory, configure the command to be run when the image is run by `podman`, and create an image from the container's root file system and configuration settings:
+
+ # To test with Podman, first install via:
+ # dnf -y install podman
+ # buildah copy $newcontainer ./runecho.sh /usr/bin
+ # buildah config --cmd /usr/bin/runecho.sh $newcontainer
+ # buildah commit $newcontainer newimage
+
+We've got a new image named "newimage". The container is still there because we didn't remove it.
+Now run the command in the container with Buildah specifying the command to run in the container:
+
+ # buildah run $newcontainer /usr/bin/runecho.sh
+ This is a new container from ipbabble [ 0 ]
+ This is a new container from ipbabble [ 1 ]
+ This is a new container from ipbabble [ 2 ]
+ This is a new container from ipbabble [ 3 ]
+ This is a new container from ipbabble [ 4 ]
+ This is a new container from ipbabble [ 5 ]
+ This is a new container from ipbabble [ 6 ]
+ This is a new container from ipbabble [ 7 ]
+ This is a new container from ipbabble [ 8 ]
+ This is a new container from ipbabble [ 9 ]
+
+Now use Podman to run the command in a new container based on our new image (no command required):
+
+ # podman run --rm newimage
+ This is a new container from ipbabble [ 0 ]
+ This is a new container from ipbabble [ 1 ]
+ This is a new container from ipbabble [ 2 ]
+ This is a new container from ipbabble [ 3 ]
+ This is a new container from ipbabble [ 4 ]
+ This is a new container from ipbabble [ 5 ]
+ This is a new container from ipbabble [ 6 ]
+ This is a new container from ipbabble [ 7 ]
+ This is a new container from ipbabble [ 8 ]
+ This is a new container from ipbabble [ 9 ]
+
+It works! Congratulations, you have built a new OCI container image from scratch that uses bash scripting.
+
+Back to Buildah, let's add some more configuration information.
+
+ # buildah config --created-by "ipbabble" $newcontainer
+ # buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora35-bashecho $newcontainer
+
+We can inspect the working container's metadata using the `inspect` command:
+
+ # buildah inspect $newcontainer
+
+We should probably unmount the working container's rootfs. We will need to commit the container again to create an image that includes the two configuration changes we just made:
+
+ # buildah unmount $newcontainer
+ # buildah commit $newcontainer fedora-bashecho
+ # buildah images
+
+And you can see there is a new image called `localhost/fedora-bashecho:latest`. You can inspect the new image using:
+
+ # buildah inspect --type=image fedora-bashecho
+
+Later when you want to create a new container or containers from this image, you simply need to do `buildah from fedora-bashecho`. This will create a new container based on this image for you.
+
+Now that you have the new image you can remove the scratch container called working-container:
+
+ # buildah rm $newcontainer
+
+or
+
+ # buildah rm working-container
+
+## OCI images built using Buildah are portable
+
+Let's test if this new OCI image is really portable to another container engine like Docker. First you should install Docker and start it. Notice that Docker requires a running daemon process in order to run any client commands. Buildah and Podman have no daemon requirement.
+
+ # dnf -y install docker
+ # systemctl start docker
+
+Let's copy that image from where containers/storage stores it to where the Docker daemon stores its images, so that we can run it using Docker. We can achieve this using `buildah push`. This copies the image to Docker's storage area which is located under `/var/lib/docker`. Docker's storage is managed by the Docker daemon. This needs to be explicitly stated by telling Buildah to push the image to the Docker daemon using `docker-daemon:`.
+
+ # buildah push fedora-bashecho docker-daemon:fedora-bashecho:latest
+
+Under the covers, the containers/image library calls into the containers/storage library to read the image's contents from where buildah keeps them, and sends them to the local Docker daemon, which writes them to where it keeps them. This can take a little while. And usually you won't need to do this. If you're using `buildah` you are probably not using Docker. This is just for demo purposes. Let's try it:
+
+ # docker run --rm fedora-bashecho
+ This is a new container from ipbabble [ 0 ]
+ This is a new container from ipbabble [ 1 ]
+ This is a new container from ipbabble [ 2 ]
+ This is a new container from ipbabble [ 3 ]
+ This is a new container from ipbabble [ 4 ]
+ This is a new container from ipbabble [ 5 ]
+ This is a new container from ipbabble [ 6 ]
+ This is a new container from ipbabble [ 7 ]
+ This is a new container from ipbabble [ 8 ]
+ This is a new container from ipbabble [ 9 ]
+
+OCI container images built with `buildah` are completely standard as expected. So now it might be time to run:
+
+ # dnf -y remove docker
+
+## Using Containerfiles/Dockerfiles with Buildah
+
+What if you have been using Docker for a while and have some existing Dockerfiles? Not a problem. Buildah can build images using a Dockerfile. The `build` command takes a Dockerfile as input and produces an OCI image.
+
+Find one of your Dockerfiles or create a file called Dockerfile. Use the following example or some variation if you'd like:
+
+ # Base on the most recently released Fedora
+ FROM fedora:latest
+ MAINTAINER ipbabble email buildahboy@redhat.com # not a real email
+
+ # Install updates and httpd
+ RUN echo "Updating all fedora packages"; dnf -y update; dnf -y clean all
+ RUN echo "Installing httpd"; dnf -y install httpd && dnf -y clean all
+
+ # Expose the default httpd port 80
+ EXPOSE 80
+
+ # Run the httpd
+ CMD ["/usr/sbin/httpd", "-DFOREGROUND"]
+
+Now run `buildah build` with the name of the Dockerfile and the name to be given to the created image (e.g. fedora-httpd):
+
+ # buildah build -f Dockerfile -t fedora-httpd .
+
+or, because `buildah build` defaults to `Dockerfile` and using the current directory as the build context:
+
+ # buildah build -t fedora-httpd
+
+You will see all the steps of the Dockerfile executing. Afterwards `buildah images` will show you the new image. Now we can create a container from the image and test it with `podman run`:
+
+ # podman run --rm -p 8123:80 fedora-httpd
+
+While that container is running, in another shell run:
+
+ # curl localhost:8123
+
+You will see the standard Apache webpage.
+
+Why not try and modify the Dockerfile. Do not install httpd, but instead ADD the runecho.sh file and have it run as the CMD.
+
+## Congratulations
+
+Well done. You have learned a lot about Buildah using this short tutorial. Hopefully you followed along with the examples and found them to be sufficient. Be sure to look at Buildah's man pages to see the other useful commands you can use. Have fun playing.
+
+If you have any suggestions or issues please post them at the [Buildah Issues page](https://github.com/containers/buildah/issues).
+
+For more information on Buildah and how you might contribute please visit the [Buildah home page on GitHub](https://github.com/containers/buildah).
diff --git a/docs/tutorials/02-registries-repositories.md b/docs/tutorials/02-registries-repositories.md
new file mode 100644
index 0000000..07aa570
--- /dev/null
+++ b/docs/tutorials/02-registries-repositories.md
@@ -0,0 +1,134 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Buildah Tutorial 2
+## Using Buildah with container registries
+
+The purpose of this tutorial is to demonstrate how Buildah can be used to move OCI compliant images in and out of private or public registries.
+
+In the [first tutorial](https://github.com/containers/buildah/blob/main/docs/tutorials/01-intro.md) we built an image from scratch that we called `fedora-bashecho` and we pushed it to a local Docker daemon using the `docker-daemon` protocol. We are going to push the same image to a private container registry.
+
+First we must pull down a registry. As a shortcut we will save the container name that is returned from the `buildah from` command into a bash variable called `registryctr`. This is just like we did in Tutorial 1:
+
+ # registryctr=$(buildah from registry)
+
+It is worth pointing out that the `from` command can also use other protocols beyond the default (and implicitly assumed) order that first looks in local containers-storage (containers-storage:) and then looks in a container registry (by default, Docker Hub) (docker:). For example, if you already had a registry container image downloaded by a local Docker daemon then you could use the following:
+
+ # registryctr=$(buildah from docker-daemon:registry:latest)
+
+Then we need to start the registry. You should start the registry in a separate shell and leave it running there:
+
+ # buildah run --net=host $registryctr /entrypoint.sh /etc/docker/registry/config.yml
+
+If you would like to see more details as to what is going on inside the registry, especially if you are having problems with the registry, you can run the registry container in debug mode as follows:
+
+ # buildah --log-level=debug run --net=host $registryctr /entrypoint.sh /etc/docker/registry/config.yml
+
+You can use `--log-level=debug` on any Buildah command.
+
+The registry is running and is waiting for requests to process. Notice that this registry is a Docker registry that we pulled from Docker Hub and we are running it for this example using `buildah run`. There is no Docker daemon running at this time.
+
+Let's push our image to the private registry. By default, Buildah is set up to only make secure connections to a registry. Therefore we will need to turn the TLS verification off using the `--tls-verify` flag. We also need to tell Buildah that the registry is on this local host (i.e. localhost) and listening on port 5000. Similar to what you'd expect to do on multi-tenant Docker Hub, we will explicitly specify that the registry is to store the image under the `ipbabble` repository - so as not to clash with other users' similarly named images.
+
+ # buildah push --tls-verify=false fedora-bashecho docker://localhost:5000/ipbabble/fedora-bashecho:latest
+
+[Skopeo](https://github.com/containers/skopeo) is a containers tool that was created to inspect images in registries without having to pull the image from the registry. It has grown to have many other uses. We will verify that the image has been stored by using Skopeo to inspect the image in the registry:
+
+ # skopeo inspect --tls-verify=false docker://localhost:5000/ipbabble/fedora-bashecho:latest
+ {
+ "Name": "localhost:5000/ipbabble/fedora-bashecho",
+ "Digest": "sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137",
+ "RepoTags": [
+ "latest"
+ ],
+ "Created": "2017-12-05T21:38:12.311901938Z",
+ "DockerVersion": "",
+ "Labels": {
+ "name": "fedora-bashecho"
+ },
+ "Architecture": "amd64",
+ "Os": "linux",
+ "Layers": [
+ "sha256:0cb7556c714767b8da6e0299cbeab765abaddede84769475c023785ae66d10ca"
+ ]
+ }
+
+We can verify that it is still portable to Docker by starting Docker again, as we did in the first tutorial. Then we can pull down the image and start the container using Docker:
+
+ # systemctl start docker
+ # docker pull localhost:5000/ipbabble/fedora-bashecho
+ Using default tag: latest
+ Trying to pull repository localhost:5000/ipbabble/fedora-bashecho ...
+ sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137: Pulling from localhost:5000/ipbabble/fedora-bashecho
+ 0cb7556c7147: Pull complete
+ Digest: sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137
+ Status: Downloaded newer image for localhost:5000/ipbabble/fedora-bashecho:latest
+
+ # docker run --rm localhost:5000/ipbabble/fedora-bashecho
+    This is a new container from ipbabble [ 0 ]
+    This is a new container from ipbabble [ 1 ]
+    This is a new container from ipbabble [ 2 ]
+    This is a new container from ipbabble [ 3 ]
+    This is a new container from ipbabble [ 4 ]
+    This is a new container from ipbabble [ 5 ]
+    This is a new container from ipbabble [ 6 ]
+    This is a new container from ipbabble [ 7 ]
+    This is a new container from ipbabble [ 8 ]
+    This is a new container from ipbabble [ 9 ]
+ # systemctl stop docker
+
+Pushing to Docker Hub is just as easy. Of course you must have an account with credentials. In this example I'm using a Docker Hub API key, which has the form "username:password" (example password has been edited for privacy), that I created with my Docker Hub account. I use the `--creds` flag to use my API key. I also specify my local image name `fedora-bashecho` as my image source and I use the `docker` protocol with no registry name or port so that it will look at the default port on the default Docker Hub registry:
+
+ # buildah push --creds=ipbabble:5bbb9990-6eeb-1234-af1a-aaa80066887c fedora-bashecho docker://ipbabble/fedora-bashecho:latest
+
+And let's inspect that with Skopeo:
+
+ # skopeo inspect --creds ipbabble:5bbb9990-6eeb-1234-af1a-aaa80066887c docker://ipbabble/fedora-bashecho:latest
+ {
+ "Name": "docker.io/ipbabble/fedora-bashecho",
+ "Digest": "sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137",
+ "RepoTags": [
+ "latest"
+ ],
+ "Created": "2017-12-05T21:38:12.311901938Z",
+ "DockerVersion": "",
+ "Labels": {
+ "name": "fedora-bashecho"
+ },
+ "Architecture": "amd64",
+ "Os": "linux",
+ "Layers": [
+ "sha256:0cb7556c714767b8da6e0299cbeab765abaddede84769475c023785ae66d10ca"
+ ]
+ }
+
+We can use Buildah to pull down the image using the `buildah from` command. But before we do let's clean up our local containers-storage so that we don't already have a copy of the fedora-bashecho image - otherwise Buildah will know it already exists and not bother pulling it down.
+
+ # buildah images
+ IMAGE ID IMAGE NAME CREATED AT SIZE
+ d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
+ e31b0f0b0a63 docker.io/library/fedora-bashecho:latest Dec 5, 2017 21:38 772 B
+ # buildah rmi fedora-bashecho
+ untagged: docker.io/library/fedora-bashecho:latest
+ e31b0f0b0a63e94c5a558d438d7490fab930a282a4736364360ab9b92cb25f3a
+ # buildah images
+ IMAGE ID IMAGE NAME CREATED AT SIZE
+ d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
+
+Okay, so we don't have a fedora-bashecho image anymore. Let's pull the image from Docker Hub:
+
+ # buildah from ipbabble/fedora-bashecho
+
+If you don't want to bother doing the remove image step (`rmi`) you can use the flag `--pull-always` to force the image to be pulled again and overwrite any corresponding local image.
+
+Now check that image is in the local containers-storage:
+
+ # buildah images
+ IMAGE ID IMAGE NAME CREATED AT SIZE
+ d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
+ 864871ac1c45 docker.io/ipbabble/fedora-bashecho:latest Dec 5, 2017 21:38 315.4 MB
+
+Success!
+
+If you have any suggestions or issues please post them at the [Buildah Issues page](https://github.com/containers/buildah/issues).
+
+For more information on Buildah and how you might contribute please visit the [Buildah home page on GitHub](https://github.com/containers/buildah).
diff --git a/docs/tutorials/03-on-build.md b/docs/tutorials/03-on-build.md
new file mode 100644
index 0000000..5d50266
--- /dev/null
+++ b/docs/tutorials/03-on-build.md
@@ -0,0 +1,193 @@
+![buildah logo](../../logos/buildah-logo_large.png)
+
+# Buildah Tutorial 3
+## Using ONBUILD in Buildah
+
+The purpose of this tutorial is to demonstrate how Buildah can use a Dockerfile with the ONBUILD instruction within it or how the ONBUILD instruction can be used with the `buildah config` command. The ONBUILD instruction stores a command in the meta data of a container image which is then invoked when the image is used as a base image. The image can have multiple ONBUILD instructions. Note: The ONBUILD instructions do not change the content of the image that contain the instructions, only the container images that are created from this image are changed based on the FROM command.
+
+Container images that are compliant with the [Open Container Initiative][] (OCI) [image specification][] do not support the ONBUILD instruction. Images that are created by Buildah are in the OCI format by default. Only container images that are created by Buildah in the Docker format can use the ONBUILD instruction. The OCI format can be overridden in Buildah by specifying the Docker format with the `--format=docker` option or by setting the BUILDAH_FORMAT environment variable to 'docker'. Regardless of the format selected, Buildah is capable of working seamlessly with either OCI or Docker compliant images and containers.
+
+On to the tutorial. The first step is to install Buildah. In short, the `buildah run` command emulates the RUN command that is found in a Dockerfile while the `podman run` command emulates the `docker run` command. For the purpose of this tutorial Buildah's run command will be used. As an aside, Podman is aimed at managing containers, images, and pods while Buildah focuses on the building of container images. For more info on Podman, please go to [Podman's site][].
+
+## Setup
+
+The following assumes installation on Fedora.
+
+Run as root because you will need to be root for installing the Buildah package:
+
+ $ sudo -s
+
+Then install Buildah by running:
+
+ # dnf -y install buildah
+
+After installing Buildah check to see that there are no images installed. The `buildah images` command will list all the images:
+
+ # buildah images
+
+We can also see that there are also no containers by running:
+
+ # buildah containers
+
+## Examples
+
+The two examples that will be shown are relatively simple, but they illustrate how a command or a number of commands can be setup in a primary image such that they will be added to a secondary container image that is created from it. This is extremely useful if you need to setup an environment where your containers have 75% of the same content, but need a few individual tweaks. This can be helpful in setting up an environment for maven or java development containers for instance. In this way you can create a single Dockerfile with all the common setup steps as ONBUILD commands and then really minimize the buildah commands or instructions in a second Dockerfile that would be necessary to complete the creation of the container image.
+
+NOTE: In the examples below the option `--format=docker` is used in several places. If you wanted to omit that, you could define the `BUILDAH_FORMAT` environment variable and set it to 'docker'. On Fedora that command would be `export BUILDAH_FORMAT=docker`.
+
+## ONBUILD in a Dockerfile - Example 1
+
+The first example was provided by Chris Collins (GitHub @clcollins), the idea is a file `/bar` will be created in the derived container images only, and not in our original image.
+
+First create two Dockerfiles:
+
+```
+$ cat << EOF > Dockerfile
+FROM fedora:latest
+RUN touch /foo
+ONBUILD RUN touch /bar
+EOF
+
+$ cat << EOF > Dockerfile-2
+FROM onbuild-image
+RUN touch /baz
+EOF
+```
+
+Now to create the first container image and verify that ONBUILD has been set:
+
+```
+# buildah build --format=docker -f Dockerfile -t onbuild-image .
+# buildah inspect --format '{{.Docker.Config.OnBuild}}' onbuild-image
+[RUN touch /bar]
+```
+
+The second container image is now created and the `/bar` file will be created within it:
+
+```
+# buildah build --format=docker -f Dockerfile-2 -t result-image .
+STEP 1: FROM onbuild-image
+STEP 2: RUN touch /bar # Note /bar is created here based on the ONBUILD in the base image
+STEP 3: RUN touch /baz
+COMMIT result-image
+{output edited for brevity}
+$ container=$(sudo buildah from result-image:latest)
+# buildah run $container ls /bar /foo /baz
+/bar /baz /foo
+```
+
+## ONBUILD via `buildah config` - Example 1
+
+Instead of using a Dockerfile to create the onbuild-image, Buildah allows you to build an image and configure it directly with the same commands that can be found in a Dockerfile. This allows for easy on the fly manipulation of your image. Let's look at the previous example without the use of a Dockerfile when building the primary container image.
+
+First a Fedora container will be created with `buildah from`, then the `/foo` file will be added with `buildah run`. The `buildah config` command will configure ONBUILD to add `/bar` when a container image is created from the primary image, and finally the image will be saved with `buildah commit`.
+
+```
+# buildah from --format=docker --name onbuild-container fedora:latest
+# buildah run onbuild-container touch /foo
+# buildah config --onbuild="RUN touch /bar" onbuild-container
+# buildah commit --format=docker onbuild-container onbuild-image
+{output edited for brevity}
+# buildah inspect --format '{{.Docker.Config.OnBuild}}' onbuild-image
+[RUN touch /bar]
+```
+The onbuild-image has been created, so now create a container from it using the same commands as the first example using the second Dockerfile:
+
+```
+# buildah build --format=docker -f Dockerfile-2 -t result-image .
+STEP 1: FROM onbuild-image
+STEP 2: RUN touch /bar # Note /bar is created here based on the ONBUILD in the base image
+STEP 3: RUN touch /baz
+COMMIT result-image
+{output edited for brevity}
+$ container=$(buildah from result-image)
+# buildah run $container ls /bar /foo /baz
+/bar /baz /foo
+```
+Or for bonus points, piece the secondary container image together with Buildah commands directly:
+
+```
+# buildah from --format=docker --name result-container onbuild-image
+result-container
+# buildah run result-container touch /baz
+# buildah run result-container ls /bar /foo /baz
+/bar /baz /foo
+```
+
+## ONBUILD via `buildah config` - Example 2
+
+For this example the ONBUILD instructions in the primary container image will be used to copy a shell script and then run it in the secondary container image. For the script, we'll make use of the shell script from the [Introduction Tutorial](01-intro.md). First create a file in the local directory called `runecho.sh` containing the following:
+
+```
+#!/usr/bin/env bash
+
+for i in `seq 0 9`;
+do
+ echo "This is a new container from ipbabble [" $i "]"
+done
+```
+Change the permissions on the file so that it can be run:
+
+```
+$ chmod +x runecho.sh
+```
+
+Now create a second primary container image. This image has multiple ONBUILD instructions, the first ONBUILD instruction copies the file into the image and a second ONBUILD instruction to then run it. We're going to do this example using only Buildah commands. A Dockerfile could be translated easily and used from these commands, or these commands could be saved to a script directly.
+
+```
+# buildah from --format=docker --name onbuild-container-2 fedora:latest
+onbuild-container-2
+# buildah config --onbuild="COPY ./runecho.sh /usr/bin/runecho.sh" onbuild-container-2
+# buildah config --onbuild="RUN /usr/bin/runecho.sh" onbuild-container-2
+# buildah commit --format=docker onbuild-container-2 onbuild-image-2
+{output edited for brevity}
+# buildah inspect --format '{{.Docker.Config.OnBuild}}' onbuild-image-2
+[COPY ./runecho.sh /usr/bin/runecho.sh RUN /usr/bin/runecho.sh]
+```
+
+Now the secondary container can be created from the second primary container image onbuild-image-2. The runecho.sh script will be copied to the container's /usr/bin directory and then run from there when the secondary container is created.
+
+```
+# buildah from --format=docker --name result-container-2 onbuild-image-2
+STEP 1: COPY ./runecho.sh /usr/bin/runecho.sh
+STEP 2: RUN /usr/bin/runecho.sh
+This is a new container from ipbabble [ 1 ]
+This is a new container from ipbabble [ 2 ]
+This is a new container from ipbabble [ 3 ]
+This is a new container from ipbabble [ 4 ]
+This is a new container from ipbabble [ 5 ]
+This is a new container from ipbabble [ 6 ]
+This is a new container from ipbabble [ 7 ]
+This is a new container from ipbabble [ 8 ]
+This is a new container from ipbabble [ 9 ]
+result-container-2
+```
+As result-container-2 has a copy of the script stored in its /usr/bin it can be run at anytime.
+
+```
+# buildah run result-container-2 /usr/bin/runecho.sh
+This is a new container from ipbabble [ 1 ]
+This is a new container from ipbabble [ 2 ]
+This is a new container from ipbabble [ 3 ]
+This is a new container from ipbabble [ 4 ]
+This is a new container from ipbabble [ 5 ]
+This is a new container from ipbabble [ 6 ]
+This is a new container from ipbabble [ 7 ]
+This is a new container from ipbabble [ 8 ]
+This is a new container from ipbabble [ 9 ]
+
+```
+Again these aren't the most extensive examples, but they both illustrate how a primary image can be setup and then a secondary container image can then be created with just a few steps. This way the steps that are set up with the ONBUILD instructions don't have to be typed in each and every time that you need to setup your container.
+
+## Congratulations
+
+Well done. You have learned about Buildah's ONBUILD functionality using this short tutorial. Hopefully you followed along with the examples and found them to be sufficient. Be sure to look at Buildah's man pages to see the other useful commands you can use. Have fun playing.
+
+If you have any suggestions or issues please post them at the [Buildah Issues page](https://github.com/containers/buildah/issues).
+
+For more information on Buildah and how you might contribute please visit the [Buildah home page on GitHub](https://github.com/containers/buildah).
+
+[Podman's site]: https://podman.io/
+[image specification]: https://github.com/opencontainers/image-spec
+[Introduction Tutorial]: 01-intro.md
+[Open Container Initiative]: https://www.opencontainers.org/
diff --git a/docs/tutorials/04-include-in-your-build-tool.md b/docs/tutorials/04-include-in-your-build-tool.md
new file mode 100644
index 0000000..fb930b9
--- /dev/null
+++ b/docs/tutorials/04-include-in-your-build-tool.md
@@ -0,0 +1,203 @@
+![buildah logo](../../logos/buildah-logo_large.png)
+
+# Buildah Tutorial 4
+
+## Include Buildah in your build tool
+
+The purpose of this tutorial is to demonstrate how to include Buildah as a library in your build tool.
+
+You can take advantage of all features provided by Buildah, like using Dockerfiles and building using rootless mode.
+
+In this tutorial I'll show you how to create a simple CLI tool that creates an image containing NodeJS and a JS main file.
+
+## Bootstrap the project and install the dependencies
+
+Bootstrap the installation of development dependencies of Buildah by following the [Building from scratch](https://github.com/containers/buildah/blob/main/install.md#building-from-scratch) instructions and in particular creating a directory for the Buildah project by completing the instructions in the [Installation from GitHub](https://github.com/containers/buildah/blob/main/install.md#installation-from-github) section of that page.
+
+Now let's bootstrap our project. Assuming you are in the directory of the project, run the following to initialize the go modules:
+
+```shell
+go mod init
+```
+
+Next, we should import Buildah as a dependency. However, make sure that you have the following
+developer packages installed:
+
+```shell
+dnf install btrfs-progs-devel gpgme-devel
+```
+
+Depending on your Linux distribution, the names of the packages can be slightly different. For instance, on
+OpenSUSE it would be
+
+```shell
+zypper in libbtrfs-devel libgpgme-devel
+```
+
+On Debian and Ubuntu, it would be
+
+```shell
+apt install libbtrfs-dev libgpgme-dev
+```
+
+Now import Buildah as a dependency:
+
+```shell
+go get github.com/containers/buildah
+```
+
+## Build the image
+
+Now you can develop your application. To access the build features of Buildah, you need to instantiate `buildah.Builder`. This struct has methods to configure the build, define the build steps and run it.
+
+To instantiate a `Builder`, you need a `storage.Store` (the Store interface found in [store.go](https://github.com/containers/storage/blob/main/store.go)) from [`github.com/containers/storage`](https://github.com/containers/storage), where the intermediate and result images will be stored:
+
+```go
+buildStoreOptions, err := storage.DefaultStoreOptionsAutoDetectUID()
+buildStore, err := storage.GetStore(buildStoreOptions)
+```
+
+Define the builder options:
+
+```go
+builderOpts := buildah.BuilderOptions{
+ FromImage: "node:12-alpine", // base image
+}
+```
+
+Now instantiate the `Builder`:
+
+```go
+builder, err := buildah.NewBuilder(context.TODO(), buildStore, builderOpts)
+```
+
+Let's add our JS file (assuming it is in your local directory with the name `script.js`):
+
+```go
+err = builder.Add("/home/node/", false, buildah.AddAndCopyOptions{}, "script.js")
+```
+
+And configure the command to run:
+
+```go
+builder.SetCmd([]string{"node", "/home/node/script.js"})
+```
+
+Before completing the build, create the image reference:
+
+```go
+imageRef, err := is.Transport.ParseStoreReference(buildStore, "docker.io/myusername/my-image")
+```
+
+Now you can commit the build:
+
+```go
+imageId, _, _, err := builder.Commit(context.TODO(), imageRef, buildah.CommitOptions{})
+```
+
+## Supplying defaults for Run()
+
+If you need to run a command as part of the build, you'll have to dig up a couple of defaults that aren't picked up automatically:
+```go
+conf, err := config.Default()
+capabilitiesForRoot, err := conf.Capabilities("root", nil, nil)
+isolation, err := parse.IsolationOption("")
+```
+
+## Rootless mode
+
+To enable rootless mode, import `github.com/containers/storage/pkg/unshare` and add this code at the beginning of your main method:
+
+```go
+if buildah.InitReexec() {
+ return
+}
+unshare.MaybeReexecUsingUserNamespace(false)
+```
+
+This code ensures that your application is re-executed in a user namespace where it has root privileges.
+
+## Complete code
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/common/pkg/config"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/unshare"
+)
+
+func main() {
+ if buildah.InitReexec() {
+ return
+ }
+ unshare.MaybeReexecUsingUserNamespace(false)
+
+ buildStoreOptions, err := storage.DefaultStoreOptionsAutoDetectUID()
+ if err != nil {
+ panic(err)
+ }
+
+ conf, err := config.Default()
+ if err != nil {
+ panic(err)
+ }
+ capabilitiesForRoot, err := conf.Capabilities("root", nil, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ buildStore, err := storage.GetStore(buildStoreOptions)
+ if err != nil {
+ panic(err)
+ }
+ defer buildStore.Shutdown(false)
+
+ builderOpts := buildah.BuilderOptions{
+ FromImage: "node:12-alpine",
+ Capabilities: capabilitiesForRoot,
+ }
+
+ builder, err := buildah.NewBuilder(context.TODO(), buildStore, builderOpts)
+ if err != nil {
+ panic(err)
+ }
+ defer builder.Delete()
+
+ err = builder.Add("/home/node/", false, buildah.AddAndCopyOptions{}, "script.js")
+ if err != nil {
+ panic(err)
+ }
+
+ isolation, err := parse.IsolationOption("")
+ if err != nil {
+ panic(err)
+ }
+
+ err = builder.Run([]string{"sh", "-c", "date > /home/node/build-date.txt"}, buildah.RunOptions{Isolation: isolation, Terminal: buildah.WithoutTerminal})
+ if err != nil {
+ panic(err)
+ }
+
+ builder.SetCmd([]string{"node", "/home/node/script.js"})
+
+ imageRef, err := is.Transport.ParseStoreReference(buildStore, "docker.io/myusername/my-image")
+ if err != nil {
+ panic(err)
+ }
+
+ imageId, _, _, err := builder.Commit(context.TODO(), imageRef, buildah.CommitOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("Image built! %s\n", imageId)
+}
+```
diff --git a/docs/tutorials/05-openshift-rootless-build.md b/docs/tutorials/05-openshift-rootless-build.md
new file mode 100644
index 0000000..2b8142e
--- /dev/null
+++ b/docs/tutorials/05-openshift-rootless-build.md
@@ -0,0 +1,578 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Buildah Tutorial 5
+## Using Buildah to build images in a rootless OpenShift container
+
+This tutorial will walk you through setting up a container in OpenShift for building images.
+
+The instructions have been tested on OpenShift 4.9.5 with Buildah 1.23.1.
+
+Note that the VFS is used for storage instead of the more performant fuse-overlayfs or overlayfs. But the latter do not work at the moment.
+
+### Prepare a new namespace
+
+Create a new project in OpenShift called `image-build`.
+
+Make the registry URL available to the following steps.
+
+*Note that you need to change this so it matches your OpenShift installation.*
+
+````console
+$ export REGISTRY_URL=default-route-openshift-image-registry.apps.whatever.com
+````
+
+Login to OpenShift and its registry:
+
+````console
+$ oc login -n image-build
+Username: ...
+Password: ...
+Login successful.
+
+You have access to N projects, the list has been suppressed. You can list all projects with 'oc projects'
+
+Using project "image-build".
+
+$ oc whoami -t | buildah login -u $(id -u -n) --password-stdin $REGISTRY_URL
+Login Succeeded!
+````
+
+
+### Make builder image
+
+This is the image that will host the building. It uses the Buildah stable official image, which is based on Fedora 35.
+
+The image starts a python web server. This allows us to interact with the container via the OpenShift console terminal, demonstrating that building an image works.
+
+First create an ImageStream to hold the image:
+
+````console
+$ oc create -f - <<EOF
+apiVersion: image.openshift.io/v1
+kind: ImageStream
+metadata:
+ name: buildah
+EOF
+
+imagestream.image.openshift.io/buildah created
+````
+
+Then create the image.
+
+Note that no packages are updated - this should ensure that this tutorial is actually working.
+If you are making anything for use in the real world, make sure to update it frequently for security fixes!
+
+````console
+$ cat > Containerfile-buildah <<EOF
+FROM quay.io/buildah/stable:v1.23.1
+
+RUN touch /etc/subgid /etc/subuid \
+ && chmod g=u /etc/subgid /etc/subuid /etc/passwd \
+ && echo build:10000:65536 > /etc/subuid \
+ && echo build:10000:65536 > /etc/subgid
+
+# Use chroot since the default runc does not work when running rootless
+RUN echo "export BUILDAH_ISOLATION=chroot" >> /home/build/.bashrc
+
+# Use VFS since fuse does not work
+RUN mkdir -p /home/build/.config/containers \
+ && (echo '[storage]';echo 'driver = "vfs"') > /home/build/.config/containers/storage.conf
+
+USER build
+WORKDIR /home/build
+
+# Just keep the container running, allowing "oc rsh" access
+CMD ["python3", "-m", "http.server"]
+EOF
+
+$ buildah build -t $REGISTRY_URL/image-build/buildah -f Containerfile-buildah
+STEP 1: FROM quay.io/buildah/stable:v1.23.1
+STEP 2: RUN touch /etc/subgid /etc/subuid && chmod g=u /etc/subgid /etc/subuid /etc/passwd && echo build:10000:65536 > /etc/subuid && echo build:10000:65536 > /etc/subgid
+--> a25dbbd3824
+STEP 3: CMD ["python3", "-m", "http.server"]
+STEP 4: COMMIT default-route-openshift-image-registry.../image-build/buildah
+--> 9656f2677e3
+9656f2677e3e760e071c93ca7cba116871f5549b28ad8595e9134679db2345fc
+
+$ buildah push $REGISTRY_URL/image-build/buildah
+Getting image source signatures
+...
+Storing signatures
+````
+
+
+### Create Service Account for building images
+
+Create a service account which is solely used for image building.
+
+````console
+$ oc create -f - <<EOF
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: buildah-sa
+EOF
+
+serviceaccount/buildah-sa created
+````
+
+You need to assign it the ability to run as the standard `anyuid` [SCC](https://docs.openshift.com/container-platform/4.3/authentication/managing-security-context-constraints.html).
+
+````console
+$ oc adm policy add-scc-to-user anyuid -z buildah-sa
+clusterrole.rbac.authorization.k8s.io/system:openshift:scc:anyuid added: "buildah-sa"
+````
+
+This will give the container *cap_kill*, *cap_setgid*, and *cap_setuid* capabilities which are extras compared to the `restricted` SCC.
+Note that *cap_kill* is dropped by the DeploymentConfig, but the two others are required to execute commands with different user ids as an image is built.
+
+
+With this in place, when you get the Pod running (in a little while!), its YAML state will contain:
+
+````
+kind: Pod
+metadata:
+ ...
+ openshift.io/scc: anyuid
+...
+````
+
+Which tells you that the Pod has been launched with the correct permissions.
+
+
+#### Create DeploymentConfig
+
+This is a simple DC just to get the container running.
+
+Note that it drops CAP_KILL which is not required.
+
+````console
+$ oc create -f - <<EOF
+apiVersion: apps.openshift.io/v1
+kind: DeploymentConfig
+metadata:
+ name: buildah
+spec:
+ selector:
+ app: image-builder
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: image-builder
+ spec:
+ serviceAccount: buildah-sa
+ containers:
+ - name: buildah
+ image: image-registry.openshift-image-registry.svc:5000/image-build/buildah
+ securityContext:
+ capabilities:
+ drop:
+ - KILL
+EOF
+
+deploymentconfig.apps.openshift.io/buildah created
+````
+
+#### The Buildah container
+
+In the OpenShift console you can now open the Pod's Terminal and try building an image.
+
+This is what the user/platform should look like:
+
+````console
+sh-5.0$ id
+uid=1000(build) gid=1000(build) groups=1000(build)
+
+sh-5.0$ uname -a
+Linux buildah-1-8t74l 4.18.0-147.13.2.el8_1.x86_64 #1 SMP Wed May 13 15:19:35 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
+
+sh-5.0$ capsh --print
+Current: = cap_chown,cap_dac_override,cap_fowner,cap_fsetid,cap_setgid,cap_setuid,cap_setpcap,cap_net_bind_service=i
+Bounding set =cap_chown,cap_dac_override,cap_fowner,cap_fsetid,cap_setgid,cap_setuid,cap_setpcap,cap_net_bind_service
+Ambient set =
+Current IAB: cap_chown,cap_dac_override,!cap_dac_read_search,cap_fowner,cap_fsetid,!cap_kill,cap_setgid,cap_setuid,cap_setpcap,!cap_linux_immutable,cap_net_bind_service,!cap_net_broadcast,!cap_net_admin,!cap_net_raw,!cap_ipc_lock,!cap_ipc_owner,!cap_sys_module,!cap_sys_rawio,!cap_sys_chroot,!cap_sys_ptrace,!cap_sys_pacct,!cap_sys_admin,!cap_sys_boot,!cap_sys_nice,!cap_sys_resource,!cap_sys_time,!cap_sys_tty_config,!cap_mknod,!cap_lease,!cap_audit_write,!cap_audit_control,!cap_setfcap,!cap_mac_override,!cap_mac_admin,!cap_syslog,!cap_wake_alarm,!cap_block_suspend,!cap_audit_read,!cap_perfmon,!cap_bpf
+Securebits: 00/0x0/1'b0 (no-new-privs=0)
+ secure-noroot: no (unlocked)
+ secure-no-suid-fixup: no (unlocked)
+ secure-keep-caps: no (unlocked)
+ secure-no-ambient-raise: no (unlocked)
+uid=1000(build)
+gid=1000(build)
+groups=
+Guessed mode: UNCERTAIN (0)
+````
+
+This is what the Buildah data should look like:
+
+````console
+sh-5.0$ buildah version
+Version: 1.23.1
+Go Version: go1.16.8
+Image Spec: 1.0.1-dev
+Runtime Spec: 1.0.2-dev
+CNI Spec: 0.4.0
+libcni Version: v0.8.1
+image Version: 5.16.0
+Git Commit:
+Built: Tue Sep 28 18:26:37 2021
+OS/Arch: linux/amd64
+BuildPlatform: linux/amd64
+
+sh-5.0$ buildah info
+{
+ "host": {
+ "CgroupVersion": "v1",
+ "Distribution": {
+ "distribution": "fedora",
+ "version": "35"
+ },
+ "MemTotal": 33726861312,
+ "MemFree": 20319305728,
+ "OCIRuntime": "crun",
+ "SwapFree": 0,
+ "SwapTotal": 0,
+ "arch": "amd64",
+ "cpus": 4,
+ "hostname": "buildah-1-6hvsw",
+ "kernel": "4.18.0-305.19.1.el8_4.x86_64",
+ "os": "linux",
+ "rootless": true,
+ "uptime": "61h 10m 39.3s (Approximately 2.54 days)"
+ },
+ "store": {
+ "ContainerStore": {
+ "number": 0
+ },
+ "GraphDriverName": "vfs",
+ "GraphOptions": null,
+ "GraphRoot": "/home/build/.local/share/containers/storage",
+ "GraphStatus": {},
+ "ImageStore": {
+ "number": 0
+ },
+ "RunRoot": "/var/tmp/containers-user-1000/containers"
+ }
+}
+
+````
+
+#### Building an image
+
+Now create some files for testing.
+
+This container test file exercises at least some of the critical parts of building an image (package update/installation, execution of commands, and use of volumes).
+
+````console
+sh-5.0$ cat > test-script.sh <<EOF
+#!/bin/bash
+echo "Args \$*"
+ls -l /
+EOF
+
+sh-5.0$ chmod +x test-script.sh
+
+sh-5.0$ cat > Containerfile.test <<EOF
+FROM fedora:35
+RUN ls -l /test-script.sh
+RUN /test-script.sh "Hello world"
+RUN dnf update -y | tee /output/update-output.txt
+RUN dnf install -y gcc
+EOF
+
+sh-5.0$ mkdir output
+````
+
+And finally build the image, testing that everything works as expected:
+
+````console
+sh-5.0$ buildah -v /home/build/output:/output:rw -v /home/build/test-script.sh:/test-script.sh:ro build-using-dockerfile -t myimage -f Containerfile.test
+STEP 1/5: FROM fedora:35
+Resolved "fedora" as an alias (/etc/containers/registries.conf.d/000-shortnames.conf)
+Trying to pull registry.fedoraproject.org/fedora:35...
+Getting image source signatures
+Copying blob 791199e77b3d done
+Copying config 1b52edb081 done
+Writing manifest to image destination
+Storing signatures
+STEP 2/5: RUN ls -l /test-script.sh
+-rwxr-xr-x. 1 root root 34 Nov 12 21:20 /test-script.sh
+STEP 3/5: RUN /test-script.sh "Hello world"
+Args Hello world
+total 8
+lrwxrwxrwx. 1 root root 7 Jul 21 23:47 bin -> usr/bin
+dr-xr-xr-x. 2 root root 6 Jul 21 23:47 boot
+drwxr-xr-x. 5 nobody nobody 360 Nov 12 21:17 dev
+drwxr-xr-x. 42 root root 4096 Nov 3 16:38 etc
+drwxr-xr-x. 2 root root 6 Jul 21 23:47 home
+lrwxrwxrwx. 1 root root 7 Jul 21 23:47 lib -> usr/lib
+lrwxrwxrwx. 1 root root 9 Jul 21 23:47 lib64 -> usr/lib64
+drwx------. 2 root root 6 Nov 3 16:37 lost+found
+drwxr-xr-x. 2 root root 6 Jul 21 23:47 media
+drwxr-xr-x. 2 root root 6 Jul 21 23:47 mnt
+drwxr-xr-x. 2 root root 6 Jul 21 23:47 opt
+drwxr-xr-x. 2 root root 6 Nov 12 21:21 output
+dr-xr-xr-x. 352 nobody nobody 0 Nov 12 21:17 proc
+dr-xr-x---. 2 root root 196 Nov 3 16:38 root
+drwxr-xr-x. 3 root root 42 Nov 12 21:21 run
+lrwxrwxrwx. 1 root root 8 Jul 21 23:47 sbin -> usr/sbin
+drwxr-xr-x. 2 root root 6 Jul 21 23:47 srv
+dr-xr-xr-x. 13 nobody nobody 0 Nov 12 20:27 sys
+-rwxr-xr-x. 1 root root 34 Nov 12 21:20 test-script.sh
+drwxrwxrwt. 2 root root 6 Nov 3 16:37 tmp
+drwxr-xr-x. 12 root root 144 Nov 3 16:38 usr
+drwxr-xr-x. 18 root root 235 Nov 3 16:38 var
+STEP 4/5: RUN dnf update -y | tee /output/update-output.txt
+Fedora 35 - x86_64 7.1 MB/s | 61 MB 00:08
+Fedora 35 openh264 (From Cisco) - x86_64 4.1 kB/s | 2.5 kB 00:00
+Fedora Modular 35 - x86_64 3.1 MB/s | 2.6 MB 00:00
+Fedora 35 - x86_64 - Updates 5.6 MB/s | 10 MB 00:01
+Fedora Modular 35 - x86_64 - Updates 763 kB/s | 712 kB 00:00
+Last metadata expiration check: 0:00:01 ago on Fri Nov 12 21:22:21 2021.
+Dependencies resolved.
+================================================================================
+ Package Arch Version Repository Size
+================================================================================
+Upgrading:
+ glib2 x86_64 2.70.1-1.fc35 updates 2.6 M
+ glibc x86_64 2.34-8.fc35 updates 2.0 M
+ glibc-common x86_64 2.34-8.fc35 updates 406 k
+ glibc-minimal-langpack x86_64 2.34-8.fc35 updates 134 k
+ gpgme x86_64 1.15.1-6.fc35 updates 206 k
+ libgpg-error x86_64 1.43-1.fc35 updates 216 k
+ python3-gpg x86_64 1.15.1-6.fc35 updates 261 k
+ shadow-utils x86_64 2:4.9-5.fc35 updates 1.1 M
+ vim-minimal x86_64 2:8.2.3582-1.fc35 updates 706 k
+Installing weak dependencies:
+ glibc-gconv-extra x86_64 2.34-8.fc35 updates 1.6 M
+
+Transaction Summary
+================================================================================
+Install 1 Package
+Upgrade 9 Packages
+
+Total download size: 9.3 M
+Downloading Packages:
+(1/10): glibc-2.34-8.fc35.x86_64.rpm 5.2 MB/s | 2.0 MB 00:00
+(2/10): glibc-gconv-extra-2.34-8.fc35.x86_64.rp 3.9 MB/s | 1.6 MB 00:00
+(3/10): glib2-2.70.1-1.fc35.x86_64.rpm 5.7 MB/s | 2.6 MB 00:00
+(4/10): glibc-minimal-langpack-2.34-8.fc35.x86_ 2.1 MB/s | 134 kB 00:00
+(5/10): glibc-common-2.34-8.fc35.x86_64.rpm 3.9 MB/s | 406 kB 00:00
+(6/10): gpgme-1.15.1-6.fc35.x86_64.rpm 4.6 MB/s | 206 kB 00:00
+(7/10): libgpg-error-1.43-1.fc35.x86_64.rpm 5.4 MB/s | 216 kB 00:00
+(8/10): python3-gpg-1.15.1-6.fc35.x86_64.rpm 5.6 MB/s | 261 kB 00:00
+(9/10): shadow-utils-4.9-5.fc35.x86_64.rpm 14 MB/s | 1.1 MB 00:00
+(10/10): vim-minimal-8.2.3582-1.fc35.x86_64.rpm 8.2 MB/s | 706 kB 00:00
+--------------------------------------------------------------------------------
+Total 9.4 MB/s | 9.3 MB 00:00
+Running transaction check
+Transaction check succeeded.
+Running transaction test
+Transaction test succeeded.
+Running transaction
+ Preparing : 1/1
+ Upgrading : glibc-common-2.34-8.fc35.x86_64 1/19
+ Upgrading : glibc-minimal-langpack-2.34-8.fc35.x86_64 2/19
+ Running scriptlet: glibc-2.34-8.fc35.x86_64 3/19
+ Upgrading : glibc-2.34-8.fc35.x86_64 3/19
+ Running scriptlet: glibc-2.34-8.fc35.x86_64 3/19
+ Installing : glibc-gconv-extra-2.34-8.fc35.x86_64 4/19
+ Running scriptlet: glibc-gconv-extra-2.34-8.fc35.x86_64 4/19
+ Upgrading : libgpg-error-1.43-1.fc35.x86_64 5/19
+ Upgrading : gpgme-1.15.1-6.fc35.x86_64 6/19
+ Upgrading : python3-gpg-1.15.1-6.fc35.x86_64 7/19
+ Upgrading : glib2-2.70.1-1.fc35.x86_64 8/19
+ Upgrading : shadow-utils-2:4.9-5.fc35.x86_64 9/19
+ Upgrading : vim-minimal-2:8.2.3582-1.fc35.x86_64 10/19
+ Cleanup : glib2-2.70.0-5.fc35.x86_64 11/19
+ Cleanup : shadow-utils-2:4.9-3.fc35.x86_64 12/19
+ Cleanup : python3-gpg-1.15.1-4.fc35.x86_64 13/19
+ Cleanup : gpgme-1.15.1-4.fc35.x86_64 14/19
+ Cleanup : vim-minimal-2:8.2.3568-1.fc35.x86_64 15/19
+ Cleanup : libgpg-error-1.42-3.fc35.x86_64 16/19
+ Cleanup : glibc-2.34-7.fc35.x86_64 17/19
+ Cleanup : glibc-minimal-langpack-2.34-7.fc35.x86_64 18/19
+ Cleanup : glibc-common-2.34-7.fc35.x86_64 19/19
+ Running scriptlet: glibc-common-2.34-7.fc35.x86_64 19/19
+ Verifying : glibc-gconv-extra-2.34-8.fc35.x86_64 1/19
+ Verifying : glib2-2.70.1-1.fc35.x86_64 2/19
+ Verifying : glib2-2.70.0-5.fc35.x86_64 3/19
+ Verifying : glibc-2.34-8.fc35.x86_64 4/19
+ Verifying : glibc-2.34-7.fc35.x86_64 5/19
+ Verifying : glibc-common-2.34-8.fc35.x86_64 6/19
+ Verifying : glibc-common-2.34-7.fc35.x86_64 7/19
+ Verifying : glibc-minimal-langpack-2.34-8.fc35.x86_64 8/19
+ Verifying : glibc-minimal-langpack-2.34-7.fc35.x86_64 9/19
+ Verifying : gpgme-1.15.1-6.fc35.x86_64 10/19
+ Verifying : gpgme-1.15.1-4.fc35.x86_64 11/19
+ Verifying : libgpg-error-1.43-1.fc35.x86_64 12/19
+ Verifying : libgpg-error-1.42-3.fc35.x86_64 13/19
+ Verifying : python3-gpg-1.15.1-6.fc35.x86_64 14/19
+ Verifying : python3-gpg-1.15.1-4.fc35.x86_64 15/19
+ Verifying : shadow-utils-2:4.9-5.fc35.x86_64 16/19
+ Verifying : shadow-utils-2:4.9-3.fc35.x86_64 17/19
+ Verifying : vim-minimal-2:8.2.3582-1.fc35.x86_64 18/19
+ Verifying : vim-minimal-2:8.2.3568-1.fc35.x86_64 19/19
+
+Upgraded:
+ glib2-2.70.1-1.fc35.x86_64
+ glibc-2.34-8.fc35.x86_64
+ glibc-common-2.34-8.fc35.x86_64
+ glibc-minimal-langpack-2.34-8.fc35.x86_64
+ gpgme-1.15.1-6.fc35.x86_64
+ libgpg-error-1.43-1.fc35.x86_64
+ python3-gpg-1.15.1-6.fc35.x86_64
+ shadow-utils-2:4.9-5.fc35.x86_64
+ vim-minimal-2:8.2.3582-1.fc35.x86_64
+Installed:
+ glibc-gconv-extra-2.34-8.fc35.x86_64
+
+Complete!
+STEP 5/5: RUN dnf install -y gcc
+Last metadata expiration check: 0:00:10 ago on Fri Nov 12 21:22:21 2021.
+Dependencies resolved.
+================================================================================
+ Package Arch Version Repository Size
+================================================================================
+Installing:
+ gcc x86_64 11.2.1-1.fc35 fedora 31 M
+Installing dependencies:
+ binutils x86_64 2.37-10.fc35 fedora 6.0 M
+ binutils-gold x86_64 2.37-10.fc35 fedora 728 k
+ cpp x86_64 11.2.1-1.fc35 fedora 10 M
+ elfutils-debuginfod-client x86_64 0.185-5.fc35 fedora 36 k
+ gc x86_64 8.0.4-6.fc35 fedora 103 k
+ glibc-devel x86_64 2.34-8.fc35 updates 146 k
+ glibc-headers-x86 noarch 2.34-8.fc35 updates 544 k
+ guile22 x86_64 2.2.7-3.fc35 fedora 6.4 M
+ kernel-headers x86_64 5.14.9-300.fc35 fedora 1.3 M
+ libmpc x86_64 1.2.1-3.fc35 fedora 62 k
+ libpkgconf x86_64 1.8.0-1.fc35 fedora 36 k
+ libtool-ltdl x86_64 2.4.6-42.fc35 fedora 36 k
+ libxcrypt-devel x86_64 4.4.26-4.fc35 fedora 29 k
+ make x86_64 1:4.3-6.fc35 fedora 533 k
+ pkgconf x86_64 1.8.0-1.fc35 fedora 41 k
+ pkgconf-m4 noarch 1.8.0-1.fc35 fedora 14 k
+ pkgconf-pkg-config x86_64 1.8.0-1.fc35 fedora 10 k
+
+Transaction Summary
+================================================================================
+Install 18 Packages
+
+Total download size: 57 M
+Installed size: 196 M
+Downloading Packages:
+(1/18): binutils-gold-2.37-10.fc35.x86_64.rpm 1.4 MB/s | 728 kB 00:00
+(2/18): elfutils-debuginfod-client-0.185-5.fc35 565 kB/s | 36 kB 00:00
+(3/18): gc-8.0.4-6.fc35.x86_64.rpm 1.4 MB/s | 103 kB 00:00
+(4/18): binutils-2.37-10.fc35.x86_64.rpm 6.1 MB/s | 6.0 MB 00:00
+(5/18): cpp-11.2.1-1.fc35.x86_64.rpm 9.2 MB/s | 10 MB 00:01
+(6/18): kernel-headers-5.14.9-300.fc35.x86_64.r 11 MB/s | 1.3 MB 00:00
+(7/18): libmpc-1.2.1-3.fc35.x86_64.rpm 785 kB/s | 62 kB 00:00
+(8/18): guile22-2.2.7-3.fc35.x86_64.rpm 16 MB/s | 6.4 MB 00:00
+(9/18): libpkgconf-1.8.0-1.fc35.x86_64.rpm 376 kB/s | 36 kB 00:00
+(10/18): libtool-ltdl-2.4.6-42.fc35.x86_64.rpm 520 kB/s | 36 kB 00:00
+(11/18): libxcrypt-devel-4.4.26-4.fc35.x86_64.r 429 kB/s | 29 kB 00:00
+(12/18): pkgconf-1.8.0-1.fc35.x86_64.rpm 471 kB/s | 41 kB 00:00
+(13/18): pkgconf-m4-1.8.0-1.fc35.noarch.rpm 148 kB/s | 14 kB 00:00
+(14/18): pkgconf-pkg-config-1.8.0-1.fc35.x86_64 143 kB/s | 10 kB 00:00
+(15/18): glibc-devel-2.34-8.fc35.x86_64.rpm 518 kB/s | 146 kB 00:00
+(16/18): gcc-11.2.1-1.fc35.x86_64.rpm 21 MB/s | 31 MB 00:01
+(17/18): make-4.3-6.fc35.x86_64.rpm 702 kB/s | 533 kB 00:00
+(18/18): glibc-headers-x86-2.34-8.fc35.noarch.r 2.0 MB/s | 544 kB 00:00
+--------------------------------------------------------------------------------
+Total 19 MB/s | 57 MB 00:02
+Running transaction check
+Transaction check succeeded.
+Running transaction test
+Transaction test succeeded.
+Running transaction
+ Preparing : 1/1
+ Installing : libmpc-1.2.1-3.fc35.x86_64 1/18
+ Installing : cpp-11.2.1-1.fc35.x86_64 2/18
+ Installing : glibc-headers-x86-2.34-8.fc35.noarch 3/18
+ Installing : pkgconf-m4-1.8.0-1.fc35.noarch 4/18
+ Installing : libtool-ltdl-2.4.6-42.fc35.x86_64 5/18
+ Installing : libpkgconf-1.8.0-1.fc35.x86_64 6/18
+ Installing : pkgconf-1.8.0-1.fc35.x86_64 7/18
+ Installing : pkgconf-pkg-config-1.8.0-1.fc35.x86_64 8/18
+ Installing : kernel-headers-5.14.9-300.fc35.x86_64 9/18
+ Installing : libxcrypt-devel-4.4.26-4.fc35.x86_64 10/18
+ Installing : glibc-devel-2.34-8.fc35.x86_64 11/18
+ Installing : gc-8.0.4-6.fc35.x86_64 12/18
+ Installing : guile22-2.2.7-3.fc35.x86_64 13/18
+ Installing : make-1:4.3-6.fc35.x86_64 14/18
+ Installing : elfutils-debuginfod-client-0.185-5.fc35.x86_64 15/18
+ Installing : binutils-gold-2.37-10.fc35.x86_64 16/18
+ Installing : binutils-2.37-10.fc35.x86_64 17/18
+ Running scriptlet: binutils-2.37-10.fc35.x86_64 17/18
+ Installing : gcc-11.2.1-1.fc35.x86_64 18/18
+ Running scriptlet: gcc-11.2.1-1.fc35.x86_64 18/18
+ Verifying : binutils-2.37-10.fc35.x86_64 1/18
+ Verifying : binutils-gold-2.37-10.fc35.x86_64 2/18
+ Verifying : cpp-11.2.1-1.fc35.x86_64 3/18
+ Verifying : elfutils-debuginfod-client-0.185-5.fc35.x86_64 4/18
+ Verifying : gc-8.0.4-6.fc35.x86_64 5/18
+ Verifying : gcc-11.2.1-1.fc35.x86_64 6/18
+ Verifying : guile22-2.2.7-3.fc35.x86_64 7/18
+ Verifying : kernel-headers-5.14.9-300.fc35.x86_64 8/18
+ Verifying : libmpc-1.2.1-3.fc35.x86_64 9/18
+ Verifying : libpkgconf-1.8.0-1.fc35.x86_64 10/18
+ Verifying : libtool-ltdl-2.4.6-42.fc35.x86_64 11/18
+ Verifying : libxcrypt-devel-4.4.26-4.fc35.x86_64 12/18
+ Verifying : make-1:4.3-6.fc35.x86_64 13/18
+ Verifying : pkgconf-1.8.0-1.fc35.x86_64 14/18
+ Verifying : pkgconf-m4-1.8.0-1.fc35.noarch 15/18
+ Verifying : pkgconf-pkg-config-1.8.0-1.fc35.x86_64 16/18
+ Verifying : glibc-devel-2.34-8.fc35.x86_64 17/18
+ Verifying : glibc-headers-x86-2.34-8.fc35.noarch 18/18
+
+Installed:
+ binutils-2.37-10.fc35.x86_64
+ binutils-gold-2.37-10.fc35.x86_64
+ cpp-11.2.1-1.fc35.x86_64
+ elfutils-debuginfod-client-0.185-5.fc35.x86_64
+ gc-8.0.4-6.fc35.x86_64
+ gcc-11.2.1-1.fc35.x86_64
+ glibc-devel-2.34-8.fc35.x86_64
+ glibc-headers-x86-2.34-8.fc35.noarch
+ guile22-2.2.7-3.fc35.x86_64
+ kernel-headers-5.14.9-300.fc35.x86_64
+ libmpc-1.2.1-3.fc35.x86_64
+ libpkgconf-1.8.0-1.fc35.x86_64
+ libtool-ltdl-2.4.6-42.fc35.x86_64
+ libxcrypt-devel-4.4.26-4.fc35.x86_64
+ make-1:4.3-6.fc35.x86_64
+ pkgconf-1.8.0-1.fc35.x86_64
+ pkgconf-m4-1.8.0-1.fc35.noarch
+ pkgconf-pkg-config-1.8.0-1.fc35.x86_64
+
+Complete!
+COMMIT myimage
+Getting image source signatures
+Copying blob cd62a89550d0 skipped: already exists
+Copying blob 0f38b540528b done
+Copying config c0458c205e done
+Writing manifest to image destination
+Storing signatures
+--> c0458c205e5
+Successfully tagged localhost/myimage:latest
+c0458c205e533af9be1e5e9e665afb0d491f622a243deac76b4cbd0824bf23f6
+
+sh-5.0$ buildah images
+REPOSITORY TAG IMAGE ID CREATED SIZE
+localhost/myimage latest d3a341d4fd99 22 seconds ago 544 MB
+registry.fedoraproject.org/fedora 35 1b52edb08181 23 hours ago 159 MB
+
+sh-5.0$ ls -l output/
+total 4
+-rw-r--r--. 1 build build 7186 Nov 12 21:22 update-output.txt
+````
diff --git a/docs/tutorials/README.md b/docs/tutorials/README.md
new file mode 100644
index 0000000..08ccf9d
--- /dev/null
+++ b/docs/tutorials/README.md
@@ -0,0 +1,26 @@
+![buildah logo](../../logos/buildah-logo_large.png)
+
+# Buildah Tutorials
+
+## Links to a number of useful tutorials for the Buildah project.
+
+**[Introduction Tutorial](01-intro.md)**
+
+Learn how to build container images compliant with the [Open Container Initiative](https://www.opencontainers.org/) (OCI) [image specification](https://github.com/opencontainers/image-spec) using Buildah. This tutorial shows how to [Configure and Setup](01-intro.md#configure-and-install-buildah) Buildah, how to [build containers using a Dockerfile](01-intro.md#using-dockerfiles-with-buildah) and how to [build containers from scratch](01-intro.md#building-a-container-from-scratch).
+
+**[Buildah and Registries Tutorial](02-registries-repositories.md)**
+
+Learn how Buildah can be used to move OCI compliant images in and out of private or public registries.
+
+**[Buildah ONBUILD Tutorial](03-on-build.md)**
+
+Learn how Buildah can use the ONBUILD instruction in either a Dockerfile or via the `buildah config --onbuild` command to configure an image to run those instructions when the container is created. In this manner you can front-load setup of the container inside the image and minimize the steps needed to create one or more containers that share a number of initial settings, but need a few differentiators between each.
+
+**[Include Buildah in your build tool](04-include-in-your-build-tool.md)**
+
+Learn how to include Buildah as a library in your build tool.
+
+**[Rootless OpenShift container](05-openshift-rootless-build.md)**
+
+Learn how to build an image from a rootless OpenShift container.
+
diff --git a/examples/all-the-things.sh b/examples/all-the-things.sh
new file mode 100755
index 0000000..6b2ce55
--- /dev/null
+++ b/examples/all-the-things.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+set -e
+set -x
+read
+export PATH=`pwd`:$PATH
+systemctl restart ocid
+read
+: " Check if we have some images to work with."
+ocic image list
+read
+: " Create a working container, and capture its name "
+read
+echo '[container1=`buildah from ${1:-ubuntu}`]'
+container1=`buildah from ${1:-ubuntu}`
+read
+: " Mount that working container, and capture the mountpoint "
+read
+echo '[mountpoint1=`buildah mount $container1`]'
+mountpoint1=`buildah mount $container1`
+read
+: " Add a file to the container "
+read
+echo '[echo yay > $mountpoint1/file-in-root]'
+echo yay > $mountpoint1/file-in-root
+read
+: " Produce an image from the container "
+read
+buildah commit "$container1" ${2:-first-new-image}
+read
+: " Verify that our new image is there "
+read
+ocic image list
+read
+: " Unmount our working container and delete it "
+read
+buildah umount "$container1"
+buildah rm "$container1"
+read
+: " Now try it with ocid not running! "
+read
+systemctl stop ocid
+read
+: " You know what? Go ahead and use that image we just created, and capture its name "
+read
+echo '[container2=`buildah from ${2:-first-new-image}`]'
+container2=`buildah from ${2:-first-new-image}`
+read
+: " Mount that new working container, and capture the mountpoint "
+read
+echo '[mountpoint2=`buildah mount $container2`]'
+mountpoint2=`buildah mount $container2`
+read
+: " That file we added to the image is there, right? "
+read
+cat $mountpoint2/file-in-root
+read
+: " Add a file to the new container"
+read
+echo '[echo yay > $mountpoint2/another-file-in-root]'
+echo yay > $mountpoint2/another-file-in-root
+read
+: " Produce an image from the new container"
+read
+buildah commit "$container2" ${3:-second-new-image}
+read
+: " Unmount our new working container and delete it "
+read
+buildah umount "$container2"
+buildah rm "$container2"
+read
+: " Verify that our new new image is there"
+read
+systemctl start ocid
+ocic image list
+read
+: " Clean up, because I ran this like fifty times while testing "
+read
+ocic image remove --id=${2:-first-new-image}
+ocic image remove --id=${3:-second-new-image}
diff --git a/examples/copy.sh b/examples/copy.sh
new file mode 100755
index 0000000..84a2493
--- /dev/null
+++ b/examples/copy.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+set -e
+set -x
+: " Build a temporary directory; make sure ocid is running."
+export PATH=`pwd`:$PATH
+d=`mktemp -d`
+trap 'cd /;rm -fr "$d"' EXIT
+cd "$d"
+systemctl restart ocid
+read
+: " Check if we have some images to work with."
+read
+ocic image list
+read
+: " Create a working container, and capture its name "
+read
+echo '[container1=`buildah from ${1:-alpine}`]'
+container1=`buildah from ${1:-alpine}`
+read
+: " Mount that working container, and capture the mountpoint "
+read
+echo '[mountpoint1=`buildah mount $container1`]'
+mountpoint1=`buildah mount $container1`
+read
+: " List random files in the container "
+read
+echo '[find $mountpoint1 -name "random*"]'
+find $mountpoint1 -name "random*"
+read
+: " Ensure the default destination for copying files is / "
+read
+echo '[buildah config $container1 --workingdir /]'
+buildah config $container1 --workingdir /
+read
+: " Add a file to the container "
+read
+echo '[dd if=/dev/urandom of=random1 bs=512 count=1]'
+echo '[buildah copy $container1 random1]'
+dd if=/dev/urandom of=random1 bs=512 count=1
+buildah copy $container1 random1
+read
+: " Change the default destination for copying files "
+read
+echo '[buildah config $container1 --workingdir /tmp]'
+buildah config $container1 --workingdir /tmp
+read
+: " Add another new file to the container "
+read
+echo '[dd if=/dev/urandom of=random2 bs=512 count=1]'
+echo '[buildah copy $container1 random2]'
+dd if=/dev/urandom of=random2 bs=512 count=1
+buildah copy $container1 random2
+read
+: " Copy a subdirectory with some files in it "
+read
+echo '[mkdir -p randomsubdir]'
+echo '[dd if=/dev/urandom of=randomsubdir/random3 bs=512 count=1]'
+echo '[dd if=/dev/urandom of=randomsubdir/random4 bs=512 count=1]'
+echo '[buildah copy $container1 randomsubdir]'
+mkdir -p randomsubdir
+dd if=/dev/urandom of=randomsubdir/random3 bs=512 count=1
+dd if=/dev/urandom of=randomsubdir/random4 bs=512 count=1
+buildah copy $container1 randomsubdir
+read
+: " List some of the container's contents "
+read
+echo '[find $mountpoint1 -name "random*"]'
+find $mountpoint1 -name "random*"
+read
+: " Download a tarball "
+read
+echo '[wget -c https://releases.pagure.org/tmpwatch/tmpwatch-2.9.17.tar.bz2]'
+wget -c https://releases.pagure.org/tmpwatch/tmpwatch-2.9.17.tar.bz2
+read
+: " Copy that tarball to the container "
+read
+echo '[mkdir -p $mountpoint1/tmpwatch]'
+echo '[buildah copy $container1 --dest /tmpwatch tmpwatch-2.9.17.tar.bz2]'
+mkdir -p $mountpoint1/tmpwatch
+buildah copy $container1 --dest /tmpwatch tmpwatch-2.9.17.tar.bz2
+read
+: " Download another tarball to the container "
+read
+echo '[buildah copy $container1 --dest /tmpwatch https://releases.pagure.org/newt/newt-0.52.19.tar.gz]'
+buildah copy $container1 --dest /tmpwatch https://releases.pagure.org/newt/newt-0.52.19.tar.gz
+read
+: " List the contents of the target directory "
+read
+echo '[find $mountpoint1/tmpwatch]'
+find $mountpoint1/tmpwatch
+read
+: " Now 'add' the downloaded tarball to the container "
+read
+echo '[buildah add $container1 --dest /tmpwatch tmpwatch-2.9.17.tar.bz2]'
+buildah add $container1 --dest /tmpwatch tmpwatch-2.9.17.tar.bz2
+read
+: " List the contents of the target directory again "
+read
+echo '[find $mountpoint1/tmpwatch]'
+find $mountpoint1/tmpwatch
+read
+: " Clean up, because I ran this like fifty times while testing "
+read
+echo '[buildah delete $container1]'
+buildah rm $container1
diff --git a/examples/lighttpd.sh b/examples/lighttpd.sh
new file mode 100755
index 0000000..e8c95c8
--- /dev/null
+++ b/examples/lighttpd.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+set -x
+ctr1=$(buildah from "${1:-fedora}")
+
+## Get all updates and install our minimal httpd server
+buildah run "$ctr1" -- dnf update -y
+buildah run "$ctr1" -- dnf install -y lighttpd
+buildah run "$ctr1" -- mkdir /run/lighttpd
+
+## Include some buildtime annotations
+buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"
+
+## Run our server and expose the port
+buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
+buildah config --port 80 "$ctr1"
+
+## Commit this container to an image name
+buildah commit "$ctr1" "${2:-$USER/lighttpd}"
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..99bfe50
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,145 @@
+module github.com/containers/buildah
+
+go 1.20
+
+require (
+ github.com/containerd/containerd v1.7.9
+ github.com/containernetworking/cni v1.1.2
+ github.com/containernetworking/plugins v1.3.0
+ github.com/containers/common v0.57.4
+ github.com/containers/image/v5 v5.29.2
+ github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b
+ github.com/containers/ocicrypt v1.1.9
+ github.com/containers/storage v1.51.0
+ github.com/cyphar/filepath-securejoin v0.2.4
+ github.com/docker/distribution v2.8.3+incompatible
+ github.com/docker/docker v24.0.7+incompatible
+ github.com/docker/go-units v0.5.0
+ github.com/fsouza/go-dockerclient v1.10.0
+ github.com/hashicorp/go-multierror v1.1.1
+ github.com/mattn/go-shellwords v1.0.12
+ github.com/moby/buildkit v0.12.5
+ github.com/onsi/ginkgo v1.16.5
+ github.com/onsi/gomega v1.30.0
+ github.com/opencontainers/go-digest v1.0.0
+ github.com/opencontainers/image-spec v1.1.0-rc5
+ github.com/opencontainers/runc v1.1.10
+ github.com/opencontainers/runtime-spec v1.1.0
+ github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
+ github.com/opencontainers/selinux v1.11.0
+ github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722
+ github.com/seccomp/libseccomp-golang v0.10.0
+ github.com/sirupsen/logrus v1.9.3
+ github.com/spf13/cobra v1.8.0
+ github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.8.4
+ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
+ go.etcd.io/bbolt v1.3.8
+ golang.org/x/crypto v0.15.0
+ golang.org/x/sync v0.5.0
+ golang.org/x/sys v0.14.0
+ golang.org/x/term v0.14.0
+ sigs.k8s.io/yaml v1.4.0
+)
+
+require (
+ dario.cat/mergo v1.0.0 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+ github.com/BurntSushi/toml v1.3.2 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect
+ github.com/VividCortex/ewma v1.2.0 // indirect
+ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
+ github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
+ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+ github.com/chzyer/readline v1.5.1 // indirect
+ github.com/containerd/cgroups/v3 v3.0.2 // indirect
+ github.com/containerd/log v0.1.0 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
+ github.com/containerd/typeurl/v2 v2.1.1 // indirect
+ github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
+ github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/disiqueira/gotree/v3 v3.0.2 // indirect
+ github.com/distribution/reference v0.5.0 // indirect
+ github.com/docker/docker-credential-helpers v0.8.0 // indirect
+ github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
+ github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/go-jose/go-jose/v3 v3.0.1 // indirect
+ github.com/go-openapi/analysis v0.21.4 // indirect
+ github.com/go-openapi/errors v0.20.4 // indirect
+ github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
+ github.com/go-openapi/loads v0.21.2 // indirect
+ github.com/go-openapi/runtime v0.26.0 // indirect
+ github.com/go-openapi/spec v0.20.9 // indirect
+ github.com/go-openapi/strfmt v0.21.7 // indirect
+ github.com/go-openapi/swag v0.22.4 // indirect
+ github.com/go-openapi/validate v0.22.1 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/go-containerregistry v0.16.1 // indirect
+ github.com/google/go-intervals v0.0.2 // indirect
+ github.com/google/uuid v1.4.0 // indirect
+ github.com/gorilla/mux v1.8.0 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jinzhu/copier v0.4.0 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/klauspost/compress v1.17.3 // indirect
+ github.com/klauspost/pgzip v1.2.6 // indirect
+ github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/manifoldco/promptui v0.9.0 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
+ github.com/mattn/go-sqlite3 v1.14.18 // indirect
+ github.com/miekg/pkcs11 v1.1.1 // indirect
+ github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/moby/patternmatcher v0.6.0 // indirect
+ github.com/moby/sys/mountinfo v0.7.1 // indirect
+ github.com/moby/sys/sequential v0.5.0 // indirect
+ github.com/moby/term v0.5.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/morikuni/aec v1.0.0 // indirect
+ github.com/nxadm/tail v1.4.8 // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
+ github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/proglottis/gpgme v0.1.3 // indirect
+ github.com/rivo/uniseg v0.4.4 // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+ github.com/sigstore/fulcio v1.4.3 // indirect
+ github.com/sigstore/rekor v1.2.2 // indirect
+ github.com/sigstore/sigstore v1.7.5 // indirect
+ github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
+ github.com/sylabs/sif/v2 v2.15.0 // indirect
+ github.com/tchap/go-patricia/v2 v2.3.1 // indirect
+ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+ github.com/ulikunitz/xz v0.5.11 // indirect
+ github.com/vbatts/tar-split v0.11.5 // indirect
+ github.com/vbauerster/mpb/v8 v8.6.2 // indirect
+ github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
+ github.com/vishvananda/netns v0.0.4 // indirect
+ go.mongodb.org/mongo-driver v1.11.3 // indirect
+ go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
+ golang.org/x/mod v0.13.0 // indirect
+ golang.org/x/net v0.18.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.14.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
+ google.golang.org/grpc v1.58.3 // indirect
+ google.golang.org/protobuf v1.31.0 // indirect
+ gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
+ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/klog v1.0.0 // indirect
+ tags.cncf.io/container-device-interface v0.6.2 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..7547d24
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,616 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A=
+github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs=
+github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
+github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
+github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc=
+github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
+github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
+github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
+github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
+github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
+github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
+github.com/containers/common v0.57.4 h1:kmfBad92kUjP5X44BPpOwMe+eZQqaKETfS+ASeL0g+g=
+github.com/containers/common v0.57.4/go.mod h1:o3L3CyOI9yr+JC8l4dZgvqTxcjs3qdKmkek00uchgvw=
+github.com/containers/image/v5 v5.29.2 h1:b8U0XYWhaQbKucK73IbmSm8WQyKAhKDbAHQc45XlsOw=
+github.com/containers/image/v5 v5.29.2/go.mod h1:kQ7qcDsps424ZAz24thD+x7+dJw1vgur3A9tTDsj97E=
+github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
+github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b h1:8XvNAm+g7ivwPUkyiHvBs7z356JWpK9a0FDaek86+sY=
+github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b/go.mod h1:menB9p4o5HckgcLW6cO0+dl6+axkVmSqKlrNcratsh4=
+github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
+github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
+github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
+github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
+github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
+github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
+github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsouza/go-dockerclient v1.10.0 h1:ppSBsbR60I1DFbV4Ag7LlHlHakHFRNLk9XakATW1yVQ=
+github.com/fsouza/go-dockerclient v1.10.0/go.mod h1:+iNzAW78AzClIBTZ6WFjkaMvOgz68GyCJ236b1opLTs=
+github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
+github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
+github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
+github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
+github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
+github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
+github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
+github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
+github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
+github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
+github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8=
+github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
+github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
+github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
+github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
+github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
+github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
+github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
+github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
+github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
+github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
+github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
+github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
+github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
+github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
+github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0=
+github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso=
+github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
+github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
+github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
+github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
+github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
+github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
+github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc=
+github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs=
+github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
+github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722 h1:vhEmg+NeucmSYnT2j9ukkZLrR/ZOFUuUiGhxlBAlW8U=
+github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722/go.mod h1:+rSifDZnwJPSW2uYHl7ePSVxq4DEu1VlhNR1uIz/Lm4=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
+github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
+github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
+github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
+github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
+github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
+github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
+github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
+github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
+github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
+github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw=
+github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
+github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
+github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
+github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
+github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
+github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
+go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
+go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
+go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
+go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
+go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
+golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
+golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
+golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
+gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg=
+tags.cncf.io/container-device-interface v0.6.2/go.mod h1:Shusyhjs1A5Na/kqPVLL0KqnHQHuunol9LFeUNkuGVE=
diff --git a/hack/Dockerfile b/hack/Dockerfile
new file mode 100644
index 0000000..db11900
--- /dev/null
+++ b/hack/Dockerfile
@@ -0,0 +1,7 @@
+FROM fedora:latest
+RUN dnf -y update; dnf -y clean all
+RUN dnf -y install nginx --setopt install_weak_deps=false; dnf -y clean all
+RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+RUN echo "nginx on Fedora" > /usr/share/nginx/html/index.html
+EXPOSE 80
+CMD [ "/usr/sbin/nginx" ]
diff --git a/hack/apparmor_tag.sh b/hack/apparmor_tag.sh
new file mode 100755
index 0000000..eea722d
--- /dev/null
+++ b/hack/apparmor_tag.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+if pkg-config libapparmor 2>/dev/null; then
+ echo apparmor
+fi
diff --git a/hack/build_speed.sh b/hack/build_speed.sh
new file mode 100755
index 0000000..7e8aebc
--- /dev/null
+++ b/hack/build_speed.sh
@@ -0,0 +1,128 @@
+#! /bin/sh
+
+# The main goal of this script is to test and time builds using Buildah or Docker.
+# We hope to use it to help optimize Buildah build performance
+#
+# It takes two options.
+# The first option tells the type of container image
+# build to perform. Valid options are:
+# Docker - docker build
+# Buildah - buildah bud
+# Both - Do docker build followed by buildah bud
+#
+# The second option specifies a directory or the word "cleanup".
+# The script will 'find' files beginning with Dockerfile, for each Dockerfile
+# it finds it will run a build with the Dockerfile and directory for the
+# context. When it does the builds, it will call time on them to show how
+# long the builds take. The created image name will be a combination of the
+# lowercased directory name that the Dockerfile was found in plus the
+# lowercased Dockerfile name.
+#
+# If the second option is "cleanup", the script will remove all images from the
+# specified builder.
+#
+# The script does not check for conflicts on naming.
+#
+# Output file:
+#
+#
+# cat /tmp/build_speed.json
+# {
+# "/usr/share/fedora-dockerfiles/redis/Dockerfile": {
+# "docker": {
+# "command": "docker build -f /usr/share/fedora-dockerfiles/redis/Dockerfile -t redis_dockerfile /usr/share/fedora-dockerfiles/redis",
+# "real": "3:28.70"
+# },
+# "buildah": {
+# "command": "buildah bud --layers -f /usr/share/fedora-dockerfiles/redis/Dockerfile -t redis_dockerfile /usr/share/fedora-dockerfiles/redis",
+# "real": "2:55.48"
+# }
+# }
+# }
+#
+# Example uses:
+# ./build_speed.sh Docker ~/MyImages
+# ./build_speed.sh Both /usr/share/fedora-dockerfiles/django/Dockerfile
+
+#totalsfile=$(mktemp /tmp/buildspeedXXX.json)
+totalsfile=/tmp/build_speed.json
+commaDockerfile=""
+
+echo -n '{' > $totalsfile
+Dockerfiles() {
+ find -L $1 -name Dockerfile\*
+}
+
+Buildah() {
+ Name=$1
+ Dockerfile=$2
+ Context=$3
+ echo buildah bud --layers -f ${Dockerfile} -t ${Name} ${Context}
+ Time buildah bud --layers -f ${Dockerfile} -t ${Name} ${Context}
+}
+
+Time() {
+ outfile=$(mktemp /tmp/buildspeedXXX)
+ /usr/bin/time -o $outfile --f "%E" $@
+ echo "{\"engine\": \"$1\", \"command\": \"$@\", \"real\": \"$(cat ${outfile})\"}"
+ echo -n "${comma}\"$1\": {\"command\": \"$@\", \"real\": \"$(cat ${outfile})\"}" >> $totalsfile
+ comma=","
+ rm -f $outfile
+}
+
+Docker() {
+ Name=$1
+ Dockerfile=$2
+ Context=$3
+ echo docker build -f ${Dockerfile} -t ${Name} ${Context}
+ Time docker build -f ${Dockerfile} -t ${Name} ${Context}
+}
+
+Both() {
+ comma=""
+ echo -n "${commaDockerfile}\"$2\": {" >> $totalsfile
+ commaDockerfile=","
+ Docker $1 $2 $3
+ Buildah $1 $2 $3
+ echo -n "}" >> $totalsfile
+}
+
+Docker_cleanup() {
+ docker rmi --force $(docker images -q)
+}
+
+Buildah_cleanup() {
+ buildah rmi --force --all
+}
+
+Both_cleanup() {
+ Docker_cleanup
+ Buildah_cleanup
+}
+
+Cmd=${1?Missing CMD argument}
+Path=${2?Missing PATH argument}
+
+case "$Cmd" in
+ Docker) ;;
+ Buildah) ;;
+ Both) ;;
+ *) echo "Invalid command '$Cmd'; must be Buildah, Docker, or Both"; exit 1;;
+esac
+
+
+if [ "$Path" == "cleanup" ]; then
+ ${Cmd}_cleanup
+ exit 0
+fi
+
+for i in $(Dockerfiles ${Path});do
+ name=$(basename $(dirname $i) | sed -e 's/\(.*\)/\L\1/')
+ name=${name}_$(basename $i | sed -e 's/\(.*\)/\L\1/')
+ echo ${Cmd} ${name} $i $(dirname $i)
+ ${Cmd} ${name} $i $(dirname $i)
+done
+
+echo '}'>>$totalsfile
+echo cat $totalsfile
+cat $totalsfile
diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh
new file mode 100755
index 0000000..457a90d
--- /dev/null
+++ b/hack/get_ci_vm.sh
@@ -0,0 +1,64 @@
#!/usr/bin/env bash

#
# For help and usage information, simply execute the script w/o any arguments.
#
# This script is intended to be run by Red Hat buildah developers who need
# to debug problems specifically related to Cirrus-CI automated testing.
# It requires that you have been granted prior access to create VMs in
# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
# with supervision upon request.

set -e

SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")

# Help detect if we were called by get_ci_vm container
GET_CI_VM="${GET_CI_VM:-0}"
# in_get_ci_vm ARG - abort unless running inside the get_ci_vm container,
# whose entrypoint sets GET_CI_VM to a non-zero value.
in_get_ci_vm() {
    if ((GET_CI_VM==0)); then
        echo "Error: $1 is not intended for use in this context"
        exit 2
    fi
}

# get_ci_vm APIv1 container entrypoint calls into this script
# to obtain required repo. specific configuration options.
if [[ "$1" == "--config" ]]; then
    in_get_ci_vm "$1"
    cat <<EOF
DESTDIR="/var/tmp/go/src/github.com/containers/buildah"
UPSTREAM_REPO="https://github.com/containers/buildah.git"
GCLOUD_PROJECT="buildah"
GCLOUD_IMGPROJECT="libpod-218412"
GCLOUD_CFG="buildah"
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-c}"
GCLOUD_CPUS="2"
GCLOUD_MEMORY="4Gb"
GCLOUD_DISK="200"
EOF
elif [[ "$1" == "--setup" ]]; then
    in_get_ci_vm "$1"
    # get_ci_vm container entrypoint calls us with this option on the
    # Cirrus-CI environment instance, to perform repo.-specific setup.
    cd $REPO_DIRPATH
    echo "+ Loading ./contrib/cirrus/lib.sh" > /dev/stderr
    source ./contrib/cirrus/lib.sh
    echo "+ Running environment setup" > /dev/stderr
    ./contrib/cirrus/setup.sh
else
    # Create and access VM for specified Cirrus-CI task
    # (runs on the developer's workstation; repo is mounted read-only-ish
    # via an overlay, gcloud credentials are bind-mounted in).
    mkdir -p $HOME/.config/gcloud/ssh
    podman run -it --rm \
        --tz=local \
        -e NAME="$USER" \
        -e SRCDIR=/src \
        -e GCLOUD_ZONE="$GCLOUD_ZONE" \
        -e A_DEBUG="${A_DEBUG:-0}" \
        -v $REPO_DIRPATH:/src:O \
        -v $HOME/.config/gcloud:/root/.config/gcloud:z \
        -v $HOME/.config/gcloud/ssh:/root/.ssh:z \
        quay.io/libpod/get_ci_vm:latest "$@"
fi
diff --git a/hack/libsubid_tag.sh b/hack/libsubid_tag.sh
new file mode 100755
index 0000000..31412b3
--- /dev/null
+++ b/hack/libsubid_tag.sh
@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Print the "libsubid" build tag if a small test program that links against
# libsubid (shadow-utils) compiles; the build system uses the output to
# decide whether subid-based subuid/subgid lookups are available. Prints
# nothing (and exits 0) on non-Linux targets.
if test $(${GO:-go} env GOOS) != "linux" ; then
    exit 0
fi
tmpdir="$PWD/tmp.$RANDOM"
mkdir -p "$tmpdir"
# Clean up the scratch directory however we exit.
trap 'rm -fr "$tmpdir"' EXIT
# Compile the heredoc test program from stdin; the SUBID_ABI_MAJOR check
# selects the libsubid >= 4 API name vs. the older one.
cc -o "$tmpdir"/libsubid_tag -l subid -x c - > /dev/null 2> /dev/null << EOF
#include <shadow/subid.h>
#include <stdio.h>
#include <stdlib.h>

const char *Prog = "test";
FILE *shadow_logfd = NULL;

int main() {
	struct subid_range *ranges = NULL;
#if SUBID_ABI_MAJOR >= 4
	subid_get_uid_ranges("root", &ranges);
#else
	get_subuid_ranges("root", &ranges);
#endif
	free(ranges);
	return 0;
}
EOF
if test $? -eq 0 ; then
    echo libsubid
fi
diff --git a/hack/systemd_tag.sh b/hack/systemd_tag.sh
new file mode 100755
index 0000000..5af3228
--- /dev/null
+++ b/hack/systemd_tag.sh
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Print the "systemd" build tag if the systemd development headers are
# present, detected by running the preprocessor (default: "cc -E") over a
# one-line include of sd-daemon.h.
${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
#include <systemd/sd-daemon.h>
EOF
if test $? -eq 0 ; then
    echo systemd
fi
diff --git a/hack/tree_status.sh b/hack/tree_status.sh
new file mode 100755
index 0000000..11c9d6c
--- /dev/null
+++ b/hack/tree_status.sh
@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Exit nonzero if the git working tree contains uncommitted changes; CI uses
# this to verify that generated/vendored files are committed and in sync.
set -e

STATUS=$(git status --porcelain)
if [[ -n $STATUS ]]; then
    # Dirty tree: report what changed and fail.
    echo "tree is dirty, please commit all changes and sync the vendor.conf"
    echo ""
    echo "$STATUS"
    git diff
    exit 1
fi
echo "tree is clean"
diff --git a/hack/xref-helpmsgs-manpages b/hack/xref-helpmsgs-manpages
new file mode 100755
index 0000000..0b1d15a
--- /dev/null
+++ b/hack/xref-helpmsgs-manpages
@@ -0,0 +1,333 @@
+#!/usr/bin/perl
+#
+# xref-helpmsgs-manpages - cross-reference --help options against man pages
+#
+package LibPod::CI::XrefHelpmsgsManpages;
+
+use v5.14;
+use utf8;
+
+use strict;
+use warnings;
+
+(our $ME = $0) =~ s|.*/||;
+our $VERSION = '0.1';
+
+# For debugging, show data structures using DumpTree($var)
+#use Data::TreeDumper; $Data::TreeDumper::Displayaddress = 0;
+
+# unbuffer output
+$| = 1;
+
+###############################################################################
+# BEGIN user-customizable section
+
+# Path to desired executable
+my $Default_Tool = './bin/buildah';
+my $TOOL = $ENV{BUILDAH} || $Default_Tool;
+
+# Path to all doc files, including .rst and (down one level) markdown
+my $Docs_Path = 'docs';
+
+# Path to markdown source files (of the form <toolname>-*.1.md)
+my $Markdown_Path = "$Docs_Path";
+
+# Global error count
+my $Errs = 0;
+
+# END user-customizable section
+###############################################################################
+
+use FindBin;
+
+###############################################################################
+# BEGIN boilerplate args checking, usage messages
+
# usage - print the help message and exit 0. Wired to --help via GetOptions.
sub usage {
    print <<"END_USAGE";
Usage: $ME [OPTIONS]

$ME recursively runs '<tool> --help' against
all subcommands; and recursively reads <tool>-*.1.md files
in $Markdown_Path, then cross-references that each --help
option is listed in the appropriate man page and vice-versa.

$ME invokes '\$BUILDAH' (default: $Default_Tool).

Exit status is zero if no inconsistencies found, one otherwise

OPTIONS:

  -v, --verbose  show verbose progress indicators

  --help         display this message
  --version      display program name and version
END_USAGE

    exit;
}
+
# Command-line options. Note that this operates directly on @ARGV !
our $debug = 0;
our $verbose = 0;
# handle_opts - parse options out of @ARGV, setting the package globals
# $debug and $verbose; --help and --version print and exit immediately.
# Dies with a hint on any unrecognized option.
sub handle_opts {
    use Getopt::Long;
    GetOptions(
        'debug!'     => \$debug,
        'verbose|v'  => \$verbose,

        help         => \&usage,
        version      => sub { print "$ME version $VERSION\n"; exit 0 },
    ) or die "Try `$ME --help' for help\n";
}
+
+# END boilerplate args checking, usage messages
+###############################################################################
+
+############################## CODE BEGINS HERE ###############################
+
# The term is "modulino": run main() when executed, do nothing when required.
__PACKAGE__->main() unless caller();

# Main code.
# main - parse options, gather the option/subcommand trees from both
# '--help' output and the man pages, cross-reference them in both
# directions, and exit nonzero if any inconsistency was found.
sub main {
    # Note that we operate directly on @ARGV, not on function parameters.
    # This is deliberate: it's because Getopt::Long only operates on @ARGV
    # and there's no clean way to make it use @_.
    handle_opts();                          # will set package globals

    # Fetch command-line arguments.  Barf if too many.
    die "$ME: Too many arguments; try $ME --help\n"                 if @ARGV;

    my $help = tool_help();
    my $man = tool_man('buildah');

    xref_by_help($help, $man);
    xref_by_man($help, $man);

#    xref_rst($help, $rst);

    # $Errs is a count; !! collapses it to 0 (clean) or 1 (problems found).
    exit !!$Errs;
}
+
+###############################################################################
+# BEGIN cross-referencing
+
##################
#  xref_by_help  #  Find keys in '--help' but not in man
##################
# Arguments: hashref of --help data, hashref of man-page data, and the
# subcommand path accumulated during recursion (for error messages).
# Increments the global $Errs for every option/subcommand that --help
# advertises but the corresponding man page does not document.
sub xref_by_help {
    my ($help, $man, @subcommand) = @_;

    for my $k (sort keys %$help) {
        if (exists $man->{$k}) {
            # Hashref value means $k is a subcommand: recurse into it.
            if (ref $help->{$k}) {
                xref_by_help($help->{$k}, $man->{$k}, @subcommand, $k);
            }
            # Otherwise, non-ref is leaf node such as a --option
        }
        else {
            # _path (set by tool_man) names the md file, for the message.
            my $man = $man->{_path} || 'man';
            warn "$ME: buildah @subcommand --help lists $k, but $k not in $man\n";
            ++$Errs;
        }
    }
}
+
#################
#  xref_by_man  #  Find keys in man pages but not in --help
#################
#
# In an ideal world we could share the functionality in one function; but
# there are just too many special cases in man pages.
#
# Mirror image of xref_by_help(): flags everything a man page documents
# that '--help' does not list, modulo a few known aliases.
sub xref_by_man {
    my ($help, $man, @subcommand) = @_;

    # FIXME: this generates way too much output
    # _path is tool_man()'s metadata key, not a real option; skip it.
    for my $k (grep { $_ ne '_path' } sort keys %$man) {
        if (exists $help->{$k}) {
            if (ref $man->{$k}) {
                xref_by_man($help->{$k}, $man->{$k}, @subcommand, $k);
            }
        }
        elsif ($k ne '--help' && $k ne '-h') {
            my $man = $man->{_path} || 'man';

            # Special case: 'buildah run --tty' is an invisible alias for -t
            next if $k eq '--tty' && $help->{'--terminal'};

            # Special case: '--net' is an undocumented shortcut in from,run
            next if $k eq '--net' && $help->{'--network'};

            # Special case: global options, re-documented in buildah-bud.md
#            next if "@subcommand" eq "bud" && $k =~ /^--userns-.id-map$/;

            warn "$ME: buildah @subcommand: $k in $man, but not --help\n";
            ++$Errs;
        }
    }
}
+
+# END cross-referencing
+###############################################################################
+# BEGIN data gathering
+
###############
#  tool_help  #  Parse output of '<tool> [subcommand] --help'
###############
# Runs "$TOOL @_ --help" and returns a hashref whose keys are subcommand
# names (values: nested hashrefs, built recursively) and option strings
# like '--foo' / '-f' (values: 1). The 'help' subcommand and -h/--help
# are deliberately excluded.
sub tool_help {
    my %help;
    open my $fh, '-|', $TOOL, @_, '--help'
        or die "$ME: Cannot fork: $!\n";
    my $section = '';
    while (my $line = <$fh>) {
        # Cobra is blessedly consistent in its output:
        #    Usage: ...
        #    Available Commands:
        #       ....
        #    Flags:
        #       ....
        #
        # Start by identifying the section we're in...
        if ($line =~ /^Available\s+(Commands):/) {
            $section = lc $1;
        }
        elsif ($line =~ /^(Flags):/) {
            $section = lc $1;
        }

        # ...then track commands and options. For subcommands, recurse.
        elsif ($section eq 'commands') {
            if ($line =~ /^\s{1,4}(\S+)\s/) {
                my $subcommand = $1;
                print "> buildah @_ $subcommand\n"               if $debug;
                $help{$subcommand} = tool_help(@_, $subcommand)
                    unless $subcommand eq 'help';       # 'help' not in man
            }
        }
        elsif ($section eq 'flags') {
            next if $line =~ /^\s+-h,\s+--help/;        # Ignore --help

            # Handle '--foo' or '-f, --foo'
            if ($line =~ /^\s{1,10}(--\S+)\s/) {
                print "> buildah @_ $1\n"                        if $debug;
                $help{$1} = 1;
            }
            elsif ($line =~ /^\s{1,10}(-\S),\s+(--\S+)\s/) {
                print "> buildah @_ $1, $2\n"                    if $debug;
                # Record the short and long forms as separate leaf keys.
                $help{$1} = $help{$2} = 1;
            }
        }
    }
    close $fh
        or die "$ME: Error running 'buildah @_ --help'\n";

    return \%help;
}
+
+
##############
#  tool_man  #  Parse contents of <tool>-*.1.md
##############
# Reads docs/<command>.1.md and returns a hashref mirroring tool_help()'s
# shape: subcommand keys (nested hashrefs, via recursion into linked man
# pages) and flag keys (value 1), plus a '_path' metadata entry naming the
# source file for error messages. Also warns (and counts in $Errs) when
# subcommands or flags are listed out of alphabetical order.
sub tool_man {
    my $command = shift;
    my $subpath = "$Markdown_Path/$command.1.md";
    my $manpath = "$FindBin::Bin/../$subpath";
    print "** $subpath \n"                              if $debug;

    my %man = (_path => $subpath);
    open my $fh, '<', $manpath
        or die "$ME: Cannot read $manpath: $!\n";
    my $section = '';
    my @most_recent_flags;
    my $previous_subcmd = '';
    my $previous_flag = '';
    my @line_history;           # Circular buffer of recent lines
    while (my $line = <$fh>) {
        chomp $line;
        push @line_history, $line;
        shift @line_history if @line_history > 2;
        next unless $line;      # skip empty lines

        # .md files designate sections with leading double hash
        if ($line =~ /^##\s*(GLOBAL\s+)?OPTIONS/) {
            $section = 'flags';
            $previous_flag = '';
        }
        elsif ($line =~ /^\#\#\s+(SUB)?COMMANDS/) {
            $section = 'commands';
        }
        elsif ($line =~ /^\#\#[^#]/) {
            # Any other section header ends flag/command tracking.
            $section = '';
        }

        # This will be a table containing subcommand names, links to man pages.
        # The format is slightly different between buildah.1.md and subcommands.
        elsif ($section eq 'commands') {
            # In tool.1.md
            if ($line =~ /^\|\s*buildah-(\S+?)\(\d\)/) {
                # $1 will be changed by recursion _*BEFORE*_ left-hand assignment
                my $subcmd = $1;
                $man{$subcmd} = tool_man("buildah-$1");
            }

            # In tool-<subcommand>.1.md
            elsif ($line =~ /^\|\s+(\S+)\s+\|\s+\[\S+\]\((\S+)\.1\.md\)/) {
                # $1 will be changed by recursion _*BEFORE*_ left-hand assignment
                my $subcmd = $1;
                # 'gt' = string comparison: enforce alphabetical ordering.
                if ($previous_subcmd gt $subcmd) {
                    warn "$ME: $subpath: '$previous_subcmd' and '$subcmd' are out of order\n";
                    ++$Errs;
                }
                $previous_subcmd = $subcmd;
                $man{$subcmd} = tool_man($2);
            }
        }

        # Flags should always be of the form '**-f**' or '**--flag**',
        # possibly separated by comma-space.
        elsif ($section eq 'flags') {
            # e.g. 'podman run --ip6', documented in man page, but nonexistent
            if ($line =~ /^not\s+implemented/i) {
                delete $man{$_} for @most_recent_flags;
            }

            # AAAAAAAaaaaargh, workaround for buildah-config --add-history
            # which enumerates a long list of options. Since buildah man pages
            # (unlike podman) don't use the '####' convention for options,
            # it's hard to differentiate 'this is an option' from 'this is
            # a __mention__ of an option'. Workaround: actual options must
            # be preceded by an empty line.
            next if $line_history[-2];

            @most_recent_flags = ();
            # Handle any variation of '**--foo**, **-f**'
            my $is_first = 1;
            while ($line =~ s/^\*\*((--[a-z0-9-]+)|(-.))\*\*(,\s+)?//g) {
                my $flag = $1;
                $man{$flag} = 1;
                if ($flag lt $previous_flag && $is_first) {
                    warn "$ME: $subpath:$.: $flag should precede $previous_flag\n";
                    ++$Errs;
                }
                $previous_flag = $flag if $is_first;
                # Keep track of them, in case we see 'Not implemented' below
                push @most_recent_flags, $flag;

                # Further iterations of /g are allowed to be out of order,
                # e.g., it's OK for --namespace,-ns to precede --nohead
                $is_first = 0;
            }
        }
    }
    close $fh;

    return \%man;
}
+
+# END data gathering
+###############################################################################
+
+1;
diff --git a/image.go b/image.go
new file mode 100644
index 0000000..7318e04
--- /dev/null
+++ b/image.go
@@ -0,0 +1,949 @@
+package buildah
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/docker"
+ "github.com/containers/buildah/internal/config"
+ "github.com/containers/buildah/internal/mkcw"
+ "github.com/containers/buildah/internal/tmpdir"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ specs "github.com/opencontainers/image-spec/specs-go"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
+ // suitable for specifying as a value of the PreferredManifestType
+ // member of a CommitOptions structure. It is also the default.
+ OCIv1ImageManifest = define.OCIv1ImageManifest
+ // Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
+ // manifest, suitable for specifying as a value of the
+ // PreferredManifestType member of a CommitOptions structure.
+ Dockerv2ImageManifest = define.Dockerv2ImageManifest
+)
+
// ExtractRootfsOptions is consumed by ExtractRootfs() which allows
// users to preserve nature of various modes like setuid, setgid and xattrs
// over the extracted file system objects. The zero value strips nothing
// and preserves extended attributes.
type ExtractRootfsOptions struct {
	StripSetuidBit bool // strip the setuid bit off of items being extracted.
	StripSetgidBit bool // strip the setgid bit off of items being extracted.
	StripXattrs    bool // don't record extended attributes of items being extracted.
}
+
// containerImageRef carries everything needed to commit a working
// container as an image: the storage handle and layer/container IDs,
// the serialized OCI (oconfig) and Docker (dconfig) configurations, and
// the commit-time options (compression, squashing, history handling,
// manifest type, overrides, etc.). It produces image sources via
// NewImageSource and whole images via NewImage.
type containerImageRef struct {
	fromImageName         string
	fromImageID           string
	store                 storage.Store
	compression           archive.Compression
	name                  reference.Named
	names                 []string
	containerID           string
	mountLabel            string
	layerID               string
	oconfig               []byte
	dconfig               []byte
	created               *time.Time
	createdBy             string
	historyComment        string
	annotations           map[string]string
	preferredManifestType string
	squash                bool
	confidentialWorkload  ConfidentialWorkloadOptions
	omitHistory           bool
	emptyLayer            bool
	idMappingOptions      *define.IDMappingOptions
	parent                string
	blobDirectory         string
	preEmptyLayers        []v1.History
	postEmptyLayers       []v1.History
	overrideChanges       []string
	overrideConfig        *manifest.Schema2Config
}
+
// blobLayerInfo records the storage-layer ID and blob size for a layer
// whose uncompressed digest is already known, keyed by that digest in
// containerImageSource.blobLayers.
type blobLayerInfo struct {
	ID   string
	Size int64
}
+
// containerImageSource holds the data NewImageSource assembles for serving
// a committed image: the in-memory manifest and configuration (with the
// config's digest precomputed), the temporary directory (path) holding
// freshly written layer blobs, and blobLayers for layers that can
// presumably be served straight from the layer store — confirm against
// the GetBlob implementation.
type containerImageSource struct {
	path         string
	ref          *containerImageRef
	store        storage.Store
	containerID  string
	mountLabel   string
	layerID      string
	names        []string
	compression  archive.Compression
	config       []byte
	configDigest digest.Digest
	manifest     []byte
	manifestType string
	blobDirectory string
	blobLayers   map[digest.Digest]blobLayerInfo
}
+
// NewImage builds a types.ImageCloser for this reference by creating an
// image source with NewImageSource and wrapping it via image.FromSource.
// The caller owns (and must Close) the returned image.
func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) {
	src, err := i.NewImageSource(ctx, sc)
	if err != nil {
		return nil, err
	}
	return image.FromSource(ctx, sc, src)
}
+
+func expectedOCIDiffIDs(image v1.Image) int {
+ expected := 0
+ for _, history := range image.History {
+ if !history.EmptyLayer {
+ expected = expected + 1
+ }
+ }
+ return expected
+}
+
+func expectedDockerDiffIDs(image docker.V2Image) int {
+ expected := 0
+ for _, history := range image.History {
+ if !history.EmptyLayer {
+ expected = expected + 1
+ }
+ }
+ return expected
+}
+
+// Compute the media types which we need to attach to a layer, given the type of
+// compression that we'll be applying.
+func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) {
+ omediaType = v1.MediaTypeImageLayer
+ dmediaType = docker.V2S2MediaTypeUncompressedLayer
+ if layerCompression != archive.Uncompressed {
+ switch layerCompression {
+ case archive.Gzip:
+ omediaType = v1.MediaTypeImageLayerGzip
+ dmediaType = manifest.DockerV2Schema2LayerMediaType
+ logrus.Debugf("compressing %s with gzip", what)
+ case archive.Bzip2:
+ // Until the image specs define a media type for bzip2-compressed layers, even if we know
+ // how to decompress them, we can't try to compress layers with bzip2.
+ return "", "", errors.New("media type for bzip2-compressed layers is not defined")
+ case archive.Xz:
+ // Until the image specs define a media type for xz-compressed layers, even if we know
+ // how to decompress them, we can't try to compress layers with xz.
+ return "", "", errors.New("media type for xz-compressed layers is not defined")
+ case archive.Zstd:
+ // Until the image specs define a media type for zstd-compressed layers, even if we know
+ // how to decompress them, we can't try to compress layers with zstd.
+ return "", "", errors.New("media type for zstd-compressed layers is not defined")
+ default:
+ logrus.Debugf("compressing %s with unknown compressor(?)", what)
+ }
+ }
+ return omediaType, dmediaType, nil
+}
+
+// Extract the container's whole filesystem as a filesystem image, wrapped
+// in LUKS-compatible encryption.
+func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) {
+ var image v1.Image
+ if err := json.Unmarshal(i.oconfig, &image); err != nil {
+ return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err)
+ }
+ mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
+ if err != nil {
+ return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
+ }
+ archiveOptions := mkcw.ArchiveOptions{
+ AttestationURL: options.AttestationURL,
+ CPUs: options.CPUs,
+ Memory: options.Memory,
+ TempDir: options.TempDir,
+ TeeType: options.TeeType,
+ IgnoreAttestationErrors: options.IgnoreAttestationErrors,
+ WorkloadID: options.WorkloadID,
+ DiskEncryptionPassphrase: options.DiskEncryptionPassphrase,
+ Slop: options.Slop,
+ FirmwareLibrary: options.FirmwareLibrary,
+ }
+ rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions)
+ if err != nil {
+ if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil {
+ logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
+ }
+ return nil, fmt.Errorf("converting rootfs %q: %w", i.containerID, err)
+ }
+ return ioutils.NewReadCloserWrapper(rc, func() error {
+ if err = rc.Close(); err != nil {
+ err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
+ }
+ if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
+ if err2 != nil {
+ err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
+ }
+ err = err2
+ } else {
+ logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
+ }
+ return err
+ }), nil
+}
+
+// Extract the container's whole filesystem as if it were a single layer.
+// Takes ExtractRootfsOptions as argument which allows caller to configure
+// preserve nature of setuid,setgid,sticky and extended attributes
+// on extracted rootfs.
// Extract the container's whole filesystem as if it were a single layer.
// Takes ExtractRootfsOptions as argument which allows caller to configure
// preserve nature of setuid,setgid,sticky and extended attributes
// on extracted rootfs.
//
// The copy runs in a background goroutine feeding an io.Pipe; its final
// error is delivered on the returned channel (closed when done), so
// callers should drain the stream and then receive from the channel.
// Closing the returned ReadCloser also unmounts the container's rootfs.
func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
	var uidMap, gidMap []idtools.IDMap
	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
	if err != nil {
		return nil, nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
	}
	pipeReader, pipeWriter := io.Pipe()
	errChan := make(chan error, 1)
	go func() {
		defer close(errChan)
		// Translate container ID mappings, if any, for the copier.
		if i.idMappingOptions != nil {
			uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
		}
		copierOptions := copier.GetOptions{
			UIDMap:         uidMap,
			GIDMap:         gidMap,
			StripSetuidBit: opts.StripSetuidBit,
			StripSetgidBit: opts.StripSetgidBit,
			StripXattrs:    opts.StripXattrs,
		}
		err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
		errChan <- err
		pipeWriter.Close()

	}()
	return ioutils.NewReadCloserWrapper(pipeReader, func() error {
		if err = pipeReader.Close(); err != nil {
			err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
		}
		// Only surface the unmount error if closing the pipe succeeded.
		if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
			if err2 != nil {
				err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
			}
			err = err2
		}
		return err
	}), errChan, nil
}
+
+// Build fresh copies of the container configuration structures so that we can edit them
+// without making unintended changes to the original Builder.
+func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
+ created := time.Now().UTC()
+ if i.created != nil {
+ created = *i.created
+ }
+
+ // Build an empty image, and then decode over it.
+ oimage := v1.Image{}
+ if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
+ return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
+ }
+ // Always replace this value, since we're newer than our base image.
+ oimage.Created = &created
+ // Clear the list of diffIDs, since we always repopulate it.
+ oimage.RootFS.Type = docker.TypeLayers
+ oimage.RootFS.DiffIDs = []digest.Digest{}
+ // Only clear the history if we're squashing, otherwise leave it be so that we can append
+ // entries to it.
+ if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
+ oimage.History = []v1.History{}
+ }
+
+ // Build an empty image, and then decode over it.
+ dimage := docker.V2Image{}
+ if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
+ return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
+ }
+ dimage.Parent = docker.ID(i.parent)
+ dimage.Container = i.containerID
+ if dimage.Config != nil {
+ dimage.ContainerConfig = *dimage.Config
+ }
+ // Always replace this value, since we're newer than our base image.
+ dimage.Created = created
+ // Clear the list of diffIDs, since we always repopulate it.
+ dimage.RootFS = &docker.V2S2RootFS{}
+ dimage.RootFS.Type = docker.TypeLayers
+ dimage.RootFS.DiffIDs = []digest.Digest{}
+ // Only clear the history if we're squashing, otherwise leave it be so
+ // that we can append entries to it. Clear the parent, too, we no
+ // longer include its layers and history.
+ if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
+ dimage.Parent = ""
+ dimage.History = []docker.V2S2History{}
+ }
+
+ // If we were supplied with a configuration, copy fields from it to
+ // matching fields in both formats.
+ if err := config.Override(dimage.Config, &oimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
+ return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, fmt.Errorf("applying changes: %w", err)
+ }
+
+ // If we're producing a confidential workload, override the command and
+ // assorted other settings that aren't expected to work correctly.
+ if i.confidentialWorkload.Convert {
+ dimage.Config.Entrypoint = []string{"/entrypoint"}
+ oimage.Config.Entrypoint = []string{"/entrypoint"}
+ dimage.Config.Cmd = nil
+ oimage.Config.Cmd = nil
+ dimage.Config.User = ""
+ oimage.Config.User = ""
+ dimage.Config.WorkingDir = ""
+ oimage.Config.WorkingDir = ""
+ dimage.Config.Healthcheck = nil
+ dimage.Config.Shell = nil
+ dimage.Config.Volumes = nil
+ oimage.Config.Volumes = nil
+ dimage.Config.ExposedPorts = nil
+ oimage.Config.ExposedPorts = nil
+ }
+
+ // Build empty manifests. The Layers lists will be populated later.
+ omanifest := v1.Manifest{
+ Versioned: specs.Versioned{
+ SchemaVersion: 2,
+ },
+ MediaType: v1.MediaTypeImageManifest,
+ Config: v1.Descriptor{
+ MediaType: v1.MediaTypeImageConfig,
+ },
+ Layers: []v1.Descriptor{},
+ Annotations: i.annotations,
+ }
+
+ dmanifest := docker.V2S2Manifest{
+ V2Versioned: docker.V2Versioned{
+ SchemaVersion: 2,
+ MediaType: manifest.DockerV2Schema2MediaType,
+ },
+ Config: docker.V2S2Descriptor{
+ MediaType: manifest.DockerV2Schema2ConfigMediaType,
+ },
+ Layers: []docker.V2S2Descriptor{},
+ }
+
+ return oimage, omanifest, dimage, dmanifest, nil
+}
+
+func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) {
+ // Decide which type of manifest and configuration output we're going to provide.
+ manifestType := i.preferredManifestType
+ // If it's not a format we support, return an error.
+ if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType {
+ return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
+ manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
+ }
+ // Start building the list of layers using the read-write layer.
+ layers := []string{}
+ layerID := i.layerID
+ layer, err := i.store.Layer(layerID)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
+ }
+ // Walk the list of parent layers, prepending each as we go. If we're squashing,
+ // stop at the layer ID of the top layer, which we won't really be using anyway.
+ for layer != nil {
+ layers = append(append([]string{}, layerID), layers...)
+ layerID = layer.Parent
+ if layerID == "" || i.confidentialWorkload.Convert || i.squash {
+ err = nil
+ break
+ }
+ layer, err = i.store.Layer(layerID)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
+ }
+ }
+ logrus.Debugf("layer list: %q", layers)
+
+ // Make a temporary directory to hold blobs.
+ path, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
+ if err != nil {
+ return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err)
+ }
+ logrus.Debugf("using %q to hold temporary data", path)
+ defer func() {
+ if src == nil {
+ err2 := os.RemoveAll(path)
+ if err2 != nil {
+ logrus.Errorf("error removing layer blob directory: %v", err)
+ }
+ }
+ }()
+
+ // Build fresh copies of the configurations and manifest so that we don't mess with any
+ // values in the Builder object itself.
+ oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
+ if err != nil {
+ return nil, err
+ }
+
+ // Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
+ blobLayers := make(map[digest.Digest]blobLayerInfo)
+ for _, layerID := range layers {
+ what := fmt.Sprintf("layer %q", layerID)
+ if i.confidentialWorkload.Convert || i.squash {
+ what = fmt.Sprintf("container %q", i.containerID)
+ }
+ // The default layer media type assumes no compression.
+ omediaType := v1.MediaTypeImageLayer
+ dmediaType := docker.V2S2MediaTypeUncompressedLayer
+ // Look up this layer.
+ layer, err := i.store.Layer(layerID)
+ if err != nil {
+ return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
+ }
+ // If we already know the digest of the contents of parent
+ // layers, reuse their blobsums, diff IDs, and sizes.
+ if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" {
+ layerBlobSum := layer.UncompressedDigest
+ layerBlobSize := layer.UncompressedSize
+ diffID := layer.UncompressedDigest
+ // Note this layer in the manifest, using the appropriate blobsum.
+ olayerDescriptor := v1.Descriptor{
+ MediaType: omediaType,
+ Digest: layerBlobSum,
+ Size: layerBlobSize,
+ }
+ omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
+ dlayerDescriptor := docker.V2S2Descriptor{
+ MediaType: dmediaType,
+ Digest: layerBlobSum,
+ Size: layerBlobSize,
+ }
+ dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
+ // Note this layer in the list of diffIDs, again using the uncompressed digest.
+ oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
+ dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
+ blobLayers[diffID] = blobLayerInfo{
+ ID: layer.ID,
+ Size: layerBlobSize,
+ }
+ continue
+ }
+ // Figure out if we need to change the media type, in case we've changed the compression.
+ omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression)
+ if err != nil {
+ return nil, err
+ }
+ // Start reading either the layer or the whole container rootfs.
+ noCompression := archive.Uncompressed
+ diffOptions := &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ var rc io.ReadCloser
+ var errChan chan error
+ if i.confidentialWorkload.Convert {
+ // Convert the root filesystem into an encrypted disk image.
+ rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload)
+ if err != nil {
+ return nil, err
+ }
+ } else if i.squash {
+ // Extract the root filesystem as a single layer.
+ rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{})
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // If we're up to the final layer, but we don't want to
+ // include a diff for it, we're done.
+ if i.emptyLayer && layerID == i.layerID {
+ continue
+ }
+ // Extract this layer, one of possibly many.
+ rc, err = i.store.Diff("", layerID, diffOptions)
+ if err != nil {
+ return nil, fmt.Errorf("extracting %s: %w", what, err)
+ }
+ }
+ srcHasher := digest.Canonical.Digester()
+ // Set up to write the possibly-recompressed blob.
+ layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
+ if err != nil {
+ rc.Close()
+ return nil, fmt.Errorf("opening file for %s: %w", what, err)
+ }
+
+ counter := ioutils.NewWriteCounter(layerFile)
+ var destHasher digest.Digester
+ var multiWriter io.Writer
+ // Avoid rehashing when we do not compress.
+ if i.compression != archive.Uncompressed {
+ destHasher = digest.Canonical.Digester()
+ multiWriter = io.MultiWriter(counter, destHasher.Hash())
+ } else {
+ destHasher = srcHasher
+ multiWriter = counter
+ }
+ // Compress the layer, if we're recompressing it.
+ writeCloser, err := archive.CompressStream(multiWriter, i.compression)
+ if err != nil {
+ layerFile.Close()
+ rc.Close()
+ return nil, fmt.Errorf("compressing %s: %w", what, err)
+ }
+ writer := io.MultiWriter(writeCloser, srcHasher.Hash())
+ // Scrub any local user names that might correspond to UIDs or GIDs of
+ // files in this layer.
+ {
+ nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
+ writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
+ hdr.Uname, hdr.Gname = "", ""
+ return false, false, nil
+ })
+ writer = io.Writer(writeCloser)
+ }
+ // Use specified timestamps in the layer, if we're doing that for
+ // history entries.
+ if i.created != nil {
+ nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
+ writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
+ // Changing a zeroed field to a non-zero field
+ // can affect the format that the library uses
+ // for writing the header, so only change
+ // fields that are already set to avoid
+ // changing the format (and as a result,
+ // changing the length) of the header that we
+ // write.
+ if !hdr.ModTime.IsZero() {
+ hdr.ModTime = *i.created
+ }
+ if !hdr.AccessTime.IsZero() {
+ hdr.AccessTime = *i.created
+ }
+ if !hdr.ChangeTime.IsZero() {
+ hdr.ChangeTime = *i.created
+ }
+ return false, false, nil
+ })
+ writer = io.Writer(writeCloser)
+ }
+ size, err := io.Copy(writer, rc)
+ writeCloser.Close()
+ layerFile.Close()
+ rc.Close()
+
+ if errChan != nil {
+ err = <-errChan
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("storing %s to file: %w", what, err)
+ }
+ if i.compression == archive.Uncompressed {
+ if size != counter.Count {
+ return nil, fmt.Errorf("storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
+ }
+ } else {
+ size = counter.Count
+ }
+ logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
+ // Rename the layer so that we can more easily find it by digest later.
+ finalBlobName := filepath.Join(path, destHasher.Digest().String())
+ if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
+ return nil, fmt.Errorf("storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
+ }
+ // Add a note in the manifest about the layer. The blobs are identified by their possibly-
+ // compressed blob digests.
+ olayerDescriptor := v1.Descriptor{
+ MediaType: omediaType,
+ Digest: destHasher.Digest(),
+ Size: size,
+ }
+ omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
+ dlayerDescriptor := docker.V2S2Descriptor{
+ MediaType: dmediaType,
+ Digest: destHasher.Digest(),
+ Size: size,
+ }
+ dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
+ // Add a note about the diffID, which is always the layer's uncompressed digest.
+ oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
+ dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
+ }
+
+ // Build history notes in the image configurations.
+ appendHistory := func(history []v1.History) {
+ for i := range history {
+ var created *time.Time
+ if history[i].Created != nil {
+ copiedTimestamp := *history[i].Created
+ created = &copiedTimestamp
+ }
+ onews := v1.History{
+ Created: created,
+ CreatedBy: history[i].CreatedBy,
+ Author: history[i].Author,
+ Comment: history[i].Comment,
+ EmptyLayer: true,
+ }
+ oimage.History = append(oimage.History, onews)
+ if created == nil {
+ created = &time.Time{}
+ }
+ dnews := docker.V2S2History{
+ Created: *created,
+ CreatedBy: history[i].CreatedBy,
+ Author: history[i].Author,
+ Comment: history[i].Comment,
+ EmptyLayer: true,
+ }
+ dimage.History = append(dimage.History, dnews)
+ }
+ }
+
+ // Calculate base image history for special scenarios
+ // when base layers does not contains any history.
+ // We will ignore sanity checks if baseImage history is null
+ // but still add new history for docker parity.
+ baseImageHistoryLen := len(oimage.History)
+ // Only attempt to append history if history was not disabled explicitly.
+ if !i.omitHistory {
+ appendHistory(i.preEmptyLayers)
+ created := time.Now().UTC()
+ if i.created != nil {
+ created = (*i.created).UTC()
+ }
+ comment := i.historyComment
+ // Add a comment for which base image is being used
+ if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
+ comment += "FROM " + i.fromImageName
+ }
+ onews := v1.History{
+ Created: &created,
+ CreatedBy: i.createdBy,
+ Author: oimage.Author,
+ Comment: comment,
+ EmptyLayer: i.emptyLayer,
+ }
+ oimage.History = append(oimage.History, onews)
+ dnews := docker.V2S2History{
+ Created: created,
+ CreatedBy: i.createdBy,
+ Author: dimage.Author,
+ Comment: comment,
+ EmptyLayer: i.emptyLayer,
+ }
+ dimage.History = append(dimage.History, dnews)
+ appendHistory(i.postEmptyLayers)
+
+ // Sanity check that we didn't just create a mismatch between non-empty layers in the
+ // history and the number of diffIDs. Following sanity check is ignored if build history
+ // is disabled explicitly by the user.
+ // Disable sanity check when baseImageHistory is null for docker parity
+ if baseImageHistoryLen != 0 {
+ expectedDiffIDs := expectedOCIDiffIDs(oimage)
+ if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
+ return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
+ }
+ expectedDiffIDs = expectedDockerDiffIDs(dimage)
+ if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
+ return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
+ }
+ }
+ }
+
+ // Encode the image configuration blob.
+ oconfig, err := json.Marshal(&oimage)
+ if err != nil {
+ return nil, fmt.Errorf("encoding %#v as json: %w", oimage, err)
+ }
+ logrus.Debugf("OCIv1 config = %s", oconfig)
+
+ // Add the configuration blob to the manifest.
+ omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
+ omanifest.Config.Size = int64(len(oconfig))
+ omanifest.Config.MediaType = v1.MediaTypeImageConfig
+
+ // Encode the manifest.
+ omanifestbytes, err := json.Marshal(&omanifest)
+ if err != nil {
+ return nil, fmt.Errorf("encoding %#v as json: %w", omanifest, err)
+ }
+ logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
+
+ // Encode the image configuration blob.
+ dconfig, err := json.Marshal(&dimage)
+ if err != nil {
+ return nil, fmt.Errorf("encoding %#v as json: %w", dimage, err)
+ }
+ logrus.Debugf("Docker v2s2 config = %s", dconfig)
+
+ // Add the configuration blob to the manifest.
+ dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
+ dmanifest.Config.Size = int64(len(dconfig))
+ dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+ // Encode the manifest.
+ dmanifestbytes, err := json.Marshal(&dmanifest)
+ if err != nil {
+ return nil, fmt.Errorf("encoding %#v as json: %w", dmanifest, err)
+ }
+ logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
+
+ // Decide which manifest and configuration blobs we'll actually output.
+ var config []byte
+ var imageManifest []byte
+ switch manifestType {
+ case v1.MediaTypeImageManifest:
+ imageManifest = omanifestbytes
+ config = oconfig
+ case manifest.DockerV2Schema2MediaType:
+ imageManifest = dmanifestbytes
+ config = dconfig
+ default:
+ panic("unreachable code: unsupported manifest type")
+ }
+ src = &containerImageSource{
+ path: path,
+ ref: i,
+ store: i.store,
+ containerID: i.containerID,
+ mountLabel: i.mountLabel,
+ layerID: i.layerID,
+ names: i.names,
+ compression: i.compression,
+ config: config,
+ configDigest: digest.Canonical.FromBytes(config),
+ manifest: imageManifest,
+ manifestType: manifestType,
+ blobDirectory: i.blobDirectory,
+ blobLayers: blobLayers,
+ }
+ return src, nil
+}
+
+func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) {
+ return nil, errors.New("can't write to a container")
+}
+
+func (i *containerImageRef) DockerReference() reference.Named {
+ return i.name
+}
+
+func (i *containerImageRef) StringWithinTransport() string {
+ if len(i.names) > 0 {
+ return i.names[0]
+ }
+ return ""
+}
+
+func (i *containerImageRef) DeleteImage(context.Context, *types.SystemContext) error {
+ // we were never here
+ return nil
+}
+
+func (i *containerImageRef) PolicyConfigurationIdentity() string {
+ return ""
+}
+
+func (i *containerImageRef) PolicyConfigurationNamespaces() []string {
+ return nil
+}
+
+func (i *containerImageRef) Transport() types.ImageTransport {
+ return is.Transport
+}
+
+func (i *containerImageSource) Close() error {
+ err := os.RemoveAll(i.path)
+ if err != nil {
+ return fmt.Errorf("removing layer blob directory: %w", err)
+ }
+ return nil
+}
+
+func (i *containerImageSource) Reference() types.ImageReference {
+ return i.ref
+}
+
+func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ return nil, nil
+}
+
+func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ return i.manifest, i.manifestType, nil
+}
+
+func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
+
+func (i *containerImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
+func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
+ if blob.Digest == i.configDigest {
+ logrus.Debugf("start reading config")
+ reader := bytes.NewReader(i.config)
+ closer := func() error {
+ logrus.Debugf("finished reading config")
+ return nil
+ }
+ return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
+ }
+ var layerReadCloser io.ReadCloser
+ size = -1
+ if blobLayerInfo, ok := i.blobLayers[blob.Digest]; ok {
+ noCompression := archive.Uncompressed
+ diffOptions := &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ layerReadCloser, err = i.store.Diff("", blobLayerInfo.ID, diffOptions)
+ size = blobLayerInfo.Size
+ } else {
+ for _, blobDir := range []string{i.blobDirectory, i.path} {
+ var layerFile *os.File
+ layerFile, err = os.OpenFile(filepath.Join(blobDir, blob.Digest.String()), os.O_RDONLY, 0600)
+ if err == nil {
+ st, err := layerFile.Stat()
+ if err != nil {
+ logrus.Warnf("error reading size of layer file %q: %v", blob.Digest.String(), err)
+ } else {
+ size = st.Size()
+ layerReadCloser = layerFile
+ break
+ }
+ layerFile.Close()
+ }
+ if !errors.Is(err, os.ErrNotExist) {
+ logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), blobDir, err)
+ }
+ }
+ }
+ if err != nil || layerReadCloser == nil || size == -1 {
+ logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
+ return nil, -1, fmt.Errorf("opening layer blob: %w", err)
+ }
+ logrus.Debugf("reading layer %q", blob.Digest.String())
+ closer := func() error {
+ logrus.Debugf("finished reading layer %q", blob.Digest.String())
+ if err := layerReadCloser.Close(); err != nil {
+ return fmt.Errorf("closing layer %q after reading: %w", blob.Digest.String(), err)
+ }
+ return nil
+ }
+ return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil
+}
+
+func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) {
+ var name reference.Named
+ container, err := b.store.Container(b.ContainerID)
+ if err != nil {
+ return nil, fmt.Errorf("locating container %q: %w", b.ContainerID, err)
+ }
+ if len(container.Names) > 0 {
+ if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
+ name = parsed
+ }
+ }
+ manifestType := options.PreferredManifestType
+ if manifestType == "" {
+ manifestType = define.OCIv1ImageManifest
+ }
+
+ for _, u := range options.UnsetEnvs {
+ b.UnsetEnv(u)
+ }
+ oconfig, err := json.Marshal(&b.OCIv1)
+ if err != nil {
+ return nil, fmt.Errorf("encoding OCI-format image configuration %#v: %w", b.OCIv1, err)
+ }
+ dconfig, err := json.Marshal(&b.Docker)
+ if err != nil {
+ return nil, fmt.Errorf("encoding docker-format image configuration %#v: %w", b.Docker, err)
+ }
+ var created *time.Time
+ if options.HistoryTimestamp != nil {
+ historyTimestampUTC := options.HistoryTimestamp.UTC()
+ created = &historyTimestampUTC
+ }
+ createdBy := b.CreatedBy()
+ if createdBy == "" {
+ createdBy = strings.Join(b.Shell(), " ")
+ if createdBy == "" {
+ createdBy = "/bin/sh"
+ }
+ }
+
+ parent := ""
+ if b.FromImageID != "" {
+ parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
+ if parentDigest.Validate() == nil {
+ parent = parentDigest.String()
+ }
+ }
+
+ ref := &containerImageRef{
+ fromImageName: b.FromImage,
+ fromImageID: b.FromImageID,
+ store: b.store,
+ compression: options.Compression,
+ name: name,
+ names: container.Names,
+ containerID: container.ID,
+ mountLabel: b.MountLabel,
+ layerID: container.LayerID,
+ oconfig: oconfig,
+ dconfig: dconfig,
+ created: created,
+ createdBy: createdBy,
+ historyComment: b.HistoryComment(),
+ annotations: b.Annotations(),
+ preferredManifestType: manifestType,
+ squash: options.Squash,
+ confidentialWorkload: options.ConfidentialWorkloadOptions,
+ omitHistory: options.OmitHistory,
+ emptyLayer: options.EmptyLayer && !options.Squash && !options.ConfidentialWorkloadOptions.Convert,
+ idMappingOptions: &b.IDMappingOptions,
+ parent: parent,
+ blobDirectory: options.BlobDirectory,
+ preEmptyLayers: b.PrependedEmptyLayers,
+ postEmptyLayers: b.AppendedEmptyLayers,
+ overrideChanges: options.OverrideChanges,
+ overrideConfig: options.OverrideConfig,
+ }
+ return ref, nil
+}
+
+// Extract the container's whole filesystem as if it were a single layer from current builder instance
+func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
+ src, err := b.makeContainerImageRef(options)
+ if err != nil {
+ return nil, nil, fmt.Errorf("creating image reference for container %q to extract its contents: %w", b.ContainerID, err)
+ }
+ return src.extractRootfs(opts)
+}
diff --git a/imagebuildah/build.go b/imagebuildah/build.go
new file mode 100644
index 0000000..03081fd
--- /dev/null
+++ b/imagebuildah/build.go
@@ -0,0 +1,699 @@
+package imagebuildah
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ internalUtil "github.com/containers/buildah/internal/util"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libimage"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/shortnames"
+ istorage "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/hashicorp/go-multierror"
+ "github.com/mattn/go-shellwords"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/openshift/imagebuilder"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+)
+
+const (
+ PullIfMissing = define.PullIfMissing
+ PullAlways = define.PullAlways
+ PullIfNewer = define.PullIfNewer
+ PullNever = define.PullNever
+
+ Gzip = archive.Gzip
+ Bzip2 = archive.Bzip2
+ Xz = archive.Xz
+ Zstd = archive.Zstd
+ Uncompressed = archive.Uncompressed
+)
+
+// Mount is a mountpoint for the build container.
+type Mount = specs.Mount
+
+type BuildOptions = define.BuildOptions
+
+// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
+// URLs), creates one or more new Executors, and then runs
+// Prepare/Execute/Commit/Delete over the entire set of instructions.
+// If the Manifest option is set, returns the ID of the manifest list, else it
+// returns the ID of the built image, and if a name was assigned to it, a
+// canonical reference for that image.
+func BuildDockerfiles(ctx context.Context, store storage.Store, options define.BuildOptions, paths ...string) (id string, ref reference.Canonical, err error) {
+ if options.CommonBuildOpts == nil {
+ options.CommonBuildOpts = &define.CommonBuildOptions{}
+ }
+ if err := parse.Volumes(options.CommonBuildOpts.Volumes); err != nil {
+ return "", nil, fmt.Errorf("validating volumes: %w", err)
+ }
+ if len(paths) == 0 {
+ return "", nil, errors.New("building: no dockerfiles specified")
+ }
+ if len(options.Platforms) > 1 && options.IIDFile != "" {
+ return "", nil, fmt.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile)
+ }
+
+ logger := logrus.New()
+ if options.Err != nil {
+ logger.SetOutput(options.Err)
+ } else {
+ logger.SetOutput(os.Stderr)
+ }
+ logger.SetLevel(logrus.GetLevel())
+
+ var dockerfiles []io.ReadCloser
+ defer func(dockerfiles ...io.ReadCloser) {
+ for _, d := range dockerfiles {
+ d.Close()
+ }
+ }(dockerfiles...)
+
+ for _, tag := range append([]string{options.Output}, options.AdditionalTags...) {
+ if tag == "" {
+ continue
+ }
+ if _, err := util.VerifyTagName(tag); err != nil {
+ return "", nil, fmt.Errorf("tag %s: %w", tag, err)
+ }
+ }
+
+ for _, dfile := range paths {
+ var data io.ReadCloser
+
+ if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
+ logger.Debugf("reading remote Dockerfile %q", dfile)
+ resp, err := http.Get(dfile)
+ if err != nil {
+ return "", nil, err
+ }
+ if resp.ContentLength == 0 {
+ resp.Body.Close()
+ return "", nil, fmt.Errorf("no contents in %q", dfile)
+ }
+ data = resp.Body
+ } else {
+ dinfo, err := os.Stat(dfile)
+ if err != nil {
+ // If the Dockerfile isn't available, try again with
+ // context directory prepended (if not prepended yet).
+ if !strings.HasPrefix(dfile, options.ContextDirectory) {
+ dfile = filepath.Join(options.ContextDirectory, dfile)
+ dinfo, err = os.Stat(dfile)
+ }
+ }
+ if err != nil {
+ return "", nil, err
+ }
+
+ var contents *os.File
+ // If given a directory error out since `-f` does not supports path to directory
+ if dinfo.Mode().IsDir() {
+ return "", nil, fmt.Errorf("containerfile: %q cannot be path to a directory", dfile)
+ }
+ contents, err = os.Open(dfile)
+ if err != nil {
+ return "", nil, err
+ }
+ dinfo, err = contents.Stat()
+ if err != nil {
+ contents.Close()
+ return "", nil, fmt.Errorf("reading info about %q: %w", dfile, err)
+ }
+ if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
+ contents.Close()
+ return "", nil, fmt.Errorf("no contents in %q", dfile)
+ }
+ data = contents
+ }
+
+ // pre-process Dockerfiles with ".in" suffix
+ if strings.HasSuffix(dfile, ".in") {
+ pData, err := preprocessContainerfileContents(logger, dfile, data, options.ContextDirectory, options.CPPFlags)
+ if err != nil {
+ return "", nil, err
+ }
+ data = io.NopCloser(pData)
+ }
+
+ dockerfiles = append(dockerfiles, data)
+ }
+
+ var files [][]byte
+ for _, dockerfile := range dockerfiles {
+ var b bytes.Buffer
+ if _, err := b.ReadFrom(dockerfile); err != nil {
+ return "", nil, err
+ }
+ files = append(files, b.Bytes())
+ }
+
+ if options.JobSemaphore == nil {
+ if options.Jobs != nil {
+ if *options.Jobs < 0 {
+ return "", nil, errors.New("building: invalid value for jobs. It must be a positive integer")
+ }
+ if *options.Jobs > 0 {
+ options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs))
+ }
+ } else {
+ options.JobSemaphore = semaphore.NewWeighted(1)
+ }
+ }
+
+ manifestList := options.Manifest
+ options.Manifest = ""
+ type instance struct {
+ v1.Platform
+ ID string
+ Ref reference.Canonical
+ }
+ var instances []instance
+ var instancesLock sync.Mutex
+
+ var builds multierror.Group
+ if options.SystemContext == nil {
+ options.SystemContext = &types.SystemContext{}
+ }
+
+ if len(options.Platforms) == 0 {
+ options.Platforms = append(options.Platforms, struct{ OS, Arch, Variant string }{
+ OS: options.SystemContext.OSChoice,
+ Arch: options.SystemContext.ArchitectureChoice,
+ })
+ }
+
+ if options.AllPlatforms {
+ if options.AdditionalBuildContexts == nil {
+ options.AdditionalBuildContexts = make(map[string]*define.AdditionalBuildContext)
+ }
+ options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.AdditionalBuildContexts, options.SystemContext)
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ systemContext := options.SystemContext
+ for _, platform := range options.Platforms {
+ platformContext := *systemContext
+ platformSpec := internalUtil.NormalizePlatform(v1.Platform{
+ OS: platform.OS,
+ Architecture: platform.Arch,
+ Variant: platform.Variant,
+ })
+ // internalUtil.NormalizePlatform converts an empty os value to GOOS
+ // so we have to check the original value here to not overwrite the default for no reason
+ if platform.OS != "" {
+ platformContext.OSChoice = platformSpec.OS
+ }
+ if platform.Arch != "" {
+ platformContext.ArchitectureChoice = platformSpec.Architecture
+ platformContext.VariantChoice = platformSpec.Variant
+ }
+ platformOptions := options
+ platformOptions.SystemContext = &platformContext
+ platformOptions.OS = platformContext.OSChoice
+ platformOptions.Architecture = platformContext.ArchitectureChoice
+ logPrefix := ""
+ if len(options.Platforms) > 1 {
+ logPrefix = "[" + platforms.Format(platformSpec) + "] "
+ }
+ // Deep copy args to prevent concurrent read/writes over Args.
+ argsCopy := make(map[string]string)
+ for key, value := range options.Args {
+ argsCopy[key] = value
+ }
+ platformOptions.Args = argsCopy
+ builds.Go(func() error {
+ loggerPerPlatform := logger
+ if platformOptions.LogFile != "" && platformOptions.LogSplitByPlatform {
+ logFile := platformOptions.LogFile + "_" + platformOptions.OS + "_" + platformOptions.Architecture
+ f, err := os.OpenFile(logFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600)
+ if err != nil {
+ return fmt.Errorf("opening logfile: %q: %w", logFile, err)
+ }
+ defer f.Close()
+ loggerPerPlatform = logrus.New()
+ loggerPerPlatform.SetOutput(f)
+ loggerPerPlatform.SetLevel(logrus.GetLevel())
+ stdout := f
+ stderr := f
+ reporter := f
+ platformOptions.Out = stdout
+ platformOptions.ReportWriter = reporter
+ platformOptions.Err = stderr
+ }
+ thisID, thisRef, err := buildDockerfilesOnce(ctx, store, loggerPerPlatform, logPrefix, platformOptions, paths, files)
+ if err != nil {
+ if errorContext := strings.TrimSpace(logPrefix); errorContext != "" {
+ return fmt.Errorf("%s: %w", errorContext, err)
+ }
+ return err
+ }
+ instancesLock.Lock()
+ instances = append(instances, instance{
+ ID: thisID,
+ Ref: thisRef,
+ Platform: platformSpec,
+ })
+ instancesLock.Unlock()
+ return nil
+ })
+ }
+
+ if merr := builds.Wait(); merr != nil {
+ if merr.Len() == 1 {
+ return "", nil, merr.Errors[0]
+ }
+ return "", nil, merr.ErrorOrNil()
+ }
+
+ // Reasons for this id, ref assignment w.r.t to use-case:
+ //
+ // * Single-platform build: On single platform build we only
+ // have one built instance i.e on indice 0 of built instances,
+ // so assign that.
+ //
+ // * Multi-platform build with manifestList: If this is a build for
+ // multiple platforms ( more than one platform ) and --manifest
+ // option then this assignment is insignificant since it will be
+ // overridden anyways with the id and ref of manifest list later in
+ // in this code.
+ //
+ // * Multi-platform build without manifest list: If this is a build for
+ // multiple platforms without --manifest then we are free to return
+ // id and ref of any one of the image in the instance list so always
+ // return indice 0 for predictable output instead returning the id and
+ // ref of the go routine which completed at last.
+ id, ref = instances[0].ID, instances[0].Ref
+
+ if manifestList != "" {
+ rt, err := libimage.RuntimeFromStore(store, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ // Create the manifest list ourselves, so that it's not in a
+ // partially-populated state at any point if we're creating it
+ // fresh.
+ list, err := rt.LookupManifestList(manifestList)
+ if err != nil && errors.Is(err, storage.ErrImageUnknown) {
+ list, err = rt.CreateManifestList(manifestList)
+ }
+ if err != nil {
+ return "", nil, err
+ }
+ // Add each instance to the list in turn.
+ storeTransportName := istorage.Transport.Name()
+ for _, instance := range instances {
+ instanceDigest, err := list.Add(ctx, storeTransportName+":"+instance.ID, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ err = list.AnnotateInstance(instanceDigest, &libimage.ManifestListAnnotateOptions{
+ Architecture: instance.Architecture,
+ OS: instance.OS,
+ Variant: instance.Variant,
+ })
+ if err != nil {
+ return "", nil, err
+ }
+ }
+ id, ref = list.ID(), nil
+ // Put together a canonical reference
+ storeRef, err := istorage.Transport.NewStoreReference(store, nil, list.ID())
+ if err != nil {
+ return "", nil, err
+ }
+ imgSource, err := storeRef.NewImageSource(ctx, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ defer imgSource.Close()
+ manifestBytes, _, err := imgSource.GetManifest(ctx, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ return "", nil, err
+ }
+ img, err := store.Image(id)
+ if err != nil {
+ return "", nil, err
+ }
+ for _, name := range img.Names {
+ if named, err := reference.ParseNamed(name); err == nil {
+ if r, err := reference.WithDigest(reference.TrimNamed(named), manifestDigest); err == nil {
+ ref = r
+ break
+ }
+ }
+ }
+ }
+
+ return id, ref, nil
+}
+
+func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, containerFiles []string, dockerfilecontents [][]byte) (string, reference.Canonical, error) {
+ mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
+ if err != nil {
+ return "", nil, fmt.Errorf("parsing main Dockerfile: %s: %w", containerFiles[0], err)
+ }
+
+ // --platform was explicitly selected for this build
+ // so set correct TARGETPLATFORM in args if it is not
+ // already selected by the user.
+ if options.SystemContext.OSChoice != "" && options.SystemContext.ArchitectureChoice != "" {
+ // os component from --platform string populates TARGETOS
+ // buildkit parity: give priority to user's `--build-arg`
+ if _, ok := options.Args["TARGETOS"]; !ok {
+ options.Args["TARGETOS"] = options.SystemContext.OSChoice
+ }
+ // arch component from --platform string populates TARGETARCH
+ // buildkit parity: give priority to user's `--build-arg`
+ if _, ok := options.Args["TARGETARCH"]; !ok {
+ options.Args["TARGETARCH"] = options.SystemContext.ArchitectureChoice
+ }
+ // variant component from --platform string populates TARGETVARIANT
+ // buildkit parity: give priority to user's `--build-arg`
+ if _, ok := options.Args["TARGETVARIANT"]; !ok {
+ if options.SystemContext.VariantChoice != "" {
+ options.Args["TARGETVARIANT"] = options.SystemContext.VariantChoice
+ }
+ }
+ // buildkit parity: give priority to user's `--build-arg`
+ if _, ok := options.Args["TARGETPLATFORM"]; !ok {
+ // buildkit parity: TARGETPLATFORM should be always created
+ // from SystemContext and not `TARGETOS` and `TARGETARCH` because
+ // users can always override values of `TARGETOS` and `TARGETARCH`
+ // but `TARGETPLATFORM` should be set independent of those values.
+ options.Args["TARGETPLATFORM"] = options.SystemContext.OSChoice + "/" + options.SystemContext.ArchitectureChoice
+ if options.SystemContext.VariantChoice != "" {
+ options.Args["TARGETPLATFORM"] = options.Args["TARGETPLATFORM"] + "/" + options.SystemContext.VariantChoice
+ }
+ }
+ }
+
+ for i, d := range dockerfilecontents[1:] {
+ additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
+ if err != nil {
+ containerFiles := containerFiles[1:]
+ return "", nil, fmt.Errorf("parsing additional Dockerfile %s: %w", containerFiles[i], err)
+ }
+ mainNode.Children = append(mainNode.Children, additionalNode.Children...)
+ }
+
+ exec, err := newExecutor(logger, logPrefix, store, options, mainNode, containerFiles)
+ if err != nil {
+ return "", nil, fmt.Errorf("creating build executor: %w", err)
+ }
+ b := imagebuilder.NewBuilder(options.Args)
+ defaultContainerConfig, err := config.Default()
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to get container config: %w", err)
+ }
+ b.Env = append(defaultContainerConfig.GetDefaultEnv(), b.Env...)
+ stages, err := imagebuilder.NewStages(mainNode, b)
+ if err != nil {
+ return "", nil, fmt.Errorf("reading multiple stages: %w", err)
+ }
+ if options.Target != "" {
+ stagesTargeted, ok := stages.ThroughTarget(options.Target)
+ if !ok {
+ return "", nil, fmt.Errorf("The target %q was not found in the provided Dockerfile", options.Target)
+ }
+ stages = stagesTargeted
+ }
+ return exec.Build(ctx, stages)
+}
+
+// preprocessContainerfileContents runs CPP(1) in preprocess-only mode on the input
+// dockerfile content and will use ctxDir as the base include path.
+func preprocessContainerfileContents(logger *logrus.Logger, containerfile string, r io.Reader, ctxDir string, cppFlags []string) (stdout io.Reader, err error) {
+ cppCommand := "cpp"
+ cppPath, err := exec.LookPath(cppCommand)
+ if err != nil {
+ if errors.Is(err, exec.ErrNotFound) {
+ err = fmt.Errorf("%v: .in support requires %s to be installed", err, cppCommand)
+ }
+ return nil, err
+ }
+
+ stdoutBuffer := bytes.Buffer{}
+ stderrBuffer := bytes.Buffer{}
+
+ cppArgs := []string{"-E", "-iquote", ctxDir, "-traditional", "-undef", "-"}
+ if flags, ok := os.LookupEnv("BUILDAH_CPPFLAGS"); ok {
+ args, err := shellwords.Parse(flags)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BUILDAH_CPPFLAGS %q: %v", flags, err)
+ }
+ cppArgs = append(cppArgs, args...)
+ }
+ cppArgs = append(cppArgs, cppFlags...)
+ cmd := exec.Command(cppPath, cppArgs...)
+ cmd.Stdin = r
+ cmd.Stdout = &stdoutBuffer
+ cmd.Stderr = &stderrBuffer
+
+ if err = cmd.Start(); err != nil {
+ return nil, fmt.Errorf("preprocessing %s: %w", containerfile, err)
+ }
+ if err = cmd.Wait(); err != nil {
+ if stderrBuffer.Len() != 0 {
+ logger.Warnf("Ignoring %s\n", stderrBuffer.String())
+ }
+ if stdoutBuffer.Len() == 0 {
+ return nil, fmt.Errorf("preprocessing %s: preprocessor produced no output: %w", containerfile, err)
+ }
+ }
+ return &stdoutBuffer, nil
+}
+
+// platformsForBaseImages resolves the names of base images from the
+// dockerfiles, and if they are all valid references to manifest lists, returns
+// the list of platforms that are supported by all of the base images.
+func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) {
+	baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args, additionalBuildContext)
+	if err != nil {
+		return nil, fmt.Errorf("determining list of base images: %w", err)
+	}
+	logrus.Debugf("unresolved base images: %v", baseImages)
+	if len(baseImages) == 0 {
+		// err is necessarily nil at this point, so don't wrap it with %w
+		// (that used to render as "%!w(<nil>)" in the message).
+		return nil, errors.New("build uses no non-scratch base images")
+	}
+	targetPlatforms := make(map[string]struct{})
+	var platformList []struct{ OS, Arch, Variant string }
+	for baseImageIndex, baseImage := range baseImages {
+		resolved, err := shortnames.Resolve(systemContext, baseImage)
+		if err != nil {
+			return nil, fmt.Errorf("resolving image name %q: %w", baseImage, err)
+		}
+		var manifestBytes []byte
+		var manifestType string
+		// Try each pull candidate in order until one yields a manifest list.
+		for _, candidate := range resolved.PullCandidates {
+			ref, err := docker.NewReference(candidate.Value)
+			if err != nil {
+				logrus.Debugf("parsing image reference %q: %v", candidate.Value.String(), err)
+				continue
+			}
+			src, err := ref.NewImageSource(ctx, systemContext)
+			if err != nil {
+				logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err)
+				continue
+			}
+			candidateBytes, candidateType, err := src.GetManifest(ctx, nil)
+			_ = src.Close()
+			if err != nil {
+				logrus.Debugf("reading image manifest for %q: %v", baseImage, err)
+				continue
+			}
+			if !manifest.MIMETypeIsMultiImage(candidateType) {
+				// err is nil on this path; report the unexpected MIME type
+				// instead of logging a nil error.
+				logrus.Debugf("base image %q is not a reference to a manifest list (%s)", baseImage, candidateType)
+				continue
+			}
+			if err := candidate.Record(); err != nil {
+				logrus.Debugf("error recording name %q for base image %q: %v", candidate.Value.String(), baseImage, err)
+				continue
+			}
+			baseImage = candidate.Value.String()
+			manifestBytes, manifestType = candidateBytes, candidateType
+			break
+		}
+		if len(manifestBytes) == 0 {
+			if len(resolved.PullCandidates) > 0 {
+				return nil, fmt.Errorf("base image name %q didn't resolve to a manifest list", baseImage)
+			}
+			return nil, fmt.Errorf("base image name %q didn't resolve to anything", baseImage)
+		}
+		// Normalize non-OCI manifest lists to an OCI image index so that the
+		// platform-extraction logic below only has to handle one format.
+		if manifestType != v1.MediaTypeImageIndex {
+			list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+			if err != nil {
+				return nil, fmt.Errorf("parsing manifest list from base image %q: %w", baseImage, err)
+			}
+			list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex)
+			if err != nil {
+				return nil, fmt.Errorf("converting manifest list from base image %q to an OCI image index: %w", baseImage, err)
+			}
+			manifestBytes, err = list.Serialize()
+			if err != nil {
+				return nil, fmt.Errorf("encoding converted OCI image index for base image %q: %w", baseImage, err)
+			}
+		}
+		index, err := manifest.OCI1IndexFromManifest(manifestBytes)
+		if err != nil {
+			return nil, fmt.Errorf("decoding manifest list for base image %q: %w", baseImage, err)
+		}
+		if baseImageIndex == 0 {
+			// populate the list with the first image's normalized platforms
+			for _, instance := range index.Manifests {
+				if instance.Platform == nil {
+					continue
+				}
+				platform := internalUtil.NormalizePlatform(*instance.Platform)
+				targetPlatforms[platforms.Format(platform)] = struct{}{}
+				logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform))
+			}
+		} else {
+			// prune the list of any normalized platforms this base image doesn't support
+			imagePlatforms := make(map[string]struct{})
+			for _, instance := range index.Manifests {
+				if instance.Platform == nil {
+					continue
+				}
+				platform := internalUtil.NormalizePlatform(*instance.Platform)
+				imagePlatforms[platforms.Format(platform)] = struct{}{}
+				logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform))
+			}
+			var removed []string
+			for platform := range targetPlatforms {
+				if _, present := imagePlatforms[platform]; !present {
+					removed = append(removed, platform)
+					logger.Debugf("image %q does not support %q", baseImage, platform)
+				}
+			}
+			for _, remove := range removed {
+				delete(targetPlatforms, remove)
+			}
+		}
+		if baseImageIndex == len(baseImages)-1 && len(targetPlatforms) > 0 {
+			// extract the list
+			for platform := range targetPlatforms {
+				// Use a distinct name for the parsed value: shadowing
+				// "platform" made the error and debug messages below print a
+				// (possibly zero-valued) struct instead of the string.
+				parsedPlatform, err := platforms.Parse(platform)
+				if err != nil {
+					return nil, fmt.Errorf("parsing platform double/triple %q: %w", platform, err)
+				}
+				platformList = append(platformList, struct{ OS, Arch, Variant string }{
+					OS:      parsedPlatform.OS,
+					Arch:    parsedPlatform.Architecture,
+					Variant: parsedPlatform.Variant,
+				})
+				logger.Debugf("base images all support %q", platform)
+			}
+		}
+	}
+	if len(platformList) == 0 {
+		return nil, errors.New("base images have no platforms in common")
+	}
+	return platformList, nil
+}
+
+// baseImages parses the dockerfilecontents, possibly replacing the first
+// stage's base image with FROM, and returns the list of base images as
+// provided. Each entry in the dockerfilenames slice corresponds to a slice in
+// dockerfilecontents.
+func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext) ([]string, error) {
+	mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
+	if err != nil {
+		return nil, fmt.Errorf("parsing main Dockerfile: %s: %w", dockerfilenames[0], err)
+	}
+
+	// Splice the instructions from any additional Dockerfiles onto the main
+	// parse tree so that a single pass below sees all of the stages.
+	for i, d := range dockerfilecontents[1:] {
+		additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
+		if err != nil {
+			// Re-slice so that index i lines up with the file we just failed on.
+			dockerfilenames := dockerfilenames[1:]
+			return nil, fmt.Errorf("parsing additional Dockerfile %s: %w", dockerfilenames[i], err)
+		}
+		mainNode.Children = append(mainNode.Children, additionalNode.Children...)
+	}
+
+	b := imagebuilder.NewBuilder(args)
+	defaultContainerConfig, err := config.Default()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get container config: %w", err)
+	}
+	b.Env = defaultContainerConfig.GetDefaultEnv()
+	stages, err := imagebuilder.NewStages(mainNode, b)
+	if err != nil {
+		return nil, fmt.Errorf("reading multiple stages: %w", err)
+	}
+	var baseImages []string
+	// nicknames records the names of earlier stages, so that a later FROM
+	// which refers to a previous stage isn't reported as an external image.
+	nicknames := make(map[string]bool)
+	for stageIndex, stage := range stages {
+		node := stage.Node // first line
+		for node != nil { // each line
+			for _, child := range node.Children { // tokens on this line, though we only care about the first
+				switch strings.ToUpper(child.Value) { // first token - instruction
+				case "FROM":
+					if child.Next != nil { // second token on this line
+						// If we have a fromOverride, replace the value of
+						// image name for the first FROM in the Containerfile.
+						if from != "" {
+							child.Next.Value = from
+							from = ""
+						}
+						// An additional build context with the same name
+						// overrides the image named by FROM, but only if it
+						// actually refers to an image.
+						if replaceBuildContext, ok := additionalBuildContext[child.Next.Value]; ok {
+							if replaceBuildContext.IsImage {
+								child.Next.Value = replaceBuildContext.Value
+							} else {
+								return nil, fmt.Errorf("build context %q is not an image, can not be used for FROM %q", child.Next.Value, child.Next.Value)
+							}
+						}
+						base := child.Next.Value
+						// Skip "scratch" and references to earlier stages.
+						if base != "" && base != buildah.BaseImageFakeName && !nicknames[base] {
+							headingArgs := argsMapToSlice(stage.Builder.HeadingArgs)
+							userArgs := argsMapToSlice(stage.Builder.Args)
+							// append heading args so if --build-arg key=value is not
+							// specified but default value is set in Containerfile
+							// via `ARG key=value` so default value can be used.
+							userArgs = append(headingArgs, userArgs...)
+							baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
+							if err != nil {
+								return nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
+							}
+							baseImages = append(baseImages, baseWithArg)
+						}
+					}
+				}
+			}
+			node = node.Next // next line
+		}
+		// A stage whose name differs from its index was named with "AS";
+		// remember the name so later FROMs can be recognized as stage refs.
+		if stage.Name != strconv.Itoa(stageIndex) {
+			nicknames[stage.Name] = true
+		}
+	}
+	return baseImages, nil
+}
diff --git a/imagebuildah/executor.go b/imagebuildah/executor.go
new file mode 100644
index 0000000..917c84f
--- /dev/null
+++ b/imagebuildah/executor.go
@@ -0,0 +1,1082 @@
+package imagebuildah
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/pkg/sshagent"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libimage"
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ storageTransport "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/openshift/imagebuilder"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+)
+
+// builtinAllowedBuildArgs is list of built-in allowed build args. Normally we
+// complain if we're given values for arguments which have no corresponding ARG
+// instruction in the Dockerfile, since that's usually an indication of a user
+// error, but for these values we make exceptions and ignore them.
+var builtinAllowedBuildArgs = map[string]bool{
+	// Standard proxy environment variables, in both upper and lower case.
+	"HTTP_PROXY":  true,
+	"http_proxy":  true,
+	"HTTPS_PROXY": true,
+	"https_proxy": true,
+	"FTP_PROXY":   true,
+	"ftp_proxy":   true,
+	"NO_PROXY":    true,
+	"no_proxy":    true,
+	// Predefined platform args for multi-platform builds.
+	"TARGETARCH":     true,
+	"TARGETOS":       true,
+	"TARGETPLATFORM": true,
+	"TARGETVARIANT":  true,
+}
+
+// Executor is a buildah-based implementation of the imagebuilder.Executor
+// interface. It coordinates the entire build by using one or more
+// StageExecutors to handle each stage of the build.
+type Executor struct {
+	// Layer-caching configuration.
+	cacheFrom       []reference.Named
+	cacheTo         []reference.Named
+	cacheTTL        time.Duration
+	containerSuffix string
+	logger          *logrus.Logger
+	// stages maps both stage names and stage indexes (as strings) to their
+	// executors; see startStage.
+	stages                         map[string]*StageExecutor
+	store                          storage.Store
+	contextDir                     string
+	pullPolicy                     define.PullPolicy
+	registry                       string
+	ignoreUnrecognizedInstructions bool
+	quiet                          bool
+	runtime                        string
+	runtimeArgs                    []string
+	transientMounts                []Mount
+	compression                    archive.Compression
+	output                         string
+	outputFormat                   string
+	additionalTags                 []string
+	log                            func(format string, args ...interface{}) // can be nil
+	in                             io.Reader
+	out                            io.Writer
+	err                            io.Writer
+	signaturePolicyPath            string
+	skipUnusedStages               types.OptionalBool
+	systemContext                  *types.SystemContext
+	reportWriter                   io.Writer
+	isolation                      define.Isolation
+	namespaceOptions               []define.NamespaceOption
+	configureNetwork               define.NetworkConfigurationPolicy
+	cniPluginPath                  string
+	cniConfigDir                   string
+	// NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
+	networkInterface        nettypes.ContainerNetwork
+	idmappingOptions        *define.IDMappingOptions
+	commonBuildOptions      *define.CommonBuildOptions
+	defaultMountsFilePath   string
+	iidfile                 string
+	squash                  bool
+	labels                  []string
+	layerLabels             []string
+	annotations             []string
+	layers                  bool
+	noHostname              bool
+	noHosts                 bool
+	useCache                bool
+	removeIntermediateCtrs  bool
+	forceRmIntermediateCtrs bool
+	imageMap                map[string]string          // Used to map images that we create to handle the AS construct.
+	containerMap            map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers.
+	baseMap                 map[string]bool            // Holds the names of every base image, as given.
+	rootfsMap               map[string]bool            // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction.
+	blobDirectory           string
+	excludes                []string
+	groupAdd                []string
+	ignoreFile              string
+	args                    map[string]string
+	globalArgs              map[string]string
+	unusedArgs              map[string]struct{}
+	capabilities            []string
+	devices                 define.ContainerDevices
+	signBy                  string
+	architecture            string
+	timestamp               *time.Time
+	os                      string
+	maxPullPushRetries      int
+	retryPullPushDelay      time.Duration
+	ociDecryptConfig        *encconfig.DecryptConfig
+	// State shared between concurrently-executing stages; guarded by
+	// stagesLock where the methods below take it.
+	lastError       error
+	terminatedStage map[string]error
+	stagesLock      sync.Mutex
+	stagesSemaphore *semaphore.Weighted
+	logRusage       bool
+	rusageLogFile   io.Writer
+	// Cache of per-image manifest/history/diff-ID lookups; guarded by imageInfoLock.
+	imageInfoLock           sync.Mutex
+	imageInfoCache          map[string]imageTypeAndHistoryAndDiffIDs
+	fromOverride            string
+	additionalBuildContexts map[string]*define.AdditionalBuildContext
+	manifest                string
+	secrets                 map[string]define.Secret
+	sshsources              map[string]*sshagent.Source
+	logPrefix               string
+	unsetEnvs               []string
+	unsetLabels             []string
+	processLabel            string // Shares processLabel of first stage container with containers of other stages in same build
+	mountLabel              string // Shares mountLabel of first stage container with containers of other stages in same build
+	buildOutput             string // Specifies instructions for any custom build output
+	osVersion               string
+	osFeatures              []string
+	envs                    []string
+	confidentialWorkload    define.ConfidentialWorkloadOptions
+}
+
+// imageTypeAndHistoryAndDiffIDs is a cached result for
+// getImageTypeAndHistoryAndDiffIDs, keyed on an image ID.
+type imageTypeAndHistoryAndDiffIDs struct {
+	manifestType string          // MIME type of the image's manifest
+	history      []v1.History    // history entries from the image's OCI config
+	diffIDs      []digest.Digest // uncompressed-layer digests from the OCI config
+	err          error           // NOTE(review): only ever stored as nil by the current caller — confirm before relying on it
+}
+
+// newExecutor creates a new instance of the imagebuilder.Executor interface.
+// It resolves defaults (ignore file, capabilities, devices, volumes, secrets,
+// SSH sources) from the container configuration and the supplied options, and
+// pre-scans mainNode for ARG instructions that precede the first FROM so that
+// they can be treated as global args.
+func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node, containerFiles []string) (*Executor, error) {
+	defaultContainerConfig, err := config.Default()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get container config: %w", err)
+	}
+
+	// If the caller didn't supply excludes, read them (and possibly the
+	// ignore-file path) from the context directory's ignore file.
+	excludes := options.Excludes
+	if len(excludes) == 0 {
+		excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile, containerFiles)
+		if err != nil {
+			return nil, err
+		}
+	}
+	capabilities, err := defaultContainerConfig.Capabilities("", options.AddCapabilities, options.DropCapabilities)
+	if err != nil {
+		return nil, err
+	}
+
+	devices := define.ContainerDevices{}
+	for _, device := range append(defaultContainerConfig.Containers.Devices.Get(), options.Devices...) {
+		dev, err := parse.DeviceFromPath(device)
+		if err != nil {
+			return nil, err
+		}
+		// NOTE(review): each parsed device is prepended, so the final slice is
+		// in reverse of the input order — confirm this ordering is intended.
+		devices = append(dev, devices...)
+	}
+
+	transientMounts := []Mount{}
+	for _, volume := range append(defaultContainerConfig.Volumes(), options.TransientMounts...) {
+		mount, err := parse.Volume(volume)
+		if err != nil {
+			return nil, err
+		}
+		// NOTE(review): prepended like devices above, so later entries end up first.
+		transientMounts = append([]Mount{mount}, transientMounts...)
+	}
+
+	secrets, err := parse.Secrets(options.CommonBuildOpts.Secrets)
+	if err != nil {
+		return nil, err
+	}
+	sshsources, err := parse.SSH(options.CommonBuildOpts.SSHSources)
+	if err != nil {
+		return nil, err
+	}
+
+	// Quiet builds discard per-step progress reporting.
+	writer := options.ReportWriter
+	if options.Quiet {
+		writer = io.Discard
+	}
+
+	var rusageLogFile io.Writer
+
+	if options.LogRusage && !options.Quiet {
+		if options.RusageLogFile == "" {
+			rusageLogFile = options.Out
+		} else {
+			rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	exec := Executor{
+		args:                           options.Args,
+		cacheFrom:                      options.CacheFrom,
+		cacheTo:                        options.CacheTo,
+		cacheTTL:                       options.CacheTTL,
+		containerSuffix:                options.ContainerSuffix,
+		logger:                         logger,
+		stages:                         make(map[string]*StageExecutor),
+		store:                          store,
+		contextDir:                     options.ContextDirectory,
+		excludes:                       excludes,
+		groupAdd:                       options.GroupAdd,
+		ignoreFile:                     options.IgnoreFile,
+		pullPolicy:                     options.PullPolicy,
+		registry:                       options.Registry,
+		ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
+		quiet:                          options.Quiet,
+		runtime:                        options.Runtime,
+		runtimeArgs:                    options.RuntimeArgs,
+		transientMounts:                transientMounts,
+		compression:                    options.Compression,
+		output:                         options.Output,
+		outputFormat:                   options.OutputFormat,
+		additionalTags:                 options.AdditionalTags,
+		signaturePolicyPath:            options.SignaturePolicyPath,
+		skipUnusedStages:               options.SkipUnusedStages,
+		systemContext:                  options.SystemContext,
+		log:                            options.Log,
+		in:                             options.In,
+		out:                            options.Out,
+		err:                            options.Err,
+		reportWriter:                   writer,
+		isolation:                      options.Isolation,
+		namespaceOptions:               options.NamespaceOptions,
+		configureNetwork:               options.ConfigureNetwork,
+		cniPluginPath:                  options.CNIPluginPath,
+		cniConfigDir:                   options.CNIConfigDir,
+		networkInterface:               options.NetworkInterface,
+		idmappingOptions:               options.IDMappingOptions,
+		commonBuildOptions:             options.CommonBuildOpts,
+		defaultMountsFilePath:          options.DefaultMountsFilePath,
+		iidfile:                        options.IIDFile,
+		squash:                         options.Squash,
+		labels:                         append([]string{}, options.Labels...),
+		layerLabels:                    append([]string{}, options.LayerLabels...),
+		annotations:                    append([]string{}, options.Annotations...),
+		layers:                         options.Layers,
+		noHostname:                     options.CommonBuildOpts.NoHostname,
+		noHosts:                        options.CommonBuildOpts.NoHosts,
+		useCache:                       !options.NoCache,
+		removeIntermediateCtrs:         options.RemoveIntermediateCtrs,
+		forceRmIntermediateCtrs:        options.ForceRmIntermediateCtrs,
+		imageMap:                       make(map[string]string),
+		containerMap:                   make(map[string]*buildah.Builder),
+		baseMap:                        make(map[string]bool),
+		rootfsMap:                      make(map[string]bool),
+		blobDirectory:                  options.BlobDirectory,
+		unusedArgs:                     make(map[string]struct{}),
+		capabilities:                   capabilities,
+		devices:                        devices,
+		signBy:                         options.SignBy,
+		architecture:                   options.Architecture,
+		timestamp:                      options.Timestamp,
+		os:                             options.OS,
+		maxPullPushRetries:             options.MaxPullPushRetries,
+		retryPullPushDelay:             options.PullPushRetryDelay,
+		ociDecryptConfig:               options.OciDecryptConfig,
+		terminatedStage:                make(map[string]error),
+		stagesSemaphore:                options.JobSemaphore,
+		logRusage:                      options.LogRusage,
+		rusageLogFile:                  rusageLogFile,
+		imageInfoCache:                 make(map[string]imageTypeAndHistoryAndDiffIDs),
+		fromOverride:                   options.From,
+		additionalBuildContexts:        options.AdditionalBuildContexts,
+		manifest:                       options.Manifest,
+		secrets:                        secrets,
+		sshsources:                     sshsources,
+		logPrefix:                      logPrefix,
+		unsetEnvs:                      append([]string{}, options.UnsetEnvs...),
+		unsetLabels:                    append([]string{}, options.UnsetLabels...),
+		buildOutput:                    options.BuildOutput,
+		osVersion:                      options.OSVersion,
+		osFeatures:                     append([]string{}, options.OSFeatures...),
+		envs:                           append([]string{}, options.Envs...),
+		confidentialWorkload:           options.ConfidentialWorkload,
+	}
+	// Fall back to the process's own standard streams when none were given.
+	if exec.err == nil {
+		exec.err = os.Stderr
+	}
+	if exec.out == nil {
+		exec.out = os.Stdout
+	}
+
+	// Every caller-supplied arg that isn't a built-in starts out "unused";
+	// entries are removed below as matching ARG instructions are found.
+	for arg := range options.Args {
+		if _, isBuiltIn := builtinAllowedBuildArgs[arg]; !isBuiltIn {
+			exec.unusedArgs[arg] = struct{}{}
+		}
+	}
+	// Use this flag to collect all args declared before
+	// first stage and treat them as global args which is
+	// accessible to all stages.
+	foundFirstStage := false
+	globalArgs := make(map[string]string)
+	for _, line := range mainNode.Children {
+		node := line
+		for node != nil { // tokens on this line, though we only care about the first
+			switch strings.ToUpper(node.Value) { // first token - instruction
+			case "ARG":
+				arg := node.Next
+				if arg != nil {
+					// We have to be careful here - it's either an argument
+					// and value, or just an argument, since they can be
+					// separated by either "=" or whitespace.
+					list := strings.SplitN(arg.Value, "=", 2)
+					if !foundFirstStage {
+						if len(list) > 1 {
+							globalArgs[list[0]] = list[1]
+						}
+					}
+					delete(exec.unusedArgs, list[0])
+				}
+			case "FROM":
+				foundFirstStage = true
+			}
+			// Only the first token of each line matters, so this inner loop
+			// always exits after a single iteration.
+			break
+		}
+	}
+	exec.globalArgs = globalArgs
+	return &exec, nil
+}
+
+// startStage creates a new stage executor that will be referenced whenever a
+// COPY or ADD statement uses a --from=NAME flag.
+func (b *Executor) startStage(ctx context.Context, stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor {
+	se := &StageExecutor{
+		ctx:             ctx,
+		executor:        b,
+		log:             b.log,
+		index:           stage.Position,
+		stages:          stages,
+		name:            stage.Name,
+		volumeCache:     make(map[string]string),
+		volumeCacheInfo: make(map[string]os.FileInfo),
+		output:          output,
+		stage:           stage,
+	}
+	// Register the executor under the stage's name, and also under its
+	// numeric position when the two differ, so --from can use either form.
+	b.stages[stage.Name] = se
+	position := strconv.Itoa(stage.Position)
+	if position != stage.Name {
+		b.stages[position] = se
+	}
+	return se
+}
+
+// resolveNameToImageRef creates a types.ImageReference for the output name in local storage
+func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) {
+	// A name that parses as a full transport:name reference wins outright.
+	if parsed, err := alltransports.ParseImageName(output); err == nil {
+		return parsed, nil
+	}
+	// Otherwise normalize the short name and resolve it in local storage.
+	normalized, err := libimage.NormalizeName(output)
+	if err != nil {
+		return nil, err
+	}
+	return storageTransport.Transport.ParseStoreReference(b.store, normalized.String())
+}
+
+// waitForStage waits for an entry to be added to terminatedStage indicating
+// that the specified stage has finished. If there is no stage defined by that
+// name, then it will return (false, nil). If there is a stage defined by that
+// name, it will return true along with any error it encounters.
+func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) {
+	// Verify that the name actually refers to a stage, by name or by index.
+	found := false
+	for _, otherStage := range stages {
+		if otherStage.Name == name || strconv.Itoa(otherStage.Position) == name {
+			found = true
+			break
+		}
+	}
+	if !found {
+		return false, nil
+	}
+	// Poll until the named stage is recorded as terminated, or until the
+	// build as a whole has failed.
+	for {
+		if b.lastError != nil {
+			return true, b.lastError
+		}
+
+		b.stagesLock.Lock()
+		terminationError, terminated := b.terminatedStage[name]
+		b.stagesLock.Unlock()
+
+		if terminationError != nil {
+			// NOTE(review): this returns false even though the stage exists,
+			// which contradicts the doc comment above — confirm callers only
+			// consult the error in this case.
+			return false, terminationError
+		}
+		if terminated {
+			return true, nil
+		}
+
+		// Give up our semaphore slot while sleeping so other stages can make
+		// progress, then re-acquire it before checking again.
+		b.stagesSemaphore.Release(1)
+		time.Sleep(time.Millisecond * 10)
+		if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
+			return true, fmt.Errorf("reacquiring job semaphore: %w", err)
+		}
+	}
+}
+
+// getImageTypeAndHistoryAndDiffIDs returns the manifest type, history, and diff IDs list of imageID.
+// Successful lookups are memoized in b.imageInfoCache; errors are returned
+// without being cached, so a failed lookup will be retried on the next call.
+func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, []v1.History, []digest.Digest, error) {
+	// Fast path: serve from the cache when we've already inspected this image.
+	b.imageInfoLock.Lock()
+	imageInfo, ok := b.imageInfoCache[imageID]
+	b.imageInfoLock.Unlock()
+	if ok {
+		return imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err
+	}
+	imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, "@"+imageID)
+	if err != nil {
+		return "", nil, nil, fmt.Errorf("getting image reference %q: %w", imageID, err)
+	}
+	ref, err := imageRef.NewImage(ctx, nil)
+	if err != nil {
+		return "", nil, nil, fmt.Errorf("creating new image from reference to image %q: %w", imageID, err)
+	}
+	defer ref.Close()
+	oci, err := ref.OCIConfig(ctx)
+	if err != nil {
+		return "", nil, nil, fmt.Errorf("getting possibly-converted OCI config of image %q: %w", imageID, err)
+	}
+	manifestBytes, manifestFormat, err := ref.Manifest(ctx)
+	if err != nil {
+		return "", nil, nil, fmt.Errorf("getting manifest of image %q: %w", imageID, err)
+	}
+	// Some transports don't report a MIME type; sniff it from the bytes.
+	if manifestFormat == "" && len(manifestBytes) > 0 {
+		manifestFormat = manifest.GuessMIMEType(manifestBytes)
+	}
+	b.imageInfoLock.Lock()
+	b.imageInfoCache[imageID] = imageTypeAndHistoryAndDiffIDs{
+		manifestType: manifestFormat,
+		history:      oci.History,
+		diffIDs:      oci.RootFS.DiffIDs,
+		err:          nil,
+	}
+	b.imageInfoLock.Unlock()
+	return manifestFormat, oci.History, oci.RootFS.DiffIDs, nil
+}
+
+// buildStage runs a single stage of the build: it resolves the stage's base
+// image, optionally appends a synthetic LABEL instruction (for the final
+// stage) and prepends a synthetic ENV instruction (when the build was given
+// environment variables), then creates a StageExecutor and executes it.
+// It returns the stage's image ID, a canonical reference for it, and whether
+// the stage consisted of nothing but its base image.
+func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, onlyBaseImage bool, err error) {
+	stage := stages[stageIndex]
+	ib := stage.Builder
+	node := stage.Node
+	base, err := ib.From(node)
+	if err != nil {
+		logrus.Debugf("buildStage(node.Children=%#v)", node.Children)
+		return "", nil, false, err
+	}
+
+	// If this is the last stage, then the image that we produce at
+	// its end should be given the desired output name.
+	output := ""
+	if stageIndex == len(stages)-1 {
+		output = b.output
+		// Check if any labels were passed in via the API, and add a final line
+		// to the Dockerfile that would provide the same result.
+		// Reason: Docker adds label modification as a last step which can be
+		// processed like regular steps, and if no modification is done to
+		// layers, its easier to re-use cached layers.
+		if len(b.labels) > 0 {
+			var labelLine string
+			labels := append([]string{}, b.labels...)
+			for _, labelSpec := range labels {
+				label := strings.SplitN(labelSpec, "=", 2)
+				key := label[0]
+				value := ""
+				if len(label) > 1 {
+					value = label[1]
+				}
+				// check only for an empty key since docker allows empty values
+				if key != "" {
+					labelLine += fmt.Sprintf(" %q=%q", key, value)
+				}
+			}
+			if len(labelLine) > 0 {
+				additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("LABEL" + labelLine + "\n"))
+				if err != nil {
+					return "", nil, false, fmt.Errorf("while adding additional LABEL step: %w", err)
+				}
+				stage.Node.Children = append(stage.Node.Children, additionalNode.Children...)
+			}
+		}
+	}
+
+	// If this stage is starting out with environment variables that were
+	// passed in via our API, we should include them in the history, since
+	// they affect RUN instructions in this stage.
+	if len(b.envs) > 0 {
+		var envLine string
+		for _, envSpec := range b.envs {
+			env := strings.SplitN(envSpec, "=", 2)
+			key := env[0]
+			if len(env) > 1 {
+				value := env[1]
+				envLine += fmt.Sprintf(" %q=%q", key, value)
+			} else {
+				// Entries are expected to have been resolved to KEY=VALUE
+				// form before reaching this point.
+				return "", nil, false, fmt.Errorf("BUG: unresolved environment variable: %q", key)
+			}
+		}
+		if len(envLine) > 0 {
+			additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader("ENV" + envLine + "\n"))
+			if err != nil {
+				return "", nil, false, fmt.Errorf("while adding additional ENV step: %w", err)
+			}
+			// make this the first instruction in the stage after its FROM instruction
+			stage.Node.Children = append(additionalNode.Children, stage.Node.Children...)
+		}
+	}
+
+	b.stagesLock.Lock()
+	stageExecutor := b.startStage(ctx, &stage, stages, output)
+	// Install a default per-step logger when the caller didn't supply one.
+	if stageExecutor.log == nil {
+		stepCounter := 0
+		stageExecutor.log = func(format string, args ...interface{}) {
+			prefix := b.logPrefix
+			if len(stages) > 1 {
+				prefix += fmt.Sprintf("[%d/%d] ", stageIndex+1, len(stages))
+			}
+			if !strings.HasPrefix(format, "COMMIT") {
+				stepCounter++
+				prefix += fmt.Sprintf("STEP %d", stepCounter)
+				if stepCounter <= len(stage.Node.Children)+1 {
+					prefix += fmt.Sprintf("/%d", len(stage.Node.Children)+1)
+				}
+				prefix += ": "
+			}
+			suffix := "\n"
+			fmt.Fprintf(stageExecutor.executor.out, prefix+format+suffix, args...)
+		}
+	}
+	b.stagesLock.Unlock()
+
+	// If this a single-layer build, or if it's a multi-layered
+	// build and b.forceRmIntermediateCtrs is set, make sure we
+	// remove the intermediate/build containers, regardless of
+	// whether or not the stage's build fails.
+	if b.forceRmIntermediateCtrs || !b.layers {
+		b.stagesLock.Lock()
+		cleanupStages[stage.Position] = stageExecutor
+		b.stagesLock.Unlock()
+	}
+
+	// Build this stage.
+	if imageID, ref, onlyBaseImage, err = stageExecutor.Execute(ctx, base); err != nil {
+		return "", nil, onlyBaseImage, err
+	}
+
+	// The stage succeeded, so remove its build container if we're
+	// told to delete successful intermediate/build containers for
+	// multi-layered builds.
+	// Skip cleanup if the stage has no instructions.
+	if b.removeIntermediateCtrs && len(stage.Node.Children) > 0 {
+		b.stagesLock.Lock()
+		cleanupStages[stage.Position] = stageExecutor
+		b.stagesLock.Unlock()
+	}
+
+	return imageID, ref, onlyBaseImage, nil
+}
+
+// stageDependencyInfo records, for a single stage, which other stages it
+// draws from and whether the build target (transitively) needs it.
+type stageDependencyInfo struct {
+	Name           string   // the stage's name (or its numeric position, as a string)
+	Position       int      // the stage's index in the Dockerfile
+	Needs          []string // names of stages this stage draws from (FROM or COPY --from)
+	NeededByTarget bool     // true once the target is known to depend on this stage
+}
+
+// Marks `NeededByTarget` as true for the given stage and all its dependency stages as true recursively.
+func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo, stage string) {
+	info, known := dependencyMap[stage]
+	if !known || info.NeededByTarget {
+		// Unknown stage, or one we've already visited: nothing to do.
+		return
+	}
+	info.NeededByTarget = true
+	for _, need := range info.Needs {
+		markDependencyStagesForTarget(dependencyMap, need)
+	}
+}
+
+// warnOnUnsetBuildArgs logs a warning for every ARG declared in a stage the
+// target needs which has neither a default value in the Containerfile nor a
+// value supplied via args, and which is not a built-in or global arg.
+func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMap map[string]*stageDependencyInfo, args map[string]string) {
+	// argFound tracks ARG names that were declared with a non-empty default.
+	argFound := make(map[string]bool)
+	for _, stage := range stages {
+		node := stage.Node // first line
+		for node != nil { // each line
+			for _, child := range node.Children {
+				switch strings.ToUpper(child.Value) {
+				case "ARG":
+					// Guard against a bare "ARG" with no name, which would
+					// otherwise dereference a nil child.Next.
+					if child.Next == nil {
+						continue
+					}
+					argName := child.Next.Value
+					if strings.Contains(argName, "=") {
+						// SplitN matches the NAME=VALUE parsing used elsewhere
+						// in this file, and keeps a default that itself
+						// contains "=" intact.
+						res := strings.SplitN(argName, "=", 2)
+						if res[1] != "" {
+							argFound[res[0]] = true
+						}
+					}
+					argHasValue := true
+					if !strings.Contains(argName, "=") {
+						argHasValue = argFound[argName]
+					}
+					if _, ok := args[argName]; !argHasValue && !ok {
+						shouldWarn := true
+						// Don't warn about stages the target doesn't use,
+						// unless the caller disabled stage-skipping.
+						if stageDependencyInfo, ok := dependencyMap[stage.Name]; ok {
+							if !stageDependencyInfo.NeededByTarget && b.skipUnusedStages != types.OptionalBoolFalse {
+								shouldWarn = false
+							}
+						}
+						// Built-in args and global (pre-first-stage) args are
+						// expected to be unset, so they never warrant a warning.
+						if _, isBuiltIn := builtinAllowedBuildArgs[argName]; isBuiltIn {
+							shouldWarn = false
+						}
+						if _, isGlobalArg := b.globalArgs[argName]; isGlobalArg {
+							shouldWarn = false
+						}
+						if shouldWarn {
+							b.logger.Warnf("missing %q build argument. Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=<VALUE>", argName))
+						}
+					}
+				default:
+					continue
+				}
+			}
+			node = node.Next
+		}
+	}
+}
+
+// Build takes care of the details of running Prepare/Execute/Commit/Delete
+// over each of the one or more parsed Dockerfiles and stages.
+func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
+ if len(stages) == 0 {
+ return "", nil, errors.New("building: no stages to build")
+ }
+ var cleanupImages []string
+ cleanupStages := make(map[int]*StageExecutor)
+
+ stdout := b.out
+ if b.quiet {
+ b.out = io.Discard
+ }
+
+ cleanup := func() error {
+ var lastErr error
+ // Clean up any containers associated with the final container
+ // built by a stage, for stages that succeeded, since we no
+ // longer need their filesystem contents.
+
+ b.stagesLock.Lock()
+ for _, stage := range cleanupStages {
+ if err := stage.Delete(); err != nil {
+ logrus.Debugf("Failed to cleanup stage containers: %v", err)
+ lastErr = err
+ }
+ }
+ cleanupStages = nil
+ b.stagesLock.Unlock()
+
+ // Clean up any builders that we used to get data from images.
+ for _, builder := range b.containerMap {
+ if err := builder.Delete(); err != nil {
+ logrus.Debugf("Failed to cleanup image containers: %v", err)
+ lastErr = err
+ }
+ }
+ b.containerMap = nil
+ // Clean up any intermediate containers associated with stages,
+ // since we're not keeping them for debugging.
+ if b.removeIntermediateCtrs {
+ if err := b.deleteSuccessfulIntermediateCtrs(); err != nil {
+ logrus.Debugf("Failed to cleanup intermediate containers: %v", err)
+ lastErr = err
+ }
+ }
+ // Remove images from stages except the last one, since we're
+ // not going to use them as a starting point for any new
+ // stages.
+ for i := range cleanupImages {
+ removeID := cleanupImages[len(cleanupImages)-i-1]
+ if removeID == imageID {
+ continue
+ }
+ if _, err := b.store.DeleteImage(removeID, true); err != nil {
+ logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err)
+ if b.forceRmIntermediateCtrs || !errors.Is(err, storage.ErrImageUsedByContainer) {
+ lastErr = err
+ }
+ }
+ }
+ cleanupImages = nil
+
+ if b.rusageLogFile != nil && b.rusageLogFile != b.out {
+ // we deliberately ignore the error here, as this
+ // function can be called multiple times
+ if closer, ok := b.rusageLogFile.(interface{ Close() error }); ok {
+ closer.Close()
+ }
+ }
+ return lastErr
+ }
+
+ defer func() {
+ if cleanupErr := cleanup(); cleanupErr != nil {
+ if err == nil {
+ err = cleanupErr
+ } else {
+ err = fmt.Errorf("%v: %w", cleanupErr.Error(), err)
+ }
+ }
+ }()
+
+ // dependencyMap contains dependencyInfo for each stage,
+ // dependencyInfo is used later to mark if a particular
+ // stage is needed by target or not.
+ dependencyMap := make(map[string]*stageDependencyInfo)
+ // Build maps of every named base image and every referenced stage root
+ // filesystem. Individual stages can use them to determine whether or
+ // not they can skip certain steps near the end of their stages.
+ for stageIndex, stage := range stages {
+ dependencyMap[stage.Name] = &stageDependencyInfo{Name: stage.Name, Position: stage.Position}
+ node := stage.Node // first line
+ for node != nil { // each line
+ for _, child := range node.Children { // tokens on this line, though we only care about the first
+ switch strings.ToUpper(child.Value) { // first token - instruction
+ case "FROM":
+ if child.Next != nil { // second token on this line
+ // If we have a fromOverride, replace the value of
+ // image name for the first FROM in the Containerfile.
+ if b.fromOverride != "" {
+ child.Next.Value = b.fromOverride
+ b.fromOverride = ""
+ }
+ base := child.Next.Value
+ if base != "" && base != buildah.BaseImageFakeName {
+ if replaceBuildContext, ok := b.additionalBuildContexts[child.Next.Value]; ok {
+ if replaceBuildContext.IsImage {
+ child.Next.Value = replaceBuildContext.Value
+ base = child.Next.Value
+ }
+ }
+ headingArgs := argsMapToSlice(stage.Builder.HeadingArgs)
+ userArgs := argsMapToSlice(stage.Builder.Args)
+ // append heading args so if --build-arg key=value is not
+ // specified but default value is set in Containerfile
+ // via `ARG key=value` so default value can be used.
+ userArgs = append(headingArgs, userArgs...)
+ baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
+ if err != nil {
+ return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
+ }
+ b.baseMap[baseWithArg] = true
+ logrus.Debugf("base for stage %d: %q", stageIndex, base)
+ // Check if selected base is not an additional
+ // build context and if base is a valid stage
+ // add it to current stage's dependency tree.
+ if _, ok := b.additionalBuildContexts[baseWithArg]; !ok {
+ if _, ok := dependencyMap[baseWithArg]; ok {
+ // update current stage's dependency info
+ currentStageInfo := dependencyMap[stage.Name]
+ currentStageInfo.Needs = append(currentStageInfo.Needs, baseWithArg)
+ }
+ }
+ }
+ }
+ case "ADD", "COPY":
+ for _, flag := range child.Flags { // flags for this instruction
+ if strings.HasPrefix(flag, "--from=") {
+ // TODO: this didn't undergo variable and
+ // arg expansion, so if the previous stage
+ // was named using argument values, we might
+ // not record the right value here.
+ rootfs := strings.TrimPrefix(flag, "--from=")
+ b.rootfsMap[rootfs] = true
+ logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
+ // Populate dependency tree and check
+ // if following ADD or COPY needs any other
+ // stage.
+ stageName := rootfs
+ headingArgs := argsMapToSlice(stage.Builder.HeadingArgs)
+ userArgs := argsMapToSlice(stage.Builder.Args)
+ // append heading args so if --build-arg key=value is not
+ // specified but default value is set in Containerfile
+ // via `ARG key=value` so default value can be used.
+ userArgs = append(headingArgs, userArgs...)
+ baseWithArg, err := imagebuilder.ProcessWord(stageName, userArgs)
+ if err != nil {
+ return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", stageName, err)
+ }
+ stageName = baseWithArg
+ // If --from=<index> convert index to name
+ if index, err := strconv.Atoi(stageName); err == nil {
+ stageName = stages[index].Name
+ }
+ // Check if selected base is not an additional
+ // build context and if base is a valid stage
+ // add it to current stage's dependency tree.
+ if _, ok := b.additionalBuildContexts[stageName]; !ok {
+ if _, ok := dependencyMap[stageName]; ok {
+ // update current stage's dependency info
+ currentStageInfo := dependencyMap[stage.Name]
+ currentStageInfo.Needs = append(currentStageInfo.Needs, stageName)
+ }
+ }
+ }
+ }
+ case "RUN":
+ for _, flag := range child.Flags { // flags for this instruction
+ // We need to populate dependency tree of stages
+ // if it is using `--mount` and `from=` field is set
+ // and `from=` points to a stage consider it in
+ // dependency calculation.
+ if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") {
+ mountFlags := strings.TrimPrefix(flag, "--mount=")
+ fields := strings.Split(mountFlags, ",")
+ for _, field := range fields {
+ if strings.HasPrefix(field, "from=") {
+ fromField := strings.SplitN(field, "=", 2)
+ if len(fromField) > 1 {
+ mountFrom := fromField[1]
+ // Check if this base is a stage if yes
+ // add base to current stage's dependency tree
+ // but also confirm if this is not in additional context.
+ if _, ok := b.additionalBuildContexts[mountFrom]; !ok {
+ // Treat from as a rootfs we need to preserve
+ b.rootfsMap[mountFrom] = true
+ if _, ok := dependencyMap[mountFrom]; ok {
+ // update current stage's dependency info
+ currentStageInfo := dependencyMap[stage.Name]
+ currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom)
+ }
+ }
+ } else {
+ return "", nil, fmt.Errorf("invalid value for field `from=`: %q", fromField[1])
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ node = node.Next // next line
+ }
+ // Last stage is always target stage.
+ // Since last/target stage is processed
+ // let's calculate dependency map of stages
+ // so we can mark stages which can be skipped.
+ if stage.Position == (len(stages) - 1) {
+ markDependencyStagesForTarget(dependencyMap, stage.Name)
+ }
+ }
+ b.warnOnUnsetBuildArgs(stages, dependencyMap, b.args)
+
+ type Result struct {
+ Index int
+ ImageID string
+ OnlyBaseImage bool
+ Ref reference.Canonical
+ Error error
+ }
+
+ ch := make(chan Result, len(stages))
+
+ if b.stagesSemaphore == nil {
+ b.stagesSemaphore = semaphore.NewWeighted(int64(len(stages)))
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(len(stages))
+
+ go func() {
+ cancel := false
+ for stageIndex := range stages {
+ index := stageIndex
+ // Acquire the semaphore before creating the goroutine so we are sure they
+ // run in the specified order.
+ if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
+ cancel = true
+ b.lastError = err
+ ch <- Result{
+ Index: index,
+ Error: err,
+ }
+ wg.Done()
+ continue
+ }
+ b.stagesLock.Lock()
+ cleanupStages := cleanupStages
+ b.stagesLock.Unlock()
+ go func() {
+ defer b.stagesSemaphore.Release(1)
+ defer wg.Done()
+ if cancel || cleanupStages == nil {
+ var err error
+ if stages[index].Name != strconv.Itoa(index) {
+ err = fmt.Errorf("not building stage %d: build canceled", index)
+ } else {
+ err = fmt.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
+ }
+ ch <- Result{
+ Index: index,
+ Error: err,
+ }
+ return
+ }
+ // Skip stage if it is not needed by TargetStage
+ // or any of its dependency stages and `SkipUnusedStages`
+ // is not set to `false`.
+ if stageDependencyInfo, ok := dependencyMap[stages[index].Name]; ok {
+ if !stageDependencyInfo.NeededByTarget && b.skipUnusedStages != types.OptionalBoolFalse {
+ logrus.Debugf("Skipping stage with Name %q and index %d since its not needed by the target stage", stages[index].Name, index)
+ ch <- Result{
+ Index: index,
+ Error: nil,
+ }
+ return
+ }
+ }
+ stageID, stageRef, stageOnlyBaseImage, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
+ if stageErr != nil {
+ cancel = true
+ ch <- Result{
+ Index: index,
+ Error: stageErr,
+ OnlyBaseImage: stageOnlyBaseImage,
+ }
+ return
+ }
+
+ ch <- Result{
+ Index: index,
+ ImageID: stageID,
+ Ref: stageRef,
+ OnlyBaseImage: stageOnlyBaseImage,
+ Error: nil,
+ }
+ }()
+ }
+ }()
+ go func() {
+ wg.Wait()
+ close(ch)
+ }()
+
+ for r := range ch {
+ stage := stages[r.Index]
+
+ b.stagesLock.Lock()
+ b.terminatedStage[stage.Name] = r.Error
+ b.terminatedStage[strconv.Itoa(stage.Position)] = r.Error
+
+ if r.Error != nil {
+ b.stagesLock.Unlock()
+ b.lastError = r.Error
+ return "", nil, r.Error
+ }
+
+ // If this is an intermediate stage, make a note of the ID, so
+ // that we can look it up later.
+ if r.Index < len(stages)-1 && r.ImageID != "" {
+ b.imageMap[stage.Name] = r.ImageID
+ // We're not populating the cache with intermediate
+ // images, so add this one to the list of images that
+ // we'll remove later.
			// Only remove intermediate image if `--layers` is not provided
+ // or following stage was not only a base image ( i.e a different image ).
+ if !b.layers && !r.OnlyBaseImage {
+ cleanupImages = append(cleanupImages, r.ImageID)
+ }
+ }
+ if r.Index == len(stages)-1 {
+ imageID = r.ImageID
+ ref = r.Ref
+ }
+ b.stagesLock.Unlock()
+ }
+
+ if len(b.unusedArgs) > 0 {
+ unusedList := make([]string, 0, len(b.unusedArgs))
+ for k := range b.unusedArgs {
+ unusedList = append(unusedList, k)
+ }
+ sort.Strings(unusedList)
+ fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList)
+ }
+
+ // Add additional tags and print image names recorded in storage
+ if dest, err := b.resolveNameToImageRef(b.output); err == nil {
+ switch dest.Transport().Name() {
+ case storageTransport.Transport.Name():
+ _, img, err := storageTransport.ResolveReference(dest)
+ if err != nil {
+ return imageID, ref, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
+ }
+ if len(b.additionalTags) > 0 {
+ if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
+ return imageID, ref, fmt.Errorf("setting image names to %v: %w", append(img.Names, b.additionalTags...), err)
+ }
+ logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
+ }
+ // Report back the caller the tags applied, if any.
+ _, img, err = storageTransport.ResolveReference(dest)
+ if err != nil {
+ return imageID, ref, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
+ }
+ for _, name := range img.Names {
+ fmt.Fprintf(b.out, "Successfully tagged %s\n", name)
+ }
+
+ default:
+ if len(b.additionalTags) > 0 {
+ b.logger.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name())
+ }
+ }
+ }
+
+ if err := cleanup(); err != nil {
+ return "", nil, err
+ }
+ logrus.Debugf("printing final image id %q", imageID)
+ if b.iidfile != "" {
+ if err = os.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
+ return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err)
+ }
+ } else {
+ if _, err := stdout.Write([]byte(imageID + "\n")); err != nil {
+ return imageID, ref, fmt.Errorf("failed to write image ID to stdout: %w", err)
+ }
+ }
+ return imageID, ref, nil
+}
+
+// deleteSuccessfulIntermediateCtrs goes through the container IDs in each
+// stage's containerIDs list and deletes the containers associated with those
+// IDs.
+func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
+ var lastErr error
+ for _, s := range b.stages {
+ for _, ctr := range s.containerIDs {
+ if err := b.store.DeleteContainer(ctr); err != nil {
+ b.logger.Errorf("error deleting build container %q: %v\n", ctr, err)
+ lastErr = err
+ }
+ }
+ // The stages map includes some stages under multiple keys, so
+ // clearing their lists after we process a given stage is
+ // necessary to avoid triggering errors that would occur if we
+ // tried to delete a given stage's containers multiple times.
+ s.containerIDs = nil
+ }
+ return lastErr
+}
diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go
new file mode 100644
index 0000000..9398dce
--- /dev/null
+++ b/imagebuildah/stage_executor.go
@@ -0,0 +1,2250 @@
+package imagebuildah
+
+import (
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ buildahdocker "github.com/containers/buildah/docker"
+ "github.com/containers/buildah/internal"
+ "github.com/containers/buildah/internal/tmpdir"
+ internalUtil "github.com/containers/buildah/internal/util"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/pkg/rusage"
+ "github.com/containers/buildah/util"
+ config "github.com/containers/common/pkg/config"
+ cp "github.com/containers/image/v5/copy"
+ imagedocker "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/unshare"
+ docker "github.com/fsouza/go-dockerclient"
+ buildkitparser "github.com/moby/buildkit/frontend/dockerfile/parser"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/openshift/imagebuilder"
+ "github.com/openshift/imagebuilder/dockerfile/command"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
+ "github.com/sirupsen/logrus"
+)
+
// StageExecutor bundles up what we need to know when executing one stage of a
// (possibly multi-stage) build.
// Each stage may need to produce an image to be used as the base in a later
// stage (with the last stage's image being the end product of the build), and
// it may need to leave its working container in place so that the container's
// root filesystem's contents can be used as the source for a COPY instruction
// in a later stage.
// Each stage has its own base image, so it starts with its own configuration
// and set of volumes.
// If we're naming the result of the build, only the last stage will apply that
// name to the image that it produces.
type StageExecutor struct {
	ctx context.Context
	// executor is the overall build's Executor, shared by all stages.
	executor *Executor
	log      func(format string, args ...interface{})
	// index is this stage's position in the build's ordered stage list.
	index  int
	stages imagebuilder.Stages
	name   string
	// builder is the working container for this stage.
	builder   *buildah.Builder
	// preserved counts volumes preserved so far; used by Preserve() to
	// generate unique cache file names ("volume%d.tar").
	preserved int
	volumes   imagebuilder.VolumeSet
	// volumeCache maps a preserved volume path to the tar file that
	// caches its contents; volumeCacheInfo keeps the stat info needed
	// to restore ownership/permissions/timestamps afterwards.
	volumeCache     map[string]string
	volumeCacheInfo map[string]os.FileInfo
	// mountPoint is the root of the working container's filesystem.
	mountPoint string
	output     string
	// containerIDs lists containers to delete when the stage is cleaned up.
	containerIDs []string
	stage        *imagebuilder.Stage
	// didExecute records whether any steps were actually run for this stage.
	didExecute            bool
	argsFromContainerfile []string
}
+
// Preserve informs the stage executor that from this point on, it needs to
// ensure that only COPY and ADD instructions can modify the contents of this
// directory or anything below it.
// The StageExecutor handles this by caching the contents of directories which
// have been marked this way before executing a RUN instruction, invalidating
// that cache when an ADD or COPY instruction sets any location under the
// directory as the destination, and using the cache to reset the contents of
// the directory tree after processing each RUN instruction.
// It would be simpler if we could just mark the directory as a read-only bind
// mount of itself during Run(), but the directory is expected to remain
// writeable while the RUN instruction is being handled, even if any changes
// made within the directory are ultimately discarded.
func (s *StageExecutor) Preserve(path string) error {
	logrus.Debugf("PRESERVE %q", path)
	if s.volumes.Covers(path) {
		// This path is already a subdirectory of a volume path that
		// we're already preserving, so there's nothing new to be done
		// except ensure that it exists.
		createdDirPerms := os.FileMode(0755)
		if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
			return fmt.Errorf("ensuring volume path exists: %w", err)
		}
		if err := s.volumeCacheInvalidate(path); err != nil {
			return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
		}
		return nil
	}
	// Figure out where the cache for this volume would be stored.
	// s.preserved is a monotonically-increasing counter, so each newly
	// preserved volume gets its own numbered tar file in the container's
	// per-container directory.
	s.preserved++
	cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
	if err != nil {
		return fmt.Errorf("unable to locate temporary directory for container")
	}
	cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
	// Save info about the top level of the location that we'll be archiving.
	var archivedPath string

	// Try and resolve the symlink (if one exists)
	// Set archivedPath and path based on whether a symlink is found or not
	if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil {
		symLink, err := filepath.Rel(s.mountPoint, evaluated)
		if err != nil {
			return fmt.Errorf("making evaluated path %q relative to %q: %w", evaluated, s.mountPoint, err)
		}
		if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) {
			// The resolved target escapes the rootfs; refuse it.
			return fmt.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
		}
		archivedPath = evaluated
		path = string(os.PathSeparator) + symLink
	} else {
		return fmt.Errorf("evaluating path %q: %w", path, err)
	}

	// Stat the volume location (creating it first if necessary) so that
	// its mode/ownership/timestamps can be restored after RUN steps.
	st, err := os.Stat(archivedPath)
	if errors.Is(err, os.ErrNotExist) {
		createdDirPerms := os.FileMode(0755)
		if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
			return fmt.Errorf("ensuring volume path exists: %w", err)
		}
		st, err = os.Stat(archivedPath)
	}
	if err != nil {
		logrus.Debugf("error reading info about %q: %v", archivedPath, err)
		return err
	}
	s.volumeCacheInfo[path] = st
	if !s.volumes.Add(path) {
		// We established above that this path is not covered by an
		// already-preserved volume, so Add() should have succeeded;
		// treat a refusal as an internal error.
		return fmt.Errorf("adding %q to the volume cache", path)
	}
	s.volumeCache[path] = cacheFile
	// Now prune cache files for volumes that are now supplanted by this one.
	removed := []string{}
	for cachedPath := range s.volumeCache {
		// Walk our list of cached volumes, and check that they're
		// still in the list of locations that we need to cache.
		found := false
		for _, volume := range s.volumes {
			if volume == cachedPath {
				// We need to keep this volume's cache.
				found = true
				break
			}
		}
		if !found {
			// We don't need to keep this volume's cache. Make a
			// note to remove it.
			removed = append(removed, cachedPath)
		}
	}
	// Actually remove the caches that we decided to remove.
	for _, cachedPath := range removed {
		archivedPath := filepath.Join(s.mountPoint, cachedPath)
		logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath])
		if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
			if errors.Is(err, os.ErrNotExist) {
				// Already gone; nothing to clean up.
				continue
			}
			return err
		}
		delete(s.volumeCache, cachedPath)
	}
	return nil
}
+
+// Remove any volume cache item which will need to be re-saved because we're
+// writing to part of it.
+func (s *StageExecutor) volumeCacheInvalidate(path string) error {
+ invalidated := []string{}
+ for cachedPath := range s.volumeCache {
+ if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) {
+ invalidated = append(invalidated, cachedPath)
+ }
+ }
+ for _, cachedPath := range invalidated {
+ if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ return err
+ }
+ archivedPath := filepath.Join(s.mountPoint, cachedPath)
+ logrus.Debugf("invalidated volume cache %q for %q from %q", archivedPath, path, s.volumeCache[cachedPath])
+ }
+ return nil
+}
+
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
	for cachedPath, cacheFile := range s.volumeCache {
		// Resolve any symlinks so that we archive the real location of
		// the volume inside the rootfs.
		archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
		if err != nil {
			return nil, fmt.Errorf("evaluating volume path: %w", err)
		}
		relativePath, err := filepath.Rel(s.mountPoint, archivedPath)
		if err != nil {
			return nil, fmt.Errorf("converting %q into a path relative to %q: %w", archivedPath, s.mountPoint, err)
		}
		if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
			// The resolved path escapes the rootfs; refuse it.
			return nil, fmt.Errorf("converting %q into a path relative to %q", archivedPath, s.mountPoint)
		}
		// If a cache file already exists, this volume's contents were
		// saved earlier and there is nothing left to do for it.
		_, err = os.Stat(cacheFile)
		if err == nil {
			logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
			continue
		}
		if !errors.Is(err, os.ErrNotExist) {
			return nil, err
		}
		createdDirPerms := os.FileMode(0755)
		if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
			return nil, fmt.Errorf("ensuring volume path exists: %w", err)
		}
		logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
		cache, err := os.Create(cacheFile)
		if err != nil {
			return nil, err
		}
		// NOTE(review): these defers run at function return rather than
		// per iteration, so every cache file and tar reader stays open
		// until all volumes have been processed.
		defer cache.Close()
		rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
		if err != nil {
			return nil, fmt.Errorf("archiving %q: %w", archivedPath, err)
		}
		defer rc.Close()
		_, err = io.Copy(cache, rc)
		if err != nil {
			return nil, fmt.Errorf("archiving %q to %q: %w", archivedPath, cacheFile, err)
		}
		mount := specs.Mount{
			Source:      archivedPath,
			Destination: string(os.PathSeparator) + relativePath,
			Type:        "bind",
			Options:     []string{"private"},
		}
		mounts = append(mounts, mount)
	}
	// NOTE(review): the bind mounts assembled above are discarded here —
	// the function always returns nil mounts. Confirm whether returning
	// `mounts` was intended, or whether building them is dead code.
	return nil, nil
}
+
// Restore the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
	for cachedPath, cacheFile := range s.volumeCache {
		// Resolve symlinks so we restore into the volume's real
		// location inside the rootfs.
		archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
		if err != nil {
			return fmt.Errorf("evaluating volume path: %w", err)
		}
		logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
		cache, err := os.Open(cacheFile)
		if err != nil {
			return err
		}
		// NOTE(review): this defer fires at function return, so every
		// cache file stays open until all volumes have been restored.
		defer cache.Close()
		// Discard whatever the RUN step left in the volume, then
		// recreate it empty before unpacking the saved snapshot.
		if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil {
			return err
		}
		createdDirPerms := os.FileMode(0755)
		if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
			return err
		}
		err = chrootarchive.Untar(cache, archivedPath, nil)
		if err != nil {
			return fmt.Errorf("extracting archive at %q: %w", archivedPath, err)
		}
		// Put back the mode/ownership/timestamps recorded by Preserve()
		// when the volume was first marked for preservation.
		if st, ok := s.volumeCacheInfo[cachedPath]; ok {
			if err := os.Chmod(archivedPath, st.Mode()); err != nil {
				return err
			}
			uid := 0
			gid := 0
			if st.Sys() != nil {
				uid = util.UID(st)
				gid = util.GID(st)
			}
			if err := os.Chown(archivedPath, uid, gid); err != nil {
				return err
			}
			if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil {
				return err
			}
		}
	}
	return nil
}
+
+// Save the contents of each of the executor's list of volumes for which we
+// don't already have a cache file.
+func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
+ for cachedPath := range s.volumeCache {
+ err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("ensuring volume exists: %w", err)
+ }
+ volumePath := filepath.Join(s.mountPoint, cachedPath)
+ mount := specs.Mount{
+ Source: volumePath,
+ Destination: cachedPath,
+ Options: []string{"O", "private"},
+ }
+ mounts = append(mounts, mount)
+ }
+ return mounts, nil
+}
+
// Reset the contents of each of the executor's list of volumes.
func (s *StageExecutor) volumeCacheRestoreOverlay() error {
	// Nothing to do here: with the overlay strategy, writes during RUN go
	// to the scratch layers set up by volumeCacheSaveOverlay, so the
	// underlying volume directories were presumably never modified —
	// verify against how the "O" mounts are torn down after Run().
	return nil
}
+
+// Save the contents of each of the executor's list of volumes for which we
+// don't already have a cache file.
+func (s *StageExecutor) volumeCacheSave() (mounts []specs.Mount, err error) {
+ switch s.executor.store.GraphDriverName() {
+ case "overlay":
+ return s.volumeCacheSaveOverlay()
+ }
+ return s.volumeCacheSaveVFS()
+}
+
+// Reset the contents of each of the executor's list of volumes.
+func (s *StageExecutor) volumeCacheRestore() error {
+ switch s.executor.store.GraphDriverName() {
+ case "overlay":
+ return s.volumeCacheRestoreOverlay()
+ }
+ return s.volumeCacheRestoreVFS()
+}
+
// Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
	// Restart the content digester so that the digest recorded for this
	// instruction reflects only the content that this ADD/COPY brings in.
	s.builder.ContentDigester.Restart()
	return s.performCopy(excludes, copies...)
}
+
// performCopy carries out a batch of ADD/COPY instructions for this stage.
// For each copy it invalidates any volume caches under the destination,
// resolves where the sources actually live — the build context, an
// additional build context (path, URL, or image), an earlier stage's rootfs,
// or temporary files materialized from heredoc bodies — and then delegates
// the actual transfer to the builder's Add method.
func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Copy) error {
	copiesExtend := []imagebuilder.Copy{}
	for _, copy := range copies {
		if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
			return err
		}
		var sources []string
		// The From field says to read the content from another
		// container. Update the ID mappings and
		// all-content-comes-from-below-this-directory value.
		var idMappingOptions *define.IDMappingOptions
		var copyExcludes []string
		stripSetuid := false
		stripSetgid := false
		preserveOwnership := false
		contextDir := s.executor.contextDir
		// If we are copying files via heredoc syntax, then
		// it's time to create these temporary files on the host
		// and copy them into the container.
		if len(copy.Files) > 0 {
			// If we are copying files from heredoc syntax, there
			// may be regular files from the context as well, so split
			// and process them differently.
			if len(copy.Src) > len(copy.Files) {
				regularSources := []string{}
				for _, src := range copy.Src {
					// If this source is not a heredoc, then it is a regular file from
					// build context or from another stage (`--from=`) so treat this differently.
					if !strings.HasPrefix(src, "<<") {
						regularSources = append(regularSources, src)
					}
				}
				copyEntry := copy
				// Remove heredocs, if any, since we are already processing them:
				// create a new entry whose sources contain only the regular
				// files, since regular files can have a different context than
				// heredoc files.
				copyEntry.Files = nil
				copyEntry.Src = regularSources
				copiesExtend = append(copiesExtend, copyEntry)
			}
			copySources := []string{}
			for _, file := range copy.Files {
				data := file.Data
				// remove first break line added while parsing heredoc
				data = strings.TrimPrefix(data, "\n")
				// add breakline when heredoc ends for docker compat
				data = data + "\n"
				tmpFile, err := os.Create(filepath.Join(parse.GetTempDir(), path.Base(filepath.ToSlash(file.Name))))
				if err != nil {
					return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err)
				}
				err = tmpFile.Chmod(0644) // 644 is consistent with buildkit
				if err != nil {
					tmpFile.Close()
					return fmt.Errorf("unable to chmod tmp file created for COPY instruction at %q: %w", tmpFile.Name(), err)
				}
				defer os.Remove(tmpFile.Name())
				_, err = tmpFile.WriteString(data)
				if err != nil {
					tmpFile.Close()
					return fmt.Errorf("unable to write contents of heredoc file at %q: %w", tmpFile.Name(), err)
				}
				copySources = append(copySources, filepath.Base(tmpFile.Name()))
				tmpFile.Close()
			}
			contextDir = parse.GetTempDir()
			copy.Src = copySources
		}

		if len(copy.From) > 0 && len(copy.Files) == 0 {
			// If from has an argument within it, resolve it to its
			// value. Otherwise just return the value found.
			from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
			if fromErr != nil {
				return fmt.Errorf("unable to resolve argument %q: %w", copy.From, fromErr)
			}
			var additionalBuildContext *define.AdditionalBuildContext
			if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
				additionalBuildContext = foundContext
			} else {
				// Maybe an index is given in COPY --from=index;
				// if that's the case, check whether the provided index
				// exists, and if the stage's short name matches any
				// additionalContext, replace the stage with the
				// additional build context.
				if index, err := strconv.Atoi(from); err == nil {
					from = s.stages[index].Name
				}
				if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
					additionalBuildContext = foundContext
				}
			}
			if additionalBuildContext != nil {
				if !additionalBuildContext.IsImage {
					contextDir = additionalBuildContext.Value
					if additionalBuildContext.IsURL {
						// Check if this buildContext was already
						// downloaded before in any other RUN step. If not,
						// download it and populate the DownloadedCache field
						// for future RUN steps.
						if additionalBuildContext.DownloadedCache == "" {
							// additional context contains a tar file
							// so download and explode tar to buildah
							// temp and point context to that.
							path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
							if err != nil {
								return fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
							}
							// point context dir to the extracted path
							contextDir = filepath.Join(path, subdir)
							// populate cache for next RUN step
							additionalBuildContext.DownloadedCache = contextDir
						} else {
							contextDir = additionalBuildContext.DownloadedCache
						}
					} else {
						// This points to a path on the filesystem.
						// Check to see if there's a .containerignore
						// file; update excludes for this stage before
						// proceeding.
						buildContextExcludes, _, err := parse.ContainerIgnoreFile(additionalBuildContext.Value, "", nil)
						if err != nil {
							return err
						}
						excludes = append(excludes, buildContextExcludes...)
					}
				} else {
					copy.From = additionalBuildContext.Value
				}
			}
			if additionalBuildContext == nil {
				// The source names an earlier stage; make sure that
				// stage has finished building before reading from it.
				if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
					return err
				}
				if other, ok := s.executor.stages[from]; ok && other.index < s.index {
					contextDir = other.mountPoint
					idMappingOptions = &other.builder.IDMappingOptions
				} else if builder, ok := s.executor.containerMap[copy.From]; ok {
					contextDir = builder.MountPoint
					idMappingOptions = &builder.IDMappingOptions
				} else {
					return fmt.Errorf("the stage %q has not been built", copy.From)
				}
			} else if additionalBuildContext.IsImage {
				// Image was selected as additionalContext so only process image.
				mountPoint, err := s.getImageRootfs(s.ctx, copy.From)
				if err != nil {
					return err
				}
				contextDir = mountPoint
			}
			// Original behaviour of buildah still stays true for COPY irrespective of additional context.
			preserveOwnership = true
			copyExcludes = excludes
		} else {
			copyExcludes = append(s.executor.excludes, excludes...)
			stripSetuid = true // did this change between 18.06 and 19.03?
			stripSetgid = true // did this change between 18.06 and 19.03?
		}
		if copy.Download {
			logrus.Debugf("ADD %#v, %#v", excludes, copy)
		} else {
			logrus.Debugf("COPY %#v, %#v", excludes, copy)
		}
		for _, src := range copy.Src {
			if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
				// Source is a URL, allowed for ADD but not COPY.
				if copy.Download {
					sources = append(sources, src)
				} else {
					// returns an error to be compatible with docker
					return fmt.Errorf("source can't be a URL for COPY")
				}
			} else {
				// Relative sources are resolved against whichever
				// context directory was selected above.
				sources = append(sources, filepath.Join(contextDir, src))
			}
		}
		options := buildah.AddAndCopyOptions{
			Chmod:             copy.Chmod,
			Chown:             copy.Chown,
			Checksum:          copy.Checksum,
			PreserveOwnership: preserveOwnership,
			ContextDir:        contextDir,
			Excludes:          copyExcludes,
			IgnoreFile:        s.executor.ignoreFile,
			IDMappingOptions:  idMappingOptions,
			StripSetuidBit:    stripSetuid,
			StripSetgidBit:    stripSetgid,
		}
		if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
			return err
		}
	}
	if len(copiesExtend) > 0 {
		// If we found heredocs and regular files together in the same
		// statement, then we produced new copies above to process the
		// regular files separately, since they need a different context.
		return s.performCopy(excludes, copiesExtend...)
	}
	return nil
}
+
// Returns a map of StageName/ImageName:internal.StageMountDetails for RunOpts if any --mount with from is provided.
// A stage can automatically clean up these mounts when it is removed.
// Check if RUN contains `--mount` with `from`. If yes, pre-mount images or stages from the executor for Run.
// Stages mounted here will be used by Run().
+func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
+ stageMountPoints := make(map[string]internal.StageMountDetails)
+ for _, flag := range mountList {
+ if strings.Contains(flag, "from") {
+ arr := strings.SplitN(flag, ",", 2)
+ if len(arr) < 2 {
+ return nil, fmt.Errorf("Invalid --mount command: %s", flag)
+ }
+ tokens := strings.Split(flag, ",")
+ for _, val := range tokens {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "from":
+ if len(kv) == 1 {
+ return nil, fmt.Errorf("unable to resolve argument for `from=`: bad argument")
+ }
+ if kv[1] == "" {
+ return nil, fmt.Errorf("unable to resolve argument for `from=`: from points to an empty value")
+ }
+ from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
+ if fromErr != nil {
+ return nil, fmt.Errorf("unable to resolve argument %q: %w", kv[1], fromErr)
+ }
+ // If additional buildContext contains this
+ // give priority to that and break if additional
+ // is not an external image.
+ if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok {
+ if additionalBuildContext.IsImage {
+ mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value)
+ if err != nil {
+ return nil, fmt.Errorf("%s from=%s: image found with that name", flag, from)
+ }
+ // The `from` in stageMountPoints should point
+ // to `mountPoint` replaced from additional
+ // build-context. Reason: Parser will use this
+ // `from` to refer from stageMountPoints map later.
+ stageMountPoints[from] = internal.StageMountDetails{IsStage: false, DidExecute: true, MountPoint: mountPoint}
+ break
+ } else {
+ // Most likely this points to path on filesystem
+ // or external tar archive, Treat it as a stage
+ // nothing is different for this. So process and
+ // point mountPoint to path on host and it will
+ // be automatically handled correctly by since
+ // GetBindMount will honor IsStage:false while
+ // processing stageMountPoints.
+ mountPoint := additionalBuildContext.Value
+ if additionalBuildContext.IsURL {
+ // Check if following buildContext was already
+ // downloaded before in any other RUN step. If not
+ // download it and populate DownloadCache field for
+ // future RUN steps.
+ if additionalBuildContext.DownloadedCache == "" {
+ // additional context contains a tar file
+ // so download and explode tar to buildah
+ // temp and point context to that.
+ path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
+ if err != nil {
+ return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
+ }
+ // point context dir to the extracted path
+ mountPoint = filepath.Join(path, subdir)
+ // populate cache for next RUN step
+ additionalBuildContext.DownloadedCache = mountPoint
+ } else {
+ mountPoint = additionalBuildContext.DownloadedCache
+ }
+ }
+ stageMountPoints[from] = internal.StageMountDetails{IsStage: true, DidExecute: true, MountPoint: mountPoint}
+ break
+ }
+ }
+ // If the source's name corresponds to the
+ // result of an earlier stage, wait for that
+ // stage to finish being built.
+ if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
+ return nil, err
+ }
+ if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
+ stageMountPoints[from] = internal.StageMountDetails{IsStage: true, DidExecute: otherStage.didExecute, MountPoint: otherStage.mountPoint}
+ break
+ } else {
+ mountPoint, err := s.getImageRootfs(s.ctx, from)
+ if err != nil {
+ return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from)
+ }
+ stageMountPoints[from] = internal.StageMountDetails{IsStage: false, DidExecute: true, MountPoint: mountPoint}
+ break
+ }
+ default:
+ continue
+ }
+ }
+ }
+ }
+ return stageMountPoints, nil
+}
+
+func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.File) ([]Mount, error) {
+ mountResult := []Mount{}
+ for _, file := range files {
+ f, err := os.CreateTemp(parse.GetTempDir(), "buildahheredoc")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := f.WriteString(file.Data); err != nil {
+ f.Close()
+ return nil, err
+ }
+ err = f.Chmod(0755)
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ // dest path is same as buildkit for compat
+ dest := filepath.Join("/dev/pipes/", filepath.Base(f.Name()))
+ mount := Mount{Destination: dest, Type: define.TypeBind, Source: f.Name(), Options: append(define.BindOptions, "rprivate", "z", "Z")}
+ mountResult = append(mountResult, mount)
+ f.Close()
+ }
+ return mountResult, nil
+}
+
// Run executes a RUN instruction using the stage's current working container
// as a root directory.
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
	logrus.Debugf("RUN %#v, %#v", run, config)
	args := run.Args
	heredocMounts := []Mount{}
	if len(run.Files) > 0 {
		// The RUN instruction carried heredoc content; decide whether to
		// execute it as a mounted script file or inline it into the command.
		if heredoc := buildkitparser.MustParseHeredoc(args[0]); heredoc != nil {
			if strings.HasPrefix(run.Files[0].Data, "#!") || strings.HasPrefix(run.Files[0].Data, "\n#!") {
				// This is a single heredoc with a shebang, so create a file
				// and run it.
				heredocMount, err := s.createNeededHeredocMountsForRun(run.Files)
				if err != nil {
					return err
				}
				args = []string{heredocMount[0].Destination}
				heredocMounts = append(heredocMounts, heredocMount...)
			} else {
				// No shebang: pass the heredoc body directly as the command.
				args = []string{run.Files[0].Data}
			}
		} else {
			// args[0] is not itself a heredoc command; concatenate the
			// command with each heredoc's data and name into one argument.
			full := args[0]
			for _, file := range run.Files {
				full += file.Data + "\n" + file.Name
			}
			args = []string{full}
		}
	}
	// Pre-mount any stages or images referenced via `--mount ... from=...`.
	stageMountPoints, err := s.runStageMountPoints(run.Mounts)
	if err != nil {
		return err
	}
	if s.builder == nil {
		return fmt.Errorf("no build container available")
	}
	// If the executor has no stdin configured, hand the command /dev/null so
	// reads terminate immediately instead of blocking.
	stdin := s.executor.in
	if stdin == nil {
		devNull, err := os.Open(os.DevNull)
		if err != nil {
			return fmt.Errorf("opening %q for reading: %v", os.DevNull, err)
		}
		defer devNull.Close()
		stdin = devNull
	}
	// Copy the namespace options so per-RUN adjustments (e.g. --network=host
	// below) don't modify the executor's shared slice.
	namespaceOptions := append([]define.NamespaceOption{}, s.executor.namespaceOptions...)
	options := buildah.RunOptions{
		Args:             s.executor.runtimeArgs,
		Cmd:              config.Cmd,
		ContextDir:       s.executor.contextDir,
		ConfigureNetwork: s.executor.configureNetwork,
		Entrypoint:       config.Entrypoint,
		Env:              config.Env,
		Hostname:         config.Hostname,
		Logger:           s.executor.logger,
		Mounts:           append([]Mount{}, s.executor.transientMounts...),
		NamespaceOptions: namespaceOptions,
		NoHostname:       s.executor.noHostname,
		NoHosts:          s.executor.noHosts,
		NoPivot:          os.Getenv("BUILDAH_NOPIVOT") != "",
		Quiet:            s.executor.quiet,
		RunMounts:        run.Mounts,
		Runtime:          s.executor.runtime,
		Secrets:          s.executor.secrets,
		SSHSources:       s.executor.sshsources,
		StageMountPoints: stageMountPoints,
		Stderr:           s.executor.err,
		Stdin:            stdin,
		Stdout:           s.executor.out,
		SystemContext:    s.executor.systemContext,
		Terminal:         buildah.WithoutTerminal,
		User:             config.User,
		WorkingDir:       config.WorkingDir,
	}

	// Honor `RUN --network=<>`.
	switch run.Network {
	case "host":
		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: "network", Host: true})
		options.ConfigureNetwork = define.NetworkEnabled
	case "none":
		options.ConfigureNetwork = define.NetworkDisabled
	case "", "default":
		// do nothing
	default:
		return fmt.Errorf(`unsupported value %q for "RUN --network", must be either "host" or "none"`, run.Network)
	}

	// The image configuration can disable networking outright; it overrides
	// whatever --network selected above.
	if config.NetworkDisabled {
		options.ConfigureNetwork = buildah.NetworkDisabled
	}

	// Shell-form RUN: wrap the arguments in the image's configured shell
	// (only honored for Docker-format images), defaulting to /bin/sh -c.
	if run.Shell {
		if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest {
			args = append(config.Shell, args...)
		} else {
			args = append([]string{"/bin/sh", "-c"}, args...)
		}
	}
	// Save the contents of declared volumes so they can be restored after
	// the command runs; the returned mounts expose the cached copies.
	mounts, err := s.volumeCacheSave()
	if err != nil {
		return err
	}
	options.Mounts = append(options.Mounts, mounts...)
	if len(heredocMounts) > 0 {
		options.Mounts = append(options.Mounts, heredocMounts...)
	}
	err = s.builder.Run(args, options)
	// Restore volume contents.  A restore error is only surfaced when the
	// run itself succeeded; otherwise the run error takes precedence.
	if err2 := s.volumeCacheRestore(); err2 != nil {
		if err == nil {
			return err2
		}
	}
	return err
}
+
+// UnrecognizedInstruction is called when we encounter an instruction that the
+// imagebuilder parser didn't understand.
+func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
+ errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command))
+ err := fmt.Sprintf(errStr+"%#v", step)
+ if s.executor.ignoreUnrecognizedInstructions {
+ logrus.Debugf(err)
+ return nil
+ }
+
+ switch logrus.GetLevel() {
+ case logrus.ErrorLevel:
+ s.executor.logger.Errorf(errStr)
+ case logrus.DebugLevel:
+ logrus.Debugf(err)
+ default:
+ s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
+ }
+
+ return fmt.Errorf(err)
+}
+
// prepare creates a working container based on the specified image, or if one
// isn't specified, the first argument passed to the first FROM instruction we
// can find in the stage's parsed tree.
func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBConfig, rebase, preserveBaseImageAnnotations bool, pullPolicy define.PullPolicy) (builder *buildah.Builder, err error) {
	stage := s.stage
	ib := stage.Builder
	node := stage.Node

	if from == "" {
		// No explicit base: let imagebuilder find the FROM image in the
		// stage's parse tree.
		base, err := ib.From(node)
		if err != nil {
			logrus.Debugf("prepare(node.Children=%#v)", node.Children)
			return nil, fmt.Errorf("determining starting point for build: %w", err)
		}
		from = base
	}
	displayFrom := from

	// stage.Name will be a numeric string for all stages without an "AS" clause
	asImageName := stage.Name
	if asImageName != "" {
		if _, err := strconv.Atoi(asImageName); err != nil {
			// Non-numeric, so this stage had an explicit "AS" name; show it.
			displayFrom = from + " AS " + asImageName
		}
	}

	if initializeIBConfig && rebase {
		logrus.Debugf("FROM %#v", displayFrom)
		if !s.executor.quiet {
			s.log("FROM %s", displayFrom)
		}
	}

	// NOTE(review): if systemContext is a pointer type, the assignments below
	// mutate the executor's shared SystemContext rather than a copy — confirm
	// that is intended for per-stage platform overrides.
	builderSystemContext := s.executor.systemContext
	// get platform string from stage
	if stage.Builder.Platform != "" {
		// "os" here shadows the standard-library os package for the rest of
		// this block; the package isn't used inside it, so this is safe.
		os, arch, variant, err := parse.Platform(stage.Builder.Platform)
		if err != nil {
			return nil, fmt.Errorf("unable to parse platform %q: %w", stage.Builder.Platform, err)
		}
		if arch != "" || variant != "" {
			builderSystemContext.ArchitectureChoice = arch
			builderSystemContext.VariantChoice = variant
		}
		if os != "" {
			builderSystemContext.OSChoice = os
		}
	}

	// Assemble options for the new working container from the executor's
	// build-wide configuration.
	builderOptions := buildah.BuilderOptions{
		Args:                  ib.Args,
		FromImage:             from,
		GroupAdd:              s.executor.groupAdd,
		PullPolicy:            pullPolicy,
		ContainerSuffix:       s.executor.containerSuffix,
		Registry:              s.executor.registry,
		BlobDirectory:         s.executor.blobDirectory,
		SignaturePolicyPath:   s.executor.signaturePolicyPath,
		ReportWriter:          s.executor.reportWriter,
		SystemContext:         builderSystemContext,
		Isolation:             s.executor.isolation,
		NamespaceOptions:      s.executor.namespaceOptions,
		ConfigureNetwork:      s.executor.configureNetwork,
		CNIPluginPath:         s.executor.cniPluginPath,
		CNIConfigDir:          s.executor.cniConfigDir,
		NetworkInterface:      s.executor.networkInterface,
		IDMappingOptions:      s.executor.idmappingOptions,
		CommonBuildOpts:       s.executor.commonBuildOptions,
		DefaultMountsFilePath: s.executor.defaultMountsFilePath,
		Format:                s.executor.outputFormat,
		Capabilities:          s.executor.capabilities,
		Devices:               s.executor.devices,
		MaxPullRetries:        s.executor.maxPullPushRetries,
		PullRetryDelay:        s.executor.retryPullPushDelay,
		OciDecryptConfig:      s.executor.ociDecryptConfig,
		Logger:                s.executor.logger,
		ProcessLabel:          s.executor.processLabel,
		MountLabel:            s.executor.mountLabel,
		PreserveBaseImageAnns: preserveBaseImageAnnotations,
	}

	builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
	if err != nil {
		return nil, fmt.Errorf("creating build container: %w", err)
	}

	// If executor's ProcessLabel and MountLabel is empty means this is the first stage
	// Make sure we share first stage's ProcessLabel and MountLabel with all other subsequent stages
	// Doing this will ensure and one stage in same build can mount another stage even if `selinux`
	// is enabled.

	if s.executor.mountLabel == "" && s.executor.processLabel == "" {
		s.executor.mountLabel = builder.MountLabel
		s.executor.processLabel = builder.ProcessLabel
	}

	if initializeIBConfig {
		// Seed the imagebuilder configuration from the base image's config
		// so later instructions see inherited values.
		volumes := map[string]struct{}{}
		for _, v := range builder.Volumes() {
			volumes[v] = struct{}{}
		}
		ports := map[docker.Port]struct{}{}
		for _, p := range builder.Ports() {
			ports[docker.Port(p)] = struct{}{}
		}
		dConfig := docker.Config{
			Hostname:     builder.Hostname(),
			Domainname:   builder.Domainname(),
			User:         builder.User(),
			Env:          builder.Env(),
			Cmd:          builder.Cmd(),
			Image:        from,
			Volumes:      volumes,
			WorkingDir:   builder.WorkDir(),
			Entrypoint:   builder.Entrypoint(),
			Healthcheck:  (*docker.HealthConfig)(builder.Healthcheck()),
			Labels:       builder.Labels(),
			Shell:        builder.Shell(),
			StopSignal:   builder.StopSignal(),
			OnBuild:      builder.OnBuild(),
			ExposedPorts: ports,
		}
		var rootfs *docker.RootFS
		if builder.Docker.RootFS != nil {
			rootfs = &docker.RootFS{
				Type: builder.Docker.RootFS.Type,
			}
			for _, id := range builder.Docker.RootFS.DiffIDs {
				rootfs.Layers = append(rootfs.Layers, id.String())
			}
		}
		dImage := docker.Image{
			Parent:          builder.FromImageID,
			ContainerConfig: dConfig,
			Container:       builder.Container,
			Author:          builder.Maintainer(),
			Architecture:    builder.Architecture(),
			RootFS:          rootfs,
		}
		dImage.Config = &dImage.ContainerConfig
		err = ib.FromImage(&dImage, node)
		if err != nil {
			// Clean up the container we just created before bailing out.
			if err2 := builder.Delete(); err2 != nil {
				logrus.Debugf("error deleting container which we failed to update: %v", err2)
			}
			return nil, fmt.Errorf("updating build context: %w", err)
		}
	}
	mountPoint, err := builder.Mount(builder.MountLabel)
	if err != nil {
		// Clean up the container we just created before bailing out.
		if err2 := builder.Delete(); err2 != nil {
			logrus.Debugf("error deleting container which we failed to mount: %v", err2)
		}
		return nil, fmt.Errorf("mounting new container: %w", err)
	}
	if rebase {
		// Make this our "current" working container.
		s.mountPoint = mountPoint
		s.builder = builder
	}
	logrus.Debugln("Container ID:", builder.ContainerID)
	return builder, nil
}
+
+// Delete deletes the stage's working container, if we have one.
+func (s *StageExecutor) Delete() (err error) {
+ if s.builder != nil {
+ err = s.builder.Delete()
+ s.builder = nil
+ }
+ return err
+}
+
+// stepRequiresLayer indicates whether or not the step should be followed by
+// committing a layer container when creating an intermediate image.
+func (*StageExecutor) stepRequiresLayer(step *imagebuilder.Step) bool {
+ switch strings.ToUpper(step.Command) {
+ case "ADD", "COPY", "RUN":
+ return true
+ }
+ return false
+}
+
+// getImageRootfs checks for an image matching the passed-in name in local
+// storage. If it isn't found, it pulls down a copy. Then, if we don't have a
+// working container root filesystem based on the image, it creates one. Then
+// it returns that root filesystem's location.
+func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mountPoint string, err error) {
+ if builder, ok := s.executor.containerMap[image]; ok {
+ return builder.MountPoint, nil
+ }
+ builder, err := s.prepare(ctx, image, false, false, false, s.executor.pullPolicy)
+ if err != nil {
+ return "", err
+ }
+ s.executor.containerMap[image] = builder
+ return builder.MountPoint, nil
+}
+
+// getContentSummary generates content summary for cases where we added content and need
+// to get summary with updated digests.
+func (s *StageExecutor) getContentSummaryAfterAddingContent() string {
+ contentType, digest := s.builder.ContentDigester.Digest()
+ summary := contentType
+ if digest != "" {
+ if summary != "" {
+ summary = summary + ":"
+ }
+ summary = summary + digest.Encoded()
+ logrus.Debugf("added content %s", summary)
+ }
+ return summary
+}
+
+// Execute runs each of the steps in the stage's parsed tree, in turn.
+func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, onlyBaseImg bool, err error) {
+ var resourceUsage rusage.Rusage
+ stage := s.stage
+ ib := stage.Builder
+ checkForLayers := s.executor.layers && s.executor.useCache
+ moreStages := s.index < len(s.stages)-1
+ lastStage := !moreStages
+ onlyBaseImage := false
+ imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)])
+ rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)])
+
+ // If the base image's name corresponds to the result of an earlier
+ // stage, make sure that stage has finished building an image, and
+ // substitute that image's ID for the base image's name here and force
+ // the pull policy to "never" to avoid triggering an error when it's
+ // set to "always", which doesn't make sense for image IDs.
+ // If not, then go on assuming that it's just a regular image that's
+ // either in local storage, or one that we have to pull from a
+ // registry, subject to the passed-in pull policy.
+ if isStage, err := s.executor.waitForStage(ctx, base, s.stages[:s.index]); isStage && err != nil {
+ return "", nil, false, err
+ }
+ pullPolicy := s.executor.pullPolicy
+ s.executor.stagesLock.Lock()
+ var preserveBaseImageAnnotationsAtStageStart bool
+ if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage {
+ base = stageImage
+ pullPolicy = define.PullNever
+ preserveBaseImageAnnotationsAtStageStart = true
+ }
+ s.executor.stagesLock.Unlock()
+
+ // Set things up so that we can log resource usage as we go.
+ logRusage := func() {
+ if rusage.Supported() {
+ usage, err := rusage.Get()
+ if err != nil {
+ fmt.Fprintf(s.executor.out, "error gathering resource usage information: %v\n", err)
+ return
+ }
+ if s.executor.rusageLogFile != nil {
+ fmt.Fprintf(s.executor.rusageLogFile, "%s\n", rusage.FormatDiff(usage.Subtract(resourceUsage)))
+ }
+ resourceUsage = usage
+ }
+ }
+
+ // Start counting resource usage before we potentially pull a base image.
+ if rusage.Supported() {
+ if resourceUsage, err = rusage.Get(); err != nil {
+ return "", nil, false, err
+ }
+ // Log the final incremental resource usage counter before we return.
+ defer logRusage()
+ }
+
+ // Create the (first) working container for this stage. Reinitializing
+ // the imagebuilder configuration may alter the list of steps we have,
+ // so take a snapshot of them *after* that.
+ if _, err := s.prepare(ctx, base, true, true, preserveBaseImageAnnotationsAtStageStart, pullPolicy); err != nil {
+ return "", nil, false, err
+ }
+ children := stage.Node.Children
+
+ // A helper function to only log "COMMIT" as an explicit step if it's
+ // the very last step of a (possibly multi-stage) build.
+ logCommit := func(output string, instruction int) {
+ moreInstructions := instruction < len(children)-1
+ if moreInstructions || moreStages {
+ return
+ }
+ commitMessage := "COMMIT"
+ if output != "" {
+ commitMessage = fmt.Sprintf("%s %s", commitMessage, output)
+ }
+ logrus.Debugf(commitMessage)
+ if !s.executor.quiet {
+ s.log(commitMessage)
+ }
+ }
+ // logCachePulled produces build log for cases when `--cache-from`
+ // is used and a valid intermediate image is pulled from remote source.
+ logCachePulled := func(cacheKey string, remote reference.Named) {
+ if !s.executor.quiet {
+ cachePullMessage := "--> Cache pulled from remote"
+ fmt.Fprintf(s.executor.out, "%s %s\n", cachePullMessage, fmt.Sprintf("%s:%s", remote.String(), cacheKey))
+ }
+ }
+ // logCachePush produces build log for cases when `--cache-to`
+ // is used and a valid intermediate image is pushed tp remote source.
+ logCachePush := func(cacheKey string) {
+ if !s.executor.quiet {
+ cachePushMessage := "--> Pushing cache"
+ fmt.Fprintf(s.executor.out, "%s %s\n", cachePushMessage, fmt.Sprintf("%s:%s", s.executor.cacheTo, cacheKey))
+ }
+ }
+ logCacheHit := func(cacheID string) {
+ if !s.executor.quiet {
+ cacheHitMessage := "--> Using cache"
+ fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, cacheID)
+ }
+ }
+ logImageID := func(imgID string) {
+ if len(imgID) > 12 {
+ imgID = imgID[:12]
+ }
+ if s.executor.iidfile == "" {
+ fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
+ }
+ }
+
+ // Parse and populate buildOutputOption if needed
+ var buildOutputOption define.BuildOutputOption
+ canGenerateBuildOutput := (s.executor.buildOutput != "" && lastStage)
+ if canGenerateBuildOutput {
+ logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
+ buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
+ if err != nil {
+ return "", nil, false, fmt.Errorf("failed to parse build output: %w", err)
+ }
+ }
+
+ if len(children) == 0 {
+ // There are no steps.
+ if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 {
+ // We either don't have a base image, or we need to
+ // transform the contents of the base image, or we need
+ // to make some changes to just the config blob. Whichever
+ // is the case, we need to commit() to create a new image.
+ logCommit(s.output, -1)
+ // No base image means there's nothing to put in a
+ // layer, so don't create one.
+ emptyLayer := (s.builder.FromImageID == "")
+ if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), emptyLayer, s.output, s.executor.squash, lastStage); err != nil {
+ return "", nil, false, fmt.Errorf("committing base container: %w", err)
+ }
+ } else {
+ // We don't need to squash or otherwise transform the
+ // base image, and the image wouldn't be modified by
+ // the command line options, so just reuse the base
+ // image.
+ logCommit(s.output, -1)
+ if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
+ return "", nil, onlyBaseImage, err
+ }
+ onlyBaseImage = true
+ }
+ // Generate build output from the new image, or the preexisting
+ // one if we didn't actually do anything, if needed.
+ if canGenerateBuildOutput {
+ if err := s.generateBuildOutput(buildOutputOption); err != nil {
+ return "", nil, onlyBaseImage, err
+ }
+ }
+ logImageID(imgID)
+ }
+
+ for i, node := range children {
+ logRusage()
+ moreInstructions := i < len(children)-1
+ lastInstruction := !moreInstructions
+ // Resolve any arguments in this instruction.
+ step := ib.Step()
+ if err := step.Resolve(node); err != nil {
+ return "", nil, false, fmt.Errorf("resolving step %+v: %w", *node, err)
+ }
+ logrus.Debugf("Parsed Step: %+v", *step)
+ if !s.executor.quiet {
+ s.log("%s", step.Original)
+ }
+
+ // Check if there's a --from if the step command is COPY.
+ // Also check the chmod and the chown flags for validity.
+ for _, flag := range step.Flags {
+ command := strings.ToUpper(step.Command)
+ // chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from='
+ if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") {
+ return "", nil, false, fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
+ }
+ if command == "ADD" && (flag == "--chmod" || flag == "--chown" || flag == "--checksum") {
+ return "", nil, false, fmt.Errorf("ADD only supports the --chmod=<permissions>, --chown=<uid:gid>, and --checksum=<checksum> flags")
+ }
+ if strings.Contains(flag, "--from") && command == "COPY" {
+ arr := strings.Split(flag, "=")
+ if len(arr) != 2 {
+ return "", nil, false, fmt.Errorf("%s: invalid --from flag %q, should be --from=<name|stage>", command, flag)
+ }
+ // If arr[1] has an argument within it, resolve it to its
+ // value. Otherwise just return the value found.
+ from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
+ if fromErr != nil {
+ return "", nil, false, fmt.Errorf("unable to resolve argument %q: %w", arr[1], fromErr)
+ }
+
+ // Before looking into additional context
+ // also account if the index is given instead
+ // of name so convert index in --from=<index>
+ // to name.
+ if index, err := strconv.Atoi(from); err == nil {
+ from = s.stages[index].Name
+ }
+ // If additional buildContext contains this
+ // give priority to that and break if additional
+ // is not an external image.
+ if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok {
+ if !additionalBuildContext.IsImage {
+ // We don't need to pull this
+ // since this additional context
+ // is not an image.
+ break
+ } else {
+ // replace with image set in build context
+ from = additionalBuildContext.Value
+ if _, err := s.getImageRootfs(ctx, from); err != nil {
+ return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
+ }
+ break
+ }
+ }
+
+ // If the source's name corresponds to the
+ // result of an earlier stage, wait for that
+ // stage to finish being built.
+ if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
+ return "", nil, false, err
+ }
+ if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
+ break
+ } else if _, err = s.getImageRootfs(ctx, from); err != nil {
+ return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
+ }
+ break
+ }
+ }
+
+ // Determine if there are any RUN instructions to be run after
+ // this step. If not, we won't have to bother preserving the
+ // contents of any volumes declared between now and when we
+ // finish.
+ noRunsRemaining := false
+ if moreInstructions {
+ noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]})
+ }
+
+ // If we're doing a single-layer build, just process the
+ // instruction.
+ if !s.executor.layers {
+ s.didExecute = true
+ err := ib.Run(step, s, noRunsRemaining)
+ if err != nil {
+ logrus.Debugf("Error building at step %+v: %v", *step, err)
+ return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
+ }
+ // In case we added content, retrieve its digest.
+ addedContentSummary := s.getContentSummaryAfterAddingContent()
+ if moreInstructions {
+ // There are still more instructions to process
+ // for this stage. Make a note of the
+ // instruction in the history that we'll write
+ // for the image when we eventually commit it.
+ timestamp := time.Now().UTC()
+ if s.executor.timestamp != nil {
+ timestamp = *s.executor.timestamp
+ }
+ s.builder.AddPrependedEmptyLayer(&timestamp, s.getCreatedBy(node, addedContentSummary), "", "")
+ continue
+ } else {
+ // This is the last instruction for this stage,
+ // so we should commit this container to create
+ // an image, but only if it's the last stage,
+ // or if it's used as the basis for a later
+ // stage.
+ if lastStage || imageIsUsedLater {
+ logCommit(s.output, i)
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction)
+ if err != nil {
+ return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err)
+ }
+ logImageID(imgID)
+ // Generate build output if needed.
+ if canGenerateBuildOutput {
+ if err := s.generateBuildOutput(buildOutputOption); err != nil {
+ return "", nil, false, err
+ }
+ }
+ } else {
+ imgID = ""
+ }
+ break
+ }
+ }
+
+ // We're in a multi-layered build.
+ s.didExecute = false
+ var (
+ commitName string
+ cacheID string
+ cacheKey string
+ pulledAndUsedCacheImage bool
+ err error
+ rebase bool
+ addedContentSummary string
+ canMatchCacheOnlyAfterRun bool
+ )
+
+ // Only attempt to find cache if its needed, this part is needed
+ // so that if a step is using RUN --mount and mounts content from
+ // previous stages then it uses the freshly built stage instead
+ // of re-using the older stage from the store.
+ avoidLookingCache := false
+ var mounts []string
+ for _, a := range node.Flags {
+ arg, err := imagebuilder.ProcessWord(a, s.stage.Builder.Arguments())
+ if err != nil {
+ return "", nil, false, err
+ }
+ switch {
+ case strings.HasPrefix(arg, "--mount="):
+ mount := strings.TrimPrefix(arg, "--mount=")
+ mounts = append(mounts, mount)
+ default:
+ continue
+ }
+ }
+ stageMountPoints, err := s.runStageMountPoints(mounts)
+ if err != nil {
+ return "", nil, false, err
+ }
+ for _, mountPoint := range stageMountPoints {
+ if mountPoint.DidExecute {
+ avoidLookingCache = true
+ }
+ }
+
+ needsCacheKey := (len(s.executor.cacheFrom) != 0 || len(s.executor.cacheTo) != 0) && !avoidLookingCache
+
+ // If we have to commit for this instruction, only assign the
+ // stage's configured output name to the last layer.
+ if lastInstruction {
+ commitName = s.output
+ }
+
+ // If --cache-from or --cache-to is specified make sure to populate
+ // cacheKey since it will be used either while pulling or pushing the
+ // cache images.
+ if needsCacheKey {
+ cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
+ }
+ }
+ // Check if there's already an image based on our parent that
+ // has the same change that we're about to make, so far as we
+ // can tell.
+ // Only do this if the step we are on is not an ARG step,
+ // we need to call ib.Run() to correctly put the args together before
+ // determining if a cached layer with the same build args already exists
+ // and that is done in the if block below.
+ if checkForLayers && step.Command != "arg" && !(s.executor.squash && lastInstruction && lastStage) && !avoidLookingCache {
+ // For `COPY` and `ADD`, history entries include digests computed from
+ // the content that's copied in. We need to compute that information so that
+ // it can be used to evaluate the cache, which means we need to go ahead
+ // and copy the content.
+ canMatchCacheOnlyAfterRun = (step.Command == command.Add || step.Command == command.Copy)
+ if canMatchCacheOnlyAfterRun {
+ s.didExecute = true
+ if err = ib.Run(step, s, noRunsRemaining); err != nil {
+ logrus.Debugf("Error building at step %+v: %v", *step, err)
+ return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
+ }
+ // Retrieve the digest info for the content that we just copied
+ // into the rootfs.
+ addedContentSummary = s.getContentSummaryAfterAddingContent()
+ // regenerate cache key with updated content summary
+ if needsCacheKey {
+ cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
+ }
+ }
+ }
+ cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
+ }
+ // All the best effort to find cache on localstorage have failed try pulling
+ // cache from remote repo if `--cache-from` was configured.
+ if cacheID == "" && len(s.executor.cacheFrom) != 0 {
+ // only attempt to use cache again if pulling was successful
+ // otherwise do nothing and attempt to run the step, err != nil
+ // is ignored and will be automatically logged for --log-level debug
+ if ref, id, err := s.pullCache(ctx, cacheKey); ref != nil && id != "" && err == nil {
+ logCachePulled(cacheKey, ref)
+ cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
+ }
+ if cacheID != "" {
+ pulledAndUsedCacheImage = true
+ }
+ }
+ }
+ }
+
+ // If we didn't find a cache entry, or we need to add content
+ // to find the digest of the content to check for a cached
+ // image, run the step so that we can check if the result
+ // matches a cache.
+ // We already called ib.Run() for the `canMatchCacheOnlyAfterRun`
+ // cases above, so we shouldn't do it again.
+ if cacheID == "" && !canMatchCacheOnlyAfterRun {
+ // Process the instruction directly.
+ s.didExecute = true
+ if err = ib.Run(step, s, noRunsRemaining); err != nil {
+ logrus.Debugf("Error building at step %+v: %v", *step, err)
+ return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
+ }
+
+ // In case we added content, retrieve its digest.
+ addedContentSummary = s.getContentSummaryAfterAddingContent()
+ // regenerate cache key with updated content summary
+ if needsCacheKey {
+ cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
+ }
+ }
+
+ // Check if there's already an image based on our parent that
+ // has the same change that we just made.
+ if checkForLayers && !avoidLookingCache {
+ cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
+ }
+ // All the best effort to find cache on localstorage have failed try pulling
+ // cache from remote repo if `--cache-from` was configured and cacheKey was
+ // generated again after adding content summary.
+ if cacheID == "" && len(s.executor.cacheFrom) != 0 {
+ // only attempt to use cache again if pulling was successful
+ // otherwise do nothing and attempt to run the step, err != nil
+ // is ignored and will be automatically logged for --log-level debug
+ if ref, id, err := s.pullCache(ctx, cacheKey); ref != nil && id != "" && err == nil {
+ logCachePulled(cacheKey, ref)
+ cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
+ if err != nil {
+ return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
+ }
+ if cacheID != "" {
+ pulledAndUsedCacheImage = true
+ }
+ }
+ }
+ }
+ } else {
+ // This log line is majorly here so we can verify in tests
+ // that our cache is performing in the most optimal way for
+ // various cases.
+ logrus.Debugf("Found a cache hit in the first iteration with id %s", cacheID)
+ // If the instruction would affect our configuration,
+ // process the configuration change so that, if we fall
+ // off the cache path, the filesystem changes from the
+ // last cache image will be all that we need, since we
+ // still don't want to restart using the image's
+ // configuration blob.
+ if !s.stepRequiresLayer(step) {
+ s.didExecute = true
+ err := ib.Run(step, s, noRunsRemaining)
+ if err != nil {
+ logrus.Debugf("Error building at step %+v: %v", *step, err)
+ return "", nil, false, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
+ }
+ }
+ }
+
+ // Note: If the build has squash, we must try to re-use as many layers as possible if cache is found.
+ // So only perform commit if it's the lastInstruction of lastStage.
+ if cacheID != "" {
+ logCacheHit(cacheID)
+ // A suitable cached image was found, so we can just
+ // reuse it. If we need to add a name to the resulting
+ // image because it's the last step in this stage, add
+ // the name to the image.
+ imgID = cacheID
+ if commitName != "" {
+ logCommit(commitName, i)
+ if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
+ return "", nil, false, err
+ }
+ }
+ } else {
+ // We're not going to find any more cache hits, so we
+ // can stop looking for them.
+ checkForLayers = false
+ // Create a new image, maybe with a new layer, with the
+ // name for this stage if it's the last instruction.
+ logCommit(s.output, i)
+ // While committing we always set squash to false here
+ // because at this point we want to save history for
+ // layers even if its a squashed build so that they
+ // can be part of the build cache.
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false, lastStage && lastInstruction)
+ if err != nil {
+ return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err)
+ }
+ // Generate build output if needed.
+ if canGenerateBuildOutput {
+ if err := s.generateBuildOutput(buildOutputOption); err != nil {
+ return "", nil, false, err
+ }
+ }
+ }
+
+ // Following step is just built and was not used from
+ // cache so check if --cache-to was specified if yes
+ // then attempt pushing this cache to remote repo and
+ // fail accordingly.
+ //
+ // Or
+ //
+ // Try to push this cache to remote repository only
+ // if cache was present on local storage and not
+ // pulled from remote source while processing this
+ if len(s.executor.cacheTo) != 0 && (!pulledAndUsedCacheImage || cacheID == "") && needsCacheKey {
+ logCachePush(cacheKey)
+ if err = s.pushCache(ctx, imgID, cacheKey); err != nil {
+ return "", nil, false, err
+ }
+ }
+
+ if lastInstruction && lastStage {
+ if s.executor.squash || s.executor.confidentialWorkload.Convert {
+ // Create a squashed version of this image
+ // if we're supposed to create one and this
+ // is the last instruction of the last stage.
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true, lastStage && lastInstruction)
+ if err != nil {
+ return "", nil, false, fmt.Errorf("committing final squash step %+v: %w", *step, err)
+ }
+ // Generate build output if needed.
+ if canGenerateBuildOutput {
+ if err := s.generateBuildOutput(buildOutputOption); err != nil {
+ return "", nil, false, err
+ }
+ }
+ } else if cacheID != "" {
+ // If we found a valid cache hit and this is lastStage
+ // and not a squashed build then there is no opportunity
+ // for us to perform a `commit` later in the code since
+ // everything will be used from cache.
+ //
+ // If above statement is true and --output was provided
+ // then generate output manually since there is no opportunity
+ // for us to perform `commit` anywhere in the code.
+ // Generate build output if needed.
+ if canGenerateBuildOutput {
+ if err := s.generateBuildOutput(buildOutputOption); err != nil {
+ return "", nil, false, err
+ }
+ }
+ }
+ }
+
+ logImageID(imgID)
+
+ // Update our working container to be based off of the cached
+ // image, if we might need to use it as a basis for the next
+ // instruction, or if we need the root filesystem to match the
+ // image contents for the sake of a later stage that wants to
+ // copy content from it.
+ rebase = moreInstructions || rootfsIsUsedLater
+
+ if rebase {
+ // Since we either committed the working container or
+ // are about to replace it with one based on a cached
+ // image, add the current working container's ID to the
+ // list of successful intermediate containers that
+ // we'll clean up later.
+ s.containerIDs = append(s.containerIDs, s.builder.ContainerID)
+
+ // Prepare for the next step or subsequent phases by
+ // creating a new working container with the
+ // just-committed or updated cached image as its new
+ // base image.
+ // Enforce pull "never" since we already have an image
+ // ID that we really should not be pulling anymore (see
+ // containers/podman/issues/10307).
+ if _, err := s.prepare(ctx, imgID, false, true, true, define.PullNever); err != nil {
+ return "", nil, false, fmt.Errorf("preparing container for next step: %w", err)
+ }
+ }
+ }
+
+ return imgID, ref, onlyBaseImage, nil
+}
+
+func historyEntriesEqual(base, derived v1.History) bool {
+ if base.CreatedBy != derived.CreatedBy {
+ return false
+ }
+ if base.Comment != derived.Comment {
+ return false
+ }
+ if base.Author != derived.Author {
+ return false
+ }
+ if base.EmptyLayer != derived.EmptyLayer {
+ return false
+ }
+ if base.Created != nil && derived.Created == nil {
+ return false
+ }
+ if base.Created == nil && derived.Created != nil {
+ return false
+ }
+ if base.Created != nil && derived.Created != nil && !base.Created.Equal(*derived.Created) {
+ return false
+ }
+ return true
+}
+
+// historyAndDiffIDsMatch returns true if a candidate history matches the
+// history of our base image (if we have one), plus the current instruction,
+// and if the list of diff IDs for the images do for the part of the history
+// that we're comparing.
+// Used to verify whether a cache of the intermediate image exists and whether
+// to run the build again.
+func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) bool {
+ // our history should be as long as the base's, plus one entry for what
+ // we're doing
+ if len(history) != len(baseHistory)+1 {
+ return false
+ }
+ // check that each entry in the base history corresponds to an entry in
+ // our history, and count how many of them add a layer diff
+ expectedDiffIDs := 0
+ for i := range baseHistory {
+ if !historyEntriesEqual(baseHistory[i], history[i]) {
+ return false
+ }
+ if !baseHistory[i].EmptyLayer {
+ expectedDiffIDs++
+ }
+ }
+ if len(baseDiffIDs) != expectedDiffIDs {
+ return false
+ }
+ if buildAddsLayer {
+ // we're adding a layer, so we should have exactly one more
+ // layer than the base image
+ if len(diffIDs) != expectedDiffIDs+1 {
+ return false
+ }
+ } else {
+ // we're not adding a layer, so we should have exactly the same
+ // layers as the base image
+ if len(diffIDs) != expectedDiffIDs {
+ return false
+ }
+ }
+ // compare the diffs for the layers that we should have in common
+ for i := range baseDiffIDs {
+ if diffIDs[i] != baseDiffIDs[i] {
+ return false
+ }
+ }
+ return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentSummary)
+}
+
+// getCreatedBy returns the command the image at node will be created by. If
+// the passed-in CompositeDigester is not nil, it is assumed to have the digest
+// information for the content if the node is ADD or COPY.
+func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) string {
+ if node == nil {
+ return "/bin/sh"
+ }
+ switch strings.ToUpper(node.Value) {
+ case "ARG":
+ for _, variable := range strings.Fields(node.Original) {
+ if variable != "ARG" {
+ s.argsFromContainerfile = append(s.argsFromContainerfile, variable)
+ }
+ }
+ buildArgs := s.getBuildArgsKey()
+ return "/bin/sh -c #(nop) ARG " + buildArgs
+ case "RUN":
+ buildArgs := s.getBuildArgsResolvedForRun()
+ if buildArgs != "" {
+ return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
+ }
+ return "/bin/sh -c " + node.Original[4:]
+ case "ADD", "COPY":
+ destination := node
+ for destination.Next != nil {
+ destination = destination.Next
+ }
+ return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " "
+ default:
+ return "/bin/sh -c #(nop) " + node.Original
+ }
+}
+
+// getBuildArgs returns a string of the build-args specified during the build process
+// it excludes any build-args that were not used in the build process
+// values for args are overridden by the values specified using ENV.
+// Reason: Values from ENV will always override values specified arg.
+func (s *StageExecutor) getBuildArgsResolvedForRun() string {
+ var envs []string
+ configuredEnvs := make(map[string]string)
+ dockerConfig := s.stage.Builder.Config()
+
+ for _, env := range dockerConfig.Env {
+ splitv := strings.SplitN(env, "=", 2)
+ if len(splitv) == 2 {
+ configuredEnvs[splitv[0]] = splitv[1]
+ }
+ }
+
+ for key, value := range s.stage.Builder.Args {
+ if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
+ // if value was in image it will be given higher priority
+ // so please embed that into build history
+ _, inImage := configuredEnvs[key]
+ if inImage {
+ envs = append(envs, fmt.Sprintf("%s=%s", key, configuredEnvs[key]))
+ } else {
+ // By default everything must be added to history.
+ // Following variable is configured to false only for special cases.
+ addToHistory := true
+
+ // Following value is being assigned from build-args,
+ // check if this key belongs to any of the predefined allowlist args e.g Proxy Variables
+ // and if that arg is not manually set in Containerfile/Dockerfile
+ // then don't write its value to history.
+ // Following behaviour ensures parity with docker/buildkit.
+ for _, variable := range config.ProxyEnv {
+ if key == variable {
+ // found in predefined args
+ // so don't add to history
+ // unless user did explicit `ARG <some-predefined-proxy-variable>`
+ addToHistory = false
+ for _, processedArg := range s.argsFromContainerfile {
+ if key == processedArg {
+ addToHistory = true
+ }
+ }
+ }
+ }
+ if addToHistory {
+ envs = append(envs, fmt.Sprintf("%s=%s", key, value))
+ }
+ }
+ }
+ }
+ sort.Strings(envs)
+ return strings.Join(envs, " ")
+}
+
+// getBuildArgs key returns the set of args which were specified during the
+// build process, formatted for inclusion in the build history
+func (s *StageExecutor) getBuildArgsKey() string {
+ var args []string
+ for key := range s.stage.Builder.Args {
+ if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
+ args = append(args, key)
+ }
+ }
+ sort.Strings(args)
+ return strings.Join(args, " ")
+}
+
+// tagExistingImage adds names to an image already in the store
+func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
+ // If we don't need to attach a name to the image, just return the cache ID.
+ if output == "" {
+ return cacheID, nil, nil
+ }
+
+ // Get the destination image reference.
+ dest, err := s.executor.resolveNameToImageRef(output)
+ if err != nil {
+ return "", nil, err
+ }
+
+ policyContext, err := util.GetPolicyContext(s.executor.systemContext)
+ if err != nil {
+ return "", nil, err
+ }
+ defer func() {
+ if destroyErr := policyContext.Destroy(); destroyErr != nil {
+ if err == nil {
+ err = destroyErr
+ } else {
+ err = fmt.Errorf("%v: %w", destroyErr.Error(), err)
+ }
+ }
+ }()
+
+ // Look up the source image, expecting it to be in local storage
+ src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID)
+ if err != nil {
+ return "", nil, fmt.Errorf("getting source imageReference for %q: %w", cacheID, err)
+ }
+ options := cp.Options{
+ RemoveSignatures: true, // more like "ignore signatures", since they don't get removed when src and dest are the same image
+ }
+ manifestBytes, err := cp.Image(ctx, policyContext, dest, src, &options)
+ if err != nil {
+ return "", nil, fmt.Errorf("copying image %q: %w", cacheID, err)
+ }
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ return "", nil, fmt.Errorf("computing digest of manifest for image %q: %w", cacheID, err)
+ }
+ _, img, err := is.ResolveReference(dest)
+ if err != nil {
+ return "", nil, fmt.Errorf("locating new copy of image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
+ }
+ var ref reference.Canonical
+ if dref := dest.DockerReference(); dref != nil {
+ if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
+ return "", nil, fmt.Errorf("computing canonical reference for new image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
+ }
+ }
+ return img.ID, ref, nil
+}
+
+// generateCacheKey returns a computed digest for the current STEP
+// running its history and diff against a hash algorithm and this
+// generated CacheKey is further used by buildah to lock and decide
+// tag for the intermediate image which can be pushed and pulled to/from
+// the remote repository.
+func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
+ hash := sha256.New()
+ var baseHistory []v1.History
+ var diffIDs []digest.Digest
+ var manifestType string
+ var err error
+ if s.builder.FromImageID != "" {
+ manifestType, baseHistory, diffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
+ if err != nil {
+ return "", fmt.Errorf("getting history of base image %q: %w", s.builder.FromImageID, err)
+ }
+ for i := 0; i < len(diffIDs); i++ {
+ fmt.Fprintln(hash, diffIDs[i].String())
+ }
+ }
+ createdBy := s.getCreatedBy(currNode, addedContentDigest)
+ fmt.Fprintf(hash, "%t", buildAddsLayer)
+ fmt.Fprintln(hash, createdBy)
+ fmt.Fprintln(hash, manifestType)
+ for _, element := range baseHistory {
+ fmt.Fprintln(hash, element.CreatedBy)
+ fmt.Fprintln(hash, element.Author)
+ fmt.Fprintln(hash, element.Comment)
+ fmt.Fprintln(hash, element.Created)
+ fmt.Fprintf(hash, "%t", element.EmptyLayer)
+ fmt.Fprintln(hash)
+ }
+ return fmt.Sprintf("%x", hash.Sum(nil)), nil
+}
+
+// cacheImageReference is internal function which generates ImageReference from Named repo sources
+// and a tag.
+func cacheImageReferences(repos []reference.Named, cachekey string) ([]types.ImageReference, error) {
+ var result []types.ImageReference
+ for _, repo := range repos {
+ tagged, err := reference.WithTag(repo, cachekey)
+ if err != nil {
+ return nil, fmt.Errorf("failed generating tagged reference for %q: %w", repo, err)
+ }
+ dest, err := imagedocker.NewReference(tagged)
+ if err != nil {
+ return nil, fmt.Errorf("failed generating docker reference for %q: %w", tagged, err)
+ }
+ result = append(result, dest)
+ }
+ return result, nil
+}
+
+// pushCache takes the image id of intermediate image and attempts
+// to perform push at the remote repository with cacheKey as the tag.
+// Returns error if fails otherwise returns nil.
+func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) error {
+ destList, err := cacheImageReferences(s.executor.cacheTo, cacheKey)
+ if err != nil {
+ return err
+ }
+ for _, dest := range destList {
+ logrus.Debugf("trying to push cache to dest: %+v from src:%+v", dest, src)
+ options := buildah.PushOptions{
+ Compression: s.executor.compression,
+ SignaturePolicyPath: s.executor.signaturePolicyPath,
+ Store: s.executor.store,
+ SystemContext: s.executor.systemContext,
+ BlobDirectory: s.executor.blobDirectory,
+ SignBy: s.executor.signBy,
+ MaxRetries: s.executor.maxPullPushRetries,
+ RetryDelay: s.executor.retryPullPushDelay,
+ }
+ ref, digest, err := buildah.Push(ctx, src, dest, options)
+ if err != nil {
+ return fmt.Errorf("failed pushing cache to %q: %w", dest, err)
+ }
+ logrus.Debugf("successfully pushed cache to dest: %+v with ref:%+v and digest: %v", dest, ref, digest)
+ }
+ return nil
+}
+
+// pullCache takes the image source of the cache assuming tag
+// already points to the valid cacheKey and pulls the image to
+// local storage only if it was not already present on local storage
+// or a newer version of cache was found in the upstream repo. If new
+// image was pulled function returns image id otherwise returns empty
+// string "" or error if any error was encontered while pulling the cache.
+func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (reference.Named, string, error) {
+ srcList, err := cacheImageReferences(s.executor.cacheFrom, cacheKey)
+ if err != nil {
+ return nil, "", err
+ }
+ for _, src := range srcList {
+ logrus.Debugf("trying to pull cache from remote repo: %+v", src.DockerReference())
+ options := buildah.PullOptions{
+ SignaturePolicyPath: s.executor.signaturePolicyPath,
+ Store: s.executor.store,
+ SystemContext: s.executor.systemContext,
+ BlobDirectory: s.executor.blobDirectory,
+ MaxRetries: s.executor.maxPullPushRetries,
+ RetryDelay: s.executor.retryPullPushDelay,
+ AllTags: false,
+ ReportWriter: nil,
+ PullPolicy: define.PullIfNewer,
+ }
+ id, err := buildah.Pull(ctx, src.DockerReference().String(), options)
+ if err != nil {
+ logrus.Debugf("failed pulling cache from source %s: %v", src, err)
+ continue // failed pulling this one try next
+ //return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err)
+ }
+ logrus.Debugf("successfully pulled cache from repo %s: %s", src, id)
+ return src.DockerReference(), id, nil
+ }
+ return nil, "", fmt.Errorf("failed pulling cache from all available sources %q", srcList)
+}
+
// intermediateImageExists returns the ID of an image in the local store that
// can serve as a cached result of building currNode on top of the current
// container, or "" if no suitable image exists.
// It verifies this by checking the parent of the top layer of the image and the history.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
	// Get the list of images available in the image store
	images, err := s.executor.store.Images()
	if err != nil {
		return "", fmt.Errorf("getting image list from store: %w", err)
	}
	// Fetch the base image's history and diff IDs once, up front; every
	// candidate below is compared against them.
	var baseHistory []v1.History
	var baseDiffIDs []digest.Digest
	if s.builder.FromImageID != "" {
		_, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
		if err != nil {
			return "", fmt.Errorf("getting history of base image %q: %w", s.builder.FromImageID, err)
		}
	}
	for _, image := range images {
		// If s.executor.cacheTTL was specified
		// then skip this image if it was created
		// longer ago than the requested duration.
		if int64(s.executor.cacheTTL) != 0 {
			timeNow := time.Now()
			imageDuration := timeNow.Sub(image.Created)
			if s.executor.cacheTTL < imageDuration {
				continue
			}
		}
		var imageTopLayer *storage.Layer
		var imageParentLayerID string
		if image.TopLayer != "" {
			imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
			if err != nil {
				return "", fmt.Errorf("getting top layer info: %w", err)
			}
			// Figure out which layer from this image we should
			// compare our container's base layer to.
			imageParentLayerID = imageTopLayer.ID
			// If we haven't added a layer here, then our base
			// layer should be the same as the image's layer. If
			// we did add a layer, then our base layer should be
			// the same as the parent of the image's layer.
			if buildAddsLayer {
				imageParentLayerID = imageTopLayer.Parent
			}
		}
		// If the parent of the top layer of an image is equal to the current build image's top layer,
		// it means that this image is potentially a cached intermediate image from a previous
		// build.
		if s.builder.TopLayer != imageParentLayerID {
			continue
		}
		// Next we double check that the history of this image is equivalent to the previous
		// lines in the Dockerfile up till the point we are at in the build.
		manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID)
		if err != nil {
			// It's possible that this image is for another architecture, which results
			// in a custom-crafted error message that we'd have to use substring matching
			// to recognize. Instead, ignore the image.
			logrus.Debugf("error getting history of %q (%v), ignoring it", image.ID, err)
			continue
		}
		// If this candidate isn't of the type that we're building, then it may have lost
		// some format-specific information that a building-without-cache run wouldn't lose.
		if manifestType != s.executor.outputFormat {
			continue
		}
		// children + currNode is the point of the Dockerfile we are currently at.
		if s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) {
			return image.ID, nil
		}
	}
	// No candidate matched; the caller will run the step for real.
	return "", nil
}
+
// commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise,
// or commit via any custom exporter if specified.  Before committing, the
// image configuration (maintainer, env, ports, labels, entrypoint, etc.) is
// copied from the stage's Dockerfile builder into our working builder so the
// committed image reflects the instructions processed so far.  Returns the
// new image ID and, when a name was attached, a canonical reference to it.
func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash, finalInstruction bool) (string, reference.Canonical, error) {
	ib := s.stage.Builder
	// Resolve the output name to an image reference only when a name was
	// actually requested; otherwise we commit anonymously.
	var imageRef types.ImageReference
	if output != "" {
		imageRef2, err := s.executor.resolveNameToImageRef(output)
		if err != nil {
			return "", nil, err
		}
		imageRef = imageRef2
	}

	if ib.Author != "" {
		s.builder.SetMaintainer(ib.Author)
	}
	// Copy the accumulated Dockerfile configuration into the working
	// container's builder, field by field.
	config := ib.Config()
	if createdBy != "" {
		s.builder.SetCreatedBy(createdBy)
	}
	s.builder.SetHostname(config.Hostname)
	s.builder.SetDomainname(config.Domainname)
	// Platform overrides from the executor take precedence when set.
	if s.executor.architecture != "" {
		s.builder.SetArchitecture(s.executor.architecture)
	}
	if s.executor.os != "" {
		s.builder.SetOS(s.executor.os)
	}
	if s.executor.osVersion != "" {
		s.builder.SetOSVersion(s.executor.osVersion)
	}
	// A trailing "-" on an OS feature spec means "remove this feature".
	for _, osFeatureSpec := range s.executor.osFeatures {
		switch {
		case strings.HasSuffix(osFeatureSpec, "-"):
			s.builder.UnsetOSFeature(strings.TrimSuffix(osFeatureSpec, "-"))
		default:
			s.builder.SetOSFeature(osFeatureSpec)
		}
	}
	s.builder.SetUser(config.User)
	// Ports, env, volumes, and ONBUILD triggers are cleared and rebuilt
	// from the Dockerfile config so stale values don't leak through.
	s.builder.ClearPorts()
	for p := range config.ExposedPorts {
		s.builder.SetPort(string(p))
	}
	for _, envSpec := range config.Env {
		spec := strings.SplitN(envSpec, "=", 2)
		s.builder.SetEnv(spec[0], spec[1])
	}
	for _, envSpec := range s.executor.unsetEnvs {
		s.builder.UnsetEnv(envSpec)
	}
	s.builder.SetCmd(config.Cmd)
	s.builder.ClearVolumes()
	for v := range config.Volumes {
		s.builder.AddVolume(v)
	}
	s.builder.ClearOnBuild()
	for _, onBuildSpec := range config.OnBuild {
		s.builder.SetOnBuild(onBuildSpec)
	}
	s.builder.SetWorkDir(config.WorkingDir)
	s.builder.SetEntrypoint(config.Entrypoint)
	s.builder.SetShell(config.Shell)
	s.builder.SetStopSignal(config.StopSignal)
	if config.Healthcheck != nil {
		s.builder.SetHealthcheck(&buildahdocker.HealthConfig{
			Test:        append([]string{}, config.Healthcheck.Test...),
			Interval:    config.Healthcheck.Interval,
			Timeout:     config.Healthcheck.Timeout,
			StartPeriod: config.Healthcheck.StartPeriod,
			Retries:     config.Healthcheck.Retries,
		})
	} else {
		s.builder.SetHealthcheck(nil)
	}
	s.builder.ClearLabels()

	if output == "" {
		// If output is not set then we are committing
		// an intermediate image, in such case we must
		// honor layer labels if they are configured.
		for _, labelString := range s.executor.layerLabels {
			label := strings.SplitN(labelString, "=", 2)
			if len(label) > 1 {
				s.builder.SetLabel(label[0], label[1])
			} else {
				s.builder.SetLabel(label[0], "")
			}
		}
	}
	for k, v := range config.Labels {
		s.builder.SetLabel(k, v)
	}
	// Stamp the image with buildah's identity label unless explicitly
	// disabled.
	if s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolUndefined || s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolTrue {
		s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
	}
	for _, key := range s.executor.unsetLabels {
		s.builder.UnsetLabel(key)
	}
	// Annotations are "key=value" strings; a bare key gets an empty value.
	for _, annotationSpec := range s.executor.annotations {
		annotation := strings.SplitN(annotationSpec, "=", 2)
		if len(annotation) > 1 {
			s.builder.SetAnnotation(annotation[0], annotation[1])
		} else {
			s.builder.SetAnnotation(annotation[0], "")
		}
	}
	if imageRef != nil {
		logName := transports.ImageName(imageRef)
		logrus.Debugf("COMMIT %q", logName)
	} else {
		logrus.Debugf("COMMIT")
	}
	// Suppress per-commit progress output when building with layers or
	// with caching disabled (the executor handles its own reporting).
	writer := s.executor.reportWriter
	if s.executor.layers || !s.executor.useCache {
		writer = nil
	}
	options := buildah.CommitOptions{
		Compression:           s.executor.compression,
		SignaturePolicyPath:   s.executor.signaturePolicyPath,
		ReportWriter:          writer,
		PreferredManifestType: s.executor.outputFormat,
		SystemContext:         s.executor.systemContext,
		Squash:                squash,
		OmitHistory:           s.executor.commonBuildOptions.OmitHistory,
		EmptyLayer:            emptyLayer,
		BlobDirectory:         s.executor.blobDirectory,
		SignBy:                s.executor.signBy,
		MaxRetries:            s.executor.maxPullPushRetries,
		RetryDelay:            s.executor.retryPullPushDelay,
		HistoryTimestamp:      s.executor.timestamp,
		Manifest:              s.executor.manifest,
	}
	// Confidential-workload conversion only applies to the final commit.
	if finalInstruction {
		options.ConfidentialWorkloadOptions = s.executor.confidentialWorkload
	}
	imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
	if err != nil {
		return "", nil, err
	}
	// Produce a digested reference for the committed image when it was
	// given a docker-style name.
	var ref reference.Canonical
	if imageRef != nil {
		if dref := imageRef.DockerReference(); dref != nil {
			if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
				return "", nil, fmt.Errorf("computing canonical reference for new image %q: %w", imgID, err)
			}
		}
	}
	return imgID, ref, nil
}
+
+func (s *StageExecutor) generateBuildOutput(buildOutputOpts define.BuildOutputOption) error {
+ extractRootfsOpts := buildah.ExtractRootfsOptions{}
+ if unshare.IsRootless() {
+ // In order to maintain as much parity as possible
+ // with buildkit's version of --output and to avoid
+ // unsafe invocation of exported executables it was
+ // decided to strip setuid,setgid and extended attributes.
+ // Since modes like setuid,setgid leaves room for executable
+ // to get invoked with different file-system permission its safer
+ // to strip them off for unprivileged invocation.
+ // See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
+ extractRootfsOpts.StripSetuidBit = true
+ extractRootfsOpts.StripSetgidBit = true
+ extractRootfsOpts.StripXattrs = true
+ }
+ rc, errChan, err := s.builder.ExtractRootfs(buildah.CommitOptions{}, extractRootfsOpts)
+ if err != nil {
+ return fmt.Errorf("failed to extract rootfs from given container image: %w", err)
+ }
+ defer rc.Close()
+ err = internalUtil.ExportFromReader(rc, buildOutputOpts)
+ if err != nil {
+ return fmt.Errorf("failed to export build output: %w", err)
+ }
+ if errChan != nil {
+ err = <-errChan
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// EnsureContainerPath makes sure path exists inside the working container,
// delegating to EnsureContainerPathAs with an empty user and a nil mode.
func (s *StageExecutor) EnsureContainerPath(path string) error {
	return s.builder.EnsureContainerPathAs(path, "", nil)
}
+
// EnsureContainerPathAs makes sure path exists inside the working container,
// forwarding the requested user and mode to the underlying builder.
func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
	return s.builder.EnsureContainerPathAs(path, user, mode)
}
diff --git a/imagebuildah/stage_executor_test.go b/imagebuildah/stage_executor_test.go
new file mode 100644
index 0000000..d65174d
--- /dev/null
+++ b/imagebuildah/stage_executor_test.go
@@ -0,0 +1,100 @@
+package imagebuildah
+
+import (
+ "encoding/json"
+ "strconv"
+ "testing"
+
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHistoryEntriesEqual(t *testing.T) {
+ testCases := []struct {
+ a, b string
+ equal bool
+ }{
+ {
+ a: `{}`,
+ b: `{}`,
+ equal: true,
+ },
+ {
+ a: `{"created":"2020-06-17T00:22:25.47282687Z"}`,
+ b: `{"created":"2020-06-17T00:22:25.47282687Z"}`,
+ equal: true,
+ },
+ {
+ a: `{"created":"2020-07-16T12:38:26.733333497-04:00"}`,
+ b: `{"created":"2020-07-16T12:38:26.733333497-04:00"}`,
+ equal: true,
+ },
+ {
+ a: `{"created":"2020-07-16T12:38:26.733333497-04:00"}`,
+ b: `{"created":"2020-07-16T12:38:26.733333497Z"}`,
+ equal: false,
+ },
+ {
+ a: `{"created":"2020-07-16T12:38:26.733333497Z"}`,
+ b: `{}`,
+ equal: false,
+ },
+ {
+ a: `{}`,
+ b: `{"created":"2020-07-16T12:38:26.733333497Z"}`,
+ equal: false,
+ },
+ {
+ a: `{"comment":"thing"}`,
+ b: `{"comment":"thing"}`,
+ equal: true,
+ },
+ {
+ a: `{"comment":"thing","ignored-field-for-testing":"ignored"}`,
+ b: `{"comment":"thing"}`,
+ equal: true,
+ },
+ {
+ a: `{"CoMmEnT":"thing"}`,
+ b: `{"comment":"thing"}`,
+ equal: true,
+ },
+ {
+ a: `{"comment":"thing"}`,
+ b: `{"comment":"things"}`,
+ equal: false,
+ },
+ {
+ a: `{"author":"respected"}`,
+ b: `{"author":"respected"}`,
+ equal: true,
+ },
+ {
+ a: `{"author":"respected"}`,
+ b: `{"author":"discredited"}`,
+ equal: false,
+ },
+ {
+ a: `{"created_by":"actions"}`,
+ b: `{"created_by":"actions"}`,
+ equal: true,
+ },
+ {
+ a: `{"created_by":"jiggery"}`,
+ b: `{"created_by":"pokery"}`,
+ equal: false,
+ },
+ }
+ for i := range testCases {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ var a, b v1.History
+ err := json.Unmarshal([]byte(testCases[i].a), &a)
+ require.Nil(t, err, "error unmarshalling history %q: %v", testCases[i].a, err)
+ err = json.Unmarshal([]byte(testCases[i].b), &b)
+ require.Nil(t, err, "error unmarshalling history %q: %v", testCases[i].b, err)
+ equal := historyEntriesEqual(a, b)
+ assert.Equal(t, testCases[i].equal, equal, "historyEntriesEqual(%q, %q) != %v", testCases[i].a, testCases[i].b, testCases[i].equal)
+ })
+ }
+}
diff --git a/imagebuildah/util.go b/imagebuildah/util.go
new file mode 100644
index 0000000..90c018f
--- /dev/null
+++ b/imagebuildah/util.go
@@ -0,0 +1,22 @@
+package imagebuildah
+
+import (
+ "github.com/containers/buildah"
+)
+
// InitReexec is a wrapper for buildah.InitReexec(). It should be called at
// the start of main(), and if it returns true, main() should return
// successfully immediately (the process was re-executed as a helper).
func InitReexec() bool {
	return buildah.InitReexec()
}
+
// argsMapToSlice flattens m into a slice of "key=value" strings.  The order
// of the result follows Go's map iteration order and is therefore unspecified.
func argsMapToSlice(m map[string]string) []string {
	out := make([]string, 0, len(m))
	for key, value := range m {
		out = append(out, key+"="+value)
	}
	return out
}
diff --git a/import.go b/import.go
new file mode 100644
index 0000000..88f732a
--- /dev/null
+++ b/import.go
@@ -0,0 +1,176 @@
+package buildah
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/docker"
+ "github.com/containers/buildah/util"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
+)
+
+func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) {
+ if imageID == "" {
+ return nil, errors.New("Internal error: imageID is empty in importBuilderDataFromImage")
+ }
+
+ storeopts, err := storage.DefaultStoreOptions(false, 0)
+ if err != nil {
+ return nil, err
+ }
+ uidmap, gidmap := convertStorageIDMaps(storeopts.UIDMap, storeopts.GIDMap)
+
+ ref, err := is.Transport.ParseStoreReference(store, imageID)
+ if err != nil {
+ return nil, fmt.Errorf("no such image %q: %w", imageID, err)
+ }
+ src, err := ref.NewImageSource(ctx, systemContext)
+ if err != nil {
+ return nil, fmt.Errorf("instantiating image source: %w", err)
+ }
+ defer src.Close()
+
+ imageDigest := ""
+ manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
+ if err != nil {
+ return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
+ }
+ if manifestDigest, err := manifest.Digest(manifestBytes); err == nil {
+ imageDigest = manifestDigest.String()
+ }
+
+ var instanceDigest *digest.Digest
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return nil, fmt.Errorf("parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
+ }
+ instance, err := list.ChooseInstance(systemContext)
+ if err != nil {
+ return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
+ }
+ instanceDigest = &instance
+ }
+
+ image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest))
+ if err != nil {
+ return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
+ }
+
+ imageName := ""
+ if img, err3 := store.Image(imageID); err3 == nil {
+ if len(img.Names) > 0 {
+ imageName = img.Names[0]
+ }
+ if img.TopLayer != "" {
+ layer, err4 := store.Layer(img.TopLayer)
+ if err4 != nil {
+ return nil, fmt.Errorf("reading information about image's top layer: %w", err4)
+ }
+ uidmap, gidmap = convertStorageIDMaps(layer.UIDMap, layer.GIDMap)
+ }
+ }
+
+ defaultNamespaceOptions, err := DefaultNamespaceOptions()
+ if err != nil {
+ return nil, err
+ }
+
+ netInt, err := getNetworkInterface(store, "", "")
+ if err != nil {
+ return nil, err
+ }
+
+ builder := &Builder{
+ store: store,
+ Type: containerType,
+ FromImage: imageName,
+ FromImageID: imageID,
+ FromImageDigest: imageDigest,
+ Container: containerName,
+ ContainerID: containerID,
+ ImageAnnotations: map[string]string{},
+ ImageCreatedBy: "",
+ NamespaceOptions: defaultNamespaceOptions,
+ IDMappingOptions: define.IDMappingOptions{
+ HostUIDMapping: len(uidmap) == 0,
+ HostGIDMapping: len(uidmap) == 0,
+ UIDMap: uidmap,
+ GIDMap: gidmap,
+ },
+ NetworkInterface: netInt,
+ CommonBuildOpts: &CommonBuildOptions{},
+ }
+
+ if err := builder.initConfig(ctx, image, systemContext); err != nil {
+ return nil, fmt.Errorf("preparing image configuration: %w", err)
+ }
+
+ return builder, nil
+}
+
// importBuilder creates a Builder wrapping the existing container named in
// options: the builder's initial settings are read from the container's
// image, then its ID mappings are overridden with the container's own.
// The resulting builder state is saved before it is returned.
func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
	if options.Container == "" {
		return nil, errors.New("container name must be specified")
	}

	c, err := store.Container(options.Container)
	if err != nil {
		return nil, err
	}

	systemContext := getSystemContext(store, &types.SystemContext{}, options.SignaturePolicyPath)

	builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID)
	if err != nil {
		return nil, err
	}

	// Record the image ID as the docker-format parent: use it verbatim if it
	// already parses as a digest, otherwise treat it as a hex value under the
	// canonical digest algorithm.
	if builder.FromImageID != "" {
		if d, err2 := digest.Parse(builder.FromImageID); err2 == nil {
			builder.Docker.Parent = docker.ID(d)
		} else {
			builder.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), builder.FromImageID))
		}
	}
	if builder.FromImage != "" {
		builder.Docker.ContainerConfig.Image = builder.FromImage
	}
	// The container's own ID maps take precedence over whatever the image
	// import derived.
	builder.IDMappingOptions.UIDMap, builder.IDMappingOptions.GIDMap = convertStorageIDMaps(c.UIDMap, c.GIDMap)

	err = builder.Save()
	if err != nil {
		return nil, fmt.Errorf("saving builder state: %w", err)
	}

	return builder, nil
}
+
+func importBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
+ if options.Image == "" {
+ return nil, errors.New("image name must be specified")
+ }
+
+ systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
+
+ _, img, err := util.FindImage(store, "", systemContext, options.Image)
+ if err != nil {
+ return nil, fmt.Errorf("importing settings: %w", err)
+ }
+
+ builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
+ if err != nil {
+ return nil, fmt.Errorf("importing build settings from image %q: %w", options.Image, err)
+ }
+
+ builder.setupLogger()
+ return builder, nil
+}
diff --git a/info.go b/info.go
new file mode 100644
index 0000000..85e570c
--- /dev/null
+++ b/info.go
@@ -0,0 +1,190 @@
+package buildah
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+
+ internalUtil "github.com/containers/buildah/internal/util"
+ putil "github.com/containers/buildah/pkg/util"
+ "github.com/containers/buildah/util"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/system"
+ "github.com/containers/storage/pkg/unshare"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
// InfoData holds one category of information ("host" or "store") together
// with its key/value data.
type InfoData struct {
	// Type identifies the category the data describes, e.g. "host" or "store".
	Type string
	// Data maps attribute names to their values for this category.
	Data map[string]interface{}
}
+
+// Info returns the store and host information
+func Info(store storage.Store) ([]InfoData, error) {
+ info := []InfoData{}
+ // get host information
+ hostInfo := hostInfo()
+ info = append(info, InfoData{Type: "host", Data: hostInfo})
+
+ // get store information
+ storeInfo, err := storeInfo(store)
+ if err != nil {
+ logrus.Error(err, "error getting store info")
+ }
+ info = append(info, InfoData{Type: "store", Data: storeInfo})
+ return info, nil
+}
+
// hostInfo gathers facts about the host: platform, CPU count, rootless
// status, cgroup version, OCI runtime, memory/swap totals, distribution,
// kernel version, uptime, and hostname.  Failures to read individual values
// are logged and leave the corresponding entries empty or zero-valued.
func hostInfo() map[string]interface{} {
	info := map[string]interface{}{}
	// Normalize the platform so variants are reported consistently.
	ps := internalUtil.NormalizePlatform(v1.Platform{OS: runtime.GOOS, Architecture: runtime.GOARCH})
	info["os"] = ps.OS
	info["arch"] = ps.Architecture
	info["variant"] = ps.Variant
	info["cpus"] = runtime.NumCPU()
	info["rootless"] = unshare.IsRootless()

	unified, err := util.IsCgroup2UnifiedMode()
	if err != nil {
		logrus.Error(err, "err reading cgroups mode")
	}
	// Report cgroup v2 only when the unified hierarchy is in use.
	cgroupVersion := "v1"
	ociruntime := util.Runtime()
	if unified {
		cgroupVersion = "v2"
	}
	info["CgroupVersion"] = cgroupVersion
	info["OCIRuntime"] = ociruntime

	mi, err := system.ReadMemInfo()
	if err != nil {
		// On failure, report empty strings rather than omitting the keys.
		logrus.Error(err, "err reading memory info")
		info["MemTotal"] = ""
		info["MemFree"] = ""
		info["SwapTotal"] = ""
		info["SwapFree"] = ""
	} else {
		info["MemTotal"] = mi.MemTotal
		info["MemFree"] = mi.MemFree
		info["SwapTotal"] = mi.SwapTotal
		info["SwapFree"] = mi.SwapFree
	}
	hostDistributionInfo := getHostDistributionInfo()
	info["Distribution"] = map[string]interface{}{
		"distribution": hostDistributionInfo["Distribution"],
		"version":      hostDistributionInfo["Version"],
	}

	kv, err := putil.ReadKernelVersion()
	if err != nil {
		logrus.Error(err, "error reading kernel version")
	}
	info["kernel"] = kv

	upDuration, err := putil.ReadUptime()
	if err != nil {
		logrus.Error(err, "error reading up time")
	}

	// The uptime is formatted like "72h3m0.5s".  Insert a space after the
	// hour and minute components for readability, and collect the digits
	// that precede 'h' (the hour count) so we can estimate days below.
	hoursFound := false
	var timeBuffer bytes.Buffer
	var hoursBuffer bytes.Buffer
	for _, elem := range upDuration.String() {
		timeBuffer.WriteRune(elem)
		if elem == 'h' || elem == 'm' {
			timeBuffer.WriteRune(' ')
			if elem == 'h' {
				hoursFound = true
			}
		}
		// hoursFound is set before this check, so the 'h' itself is excluded.
		if !hoursFound {
			hoursBuffer.WriteRune(elem)
		}
	}

	info["uptime"] = timeBuffer.String()
	if hoursFound {
		hours, err := strconv.ParseFloat(hoursBuffer.String(), 64)
		if err == nil {
			days := hours / 24
			info["uptime"] = fmt.Sprintf("%s (Approximately %.2f days)", info["uptime"], days)
		}
	}

	host, err := os.Hostname()
	if err != nil {
		logrus.Error(err, "error getting hostname")
	}
	info["hostname"] = host

	return info
}
+
+// top-level "store" info
+func storeInfo(store storage.Store) (map[string]interface{}, error) {
+ // lets say storage driver in use, number of images, number of containers
+ info := map[string]interface{}{}
+ info["GraphRoot"] = store.GraphRoot()
+ info["RunRoot"] = store.RunRoot()
+ info["GraphDriverName"] = store.GraphDriverName()
+ info["GraphOptions"] = store.GraphOptions()
+ statusPairs, err := store.Status()
+ if err != nil {
+ return nil, err
+ }
+ status := map[string]string{}
+ for _, pair := range statusPairs {
+ status[pair[0]] = pair[1]
+ }
+ info["GraphStatus"] = status
+ images, err := store.Images()
+ if err != nil {
+ logrus.Error(err, "error getting number of images")
+ }
+ info["ImageStore"] = map[string]interface{}{
+ "number": len(images),
+ }
+
+ containers, err := store.Containers()
+ if err != nil {
+ logrus.Error(err, "error getting number of containers")
+ }
+ info["ContainerStore"] = map[string]interface{}{
+ "number": len(containers),
+ }
+
+ return info, nil
+}
+
// getHostDistributionInfo returns a map containing the host's distribution
// and version as read from /etc/os-release.  Both values default to
// "unknown" when the file is missing or lacks the relevant entries.
func getHostDistributionInfo() map[string]string {
	dist := map[string]string{
		"Distribution": "unknown",
		"Version":      "unknown",
	}

	f, err := os.Open("/etc/os-release")
	if err != nil {
		return dist
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		switch {
		case strings.HasPrefix(line, "ID="):
			dist["Distribution"] = strings.TrimPrefix(line, "ID=")
		case strings.HasPrefix(line, "VERSION_ID="):
			// VERSION_ID values are often quoted; strip the quotes.
			dist["Version"] = strings.Trim(strings.TrimPrefix(line, "VERSION_ID="), "\"")
		}
	}
	return dist
}
diff --git a/install.md b/install.md
new file mode 100644
index 0000000..6ad735a
--- /dev/null
+++ b/install.md
@@ -0,0 +1,402 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Installation Instructions
+
+## Installing packaged versions of buildah
+
+### [Arch Linux](https://www.archlinux.org)
+
+```bash
+sudo pacman -S buildah
+```
+
+#### [CentOS](https://www.centos.org)
+
+Buildah is available in the default Extras repos for CentOS 7 and in
+the AppStream repo for CentOS 8 and Stream, however the available version often
+lags the upstream release.
+
+```bash
+sudo yum -y install buildah
+```
+
+#### [Debian](https://debian.org)
+
+The buildah package is available in
+[Bookworm](https://packages.debian.org/bookworm/buildah), which
+is the current stable release (Debian 12), as well as in Debian Unstable/Sid.
+
+```bash
+# Debian Stable/Bookworm or Unstable/Sid
+sudo apt-get update
+sudo apt-get -y install buildah
+```
+
+
+### [Fedora](https://www.fedoraproject.org)
+
+```bash
+sudo dnf -y install buildah
+```
+
+### [Fedora SilverBlue](https://silverblue.fedoraproject.org)
+
+Installed by default
+
+### [Fedora CoreOS](https://coreos.fedoraproject.org)
+
+Not Available. Must be installed via package layering.
+
+rpm-ostree install buildah
+
+Note: [`podman`](https://podman.io) build is available by default.
+
+### [Gentoo](https://www.gentoo.org)
+[app-containers/buildah](https://packages.gentoo.org/packages/app-containers/buildah)
+```bash
+sudo emerge app-containers/buildah
+```
+
+### [openSUSE](https://www.opensuse.org)
+
+```bash
+sudo zypper install buildah
+```
+
+### [openSUSE Kubic](https://kubic.opensuse.org)
+
+transactional-update pkg in buildah
+
+### [RHEL7](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux)
+
+Subscribe, then enable Extras channel and install buildah.
+
+```bash
+sudo subscription-manager repos --enable=rhel-7-server-extras-rpms
+sudo yum -y install buildah
+```
+
+#### [Raspberry Pi OS arm64 (beta)](https://downloads.raspberrypi.org/raspios_arm64/images/)
+
+Raspberry Pi OS uses the standard Debian repositories,
+so it is fully compatible with Debian's arm64 repository.
+You can simply follow the [steps for Debian](#debian) to install buildah.
+
+
+### [RHEL8 Beta](https://www.redhat.com/en/blog/powering-its-future-while-preserving-present-introducing-red-hat-enterprise-linux-8-beta?intcmp=701f2000001Cz6OAAS)
+
+```bash
+sudo yum module enable -y container-tools:1.0
+sudo yum module install -y buildah
+```
+
+### [Ubuntu](https://www.ubuntu.com)
+
+The buildah package is available in the official repositories for Ubuntu 20.10
+and newer.
+
+```bash
+# Ubuntu 20.10 and newer
+sudo apt-get -y update
+sudo apt-get -y install buildah
+```
+
+# Building from scratch
+
+## System Requirements
+
+### Kernel Version Requirements
+To run Buildah on Red Hat Enterprise Linux or CentOS, version 7.4 or higher is required.
+On other Linux distributions Buildah requires a kernel version that supports the OverlayFS and/or fuse-overlayfs filesystem -- you'll need to consult your distribution's documentation to determine a minimum version number.
+
+### runc Requirement
+
+Buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build`
+encounters a `RUN` instruction, so you'll also need to build and install a compatible version of
+[runc](https://github.com/opencontainers/runc) for Buildah to call for those cases. If Buildah is installed
+via a package manager such as yum, dnf or apt-get, runc will be installed as part of that process.
+
+### CNI Requirement
+
+When Buildah uses `runc` to run commands, it defaults to running those commands
+in the host's network namespace. If the command is being run in a separate
+user namespace, though, for example when ID mapping is used, then the command
+will also be run in a separate network namespace.
+
+A newly-created network namespace starts with no network interfaces, so
+commands which are run in that namespace are effectively disconnected from the
+network unless additional setup is done. Buildah relies on the CNI
+[library](https://github.com/containernetworking/cni) and
+[plugins](https://github.com/containernetworking/plugins) to set up interfaces
+and routing for network namespaces.
+
+If Buildah is installed via a package manager such as yum, dnf or apt-get, a
+package containing CNI plugins may be available (in Fedora, the package is
+named `containernetworking-cni`). If not, they will need to be installed,
+for example using:
+```
+ git clone https://github.com/containernetworking/plugins
+ ( cd ./plugins; ./build_linux.sh )
+ sudo mkdir -p /opt/cni/bin
+ sudo install -v ./plugins/bin/* /opt/cni/bin
+```
+
+The CNI library needs to be configured so that it will know which plugins to
+call to set up namespaces. Usually, this configuration takes the form of one
+or more configuration files in the `/etc/cni/net.d` directory. A set of example
+configuration files is included in the
+[`docs/cni-examples`](https://github.com/containers/buildah/tree/main/docs/cni-examples)
+directory of this source tree.
+
+## Package Installation
+
+Buildah is available on several software repositories and can be installed via a package manager such
+as yum, dnf or apt-get on a number of Linux distributions.
+
+## Installation from GitHub
+
+Prior to installing Buildah, install the following packages on your Linux distro:
+* make
+* golang (Requires version 1.13 or higher.)
+* bats
+* btrfs-progs-devel
+* bzip2
+* git
+* go-md2man
+* gpgme-devel
+* glib2-devel
+* libassuan-devel
+* libseccomp-devel
+* runc (Requires version 1.0 RC4 or higher.)
+* containers-common
+
+### Fedora
+
+In Fedora, you can use this command:
+
+```
+ dnf -y install \
+ make \
+ golang \
+ bats \
+ btrfs-progs-devel \
+ glib2-devel \
+ gpgme-devel \
+ libassuan-devel \
+ libseccomp-devel \
+ git \
+ bzip2 \
+ go-md2man \
+ runc \
+ containers-common
+```
+
+Then to install Buildah on Fedora follow the steps in this example:
+
+```
+ mkdir ~/buildah
+ cd ~/buildah
+ export GOPATH=`pwd`
+ git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
+ cd ./src/github.com/containers/buildah
+ make
+ sudo make install
+ buildah --help
+```
+
+### RHEL, CentOS
+
+In RHEL and CentOS, run this command to install the build dependencies:
+
+```
+ yum -y install \
+ make \
+ golang \
+ bats \
+ btrfs-progs-devel \
+ glib2-devel \
+ gpgme-devel \
+ libassuan-devel \
+ libseccomp-devel \
+ git \
+ bzip2 \
+ go-md2man \
+ runc \
+ skopeo-containers
+```
+
+The build steps for Buildah on RHEL or CentOS are the same as for Fedora, above.
+
+### openSUSE
+
+On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
+
+```
+ zypper in make \
+ git \
+ golang \
+ runc \
+ bzip2 \
+ libgpgme-devel \
+ libseccomp-devel \
+ libbtrfs-devel \
+ go-md2man
+```
+
+The build steps for Buildah on SUSE / openSUSE are the same as for Fedora, above.
+
+
+### Ubuntu/Debian
+
+In Ubuntu 22.10 (Kinetic) or Debian 12 (Bookworm) you can use these commands:
+
+```
+ sudo apt-get -y -qq update
+ sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo
+```
+
+Then to install Buildah follow the steps in this example:
+
+```
+ git clone https://github.com/containers/buildah
+ cd buildah
+ make runc all SECURITYTAGS="apparmor seccomp"
+ sudo make install install.runc
+ buildah --help
+```
+
+## Vendoring - Dependency Management
+
+This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory.
+
+## Configuration files
+
+The following configuration files are required in order for Buildah to run appropriately. The
+majority of these files are commonly contained in the `containers-common` package.
+
+### [registries.conf](https://github.com/containers/buildah/blob/main/docs/samples/registries.conf)
+
+#### Man Page: [registries.conf.5](https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md)
+
+`/etc/containers/registries.conf`
+
+registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
+
+#### Example from the Fedora `containers-common` package
+
+```
+cat /etc/containers/registries.conf
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to TOML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries.search', 'registries.insecure',
+# and 'registries.block'.
+
+[registries.search]
+registries = ['docker.io', 'registry.fedoraproject.org', 'quay.io', 'registry.access.redhat.com']
+
+# If you need to access insecure registries, add the registry's fully-qualified name.
+# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
+[registries.insecure]
+registries = []
+
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registries fully-qualified name.
+#
+# Docker only
+[registries.block]
+registries = []
+```
+
+### [mounts.conf](https://src.fedoraproject.org/rpms/skopeo/blob/main/f/mounts.conf)
+
+`/usr/share/containers/mounts.conf` and optionally `/etc/containers/mounts.conf`
+
+The mounts.conf files specify volume mount files or directories that are automatically mounted inside containers when executing the `buildah run` or `buildah build` commands. Container processes can then use this content. The volume mount content does not get committed to the final image. This file is usually provided by the containers-common package.
+
+Usually these directories are used for passing secrets or credentials required by the package software to access remote package repositories.
+
+For example, a mounts.conf with the line "`/usr/share/rhel/secrets:/run/secrets`", the content of `/usr/share/rhel/secrets` directory is mounted on `/run/secrets` inside the container. This mountpoint allows Red Hat Enterprise Linux subscriptions from the host to be used within the container. It is also possible to omit the destination if it's equal to the source path. For example, specifying `/var/lib/secrets` will mount the directory into the same container destination path `/var/lib/secrets`.
+
+Note this is not a volume mount. The content of the volumes is copied into container storage, not bind mounted directly from the host.
+
+#### Example from the Fedora `containers-common` package:
+
+```
+cat /usr/share/containers/mounts.conf
+/usr/share/rhel/secrets:/run/secrets
+```
+
+### [seccomp.json](https://src.fedoraproject.org/rpms/skopeo/blob/main/f/seccomp.json)
+
+`/usr/share/containers/seccomp.json`
+
+seccomp.json contains the list of seccomp rules to be allowed inside of
+containers. This file is usually provided by the containers-common package.
+
+The link above takes you to the seccomp.json
+
+### [policy.json](https://github.com/containers/skopeo/blob/main/default-policy.json)
+
+`/etc/containers/policy.json`
+
+#### Man Page: [policy.json.5](https://github.com/containers/image/blob/main/docs/policy.json.md)
+
+
+#### Example from the Fedora `containers-common` package:
+
+```
+cat /etc/containers/policy.json
+{
+ "default": [
+ {
+ "type": "insecureAcceptAnything"
+ }
+ ],
+ "transports":
+ {
+ "docker-daemon":
+ {
+ "": [{"type":"insecureAcceptAnything"}]
+ }
+ }
+}
+```
+
+## Debug with Delve and the like
+
+To make a source debug build without optimizations use `BUILDDEBUG=1`, like:
+```
+make all BUILDDEBUG=1
+```
+
+## Vendoring
+
+Buildah uses Go Modules for vendoring purposes. If you need to update or add a vendored package into Buildah, please follow this procedure:
+ * Enter into your sandbox `src/github.com/containers/buildah` and ensure that the GOPATH variable is set to the directory prior as noted above.
+ * `export GO111MODULE=on`
+ * `go get` the needed version:
+ * Assuming you want to 'bump' the `github.com/containers/storage` package to version 1.12.13, use this command: `go get github.com/containers/storage@v1.12.13`
+ * Assuming that you want to 'bump' the `github.com/containers/storage` package to a particular commit, use this command: `go get github.com/containers/storage@e307568568533c4afccdf7b56df7b4493e4e9a7b`
+ * `make vendor-in-container`
+ * `make`
+ * `make install`
+ * Then add any updated or added files with `git add` then do a `git commit` and create a PR.
+
+### Vendor from your own fork
+
+If you wish to vendor in your personal fork to try changes out (assuming containers/storage in the below example):
+
+ * `go mod edit -replace github.com/containers/storage=github.com/{mygithub_username}/storage@YOUR_BRANCH`
+ * `make vendor-in-container`
+
+To revert
+ * `go mod edit -dropreplace github.com/containers/storage`
+ * `make vendor-in-container`
+
+To speed up fetching dependencies, you can use a [Go Module Proxy](https://proxy.golang.org) by setting `GOPROXY=https://proxy.golang.org`.
diff --git a/internal/config/convert.go b/internal/config/convert.go
new file mode 100644
index 0000000..7287c67
--- /dev/null
+++ b/internal/config/convert.go
@@ -0,0 +1,121 @@
+package config
+
+import (
+ "github.com/containers/image/v5/manifest"
+ dockerclient "github.com/fsouza/go-dockerclient"
+)
+
+// Schema2ConfigFromGoDockerclientConfig converts a go-dockerclient Config
+// structure to a manifest Schema2Config.
+func Schema2ConfigFromGoDockerclientConfig(config *dockerclient.Config) *manifest.Schema2Config {
+ overrideExposedPorts := make(map[manifest.Schema2Port]struct{})
+ for port := range config.ExposedPorts {
+ overrideExposedPorts[manifest.Schema2Port(port)] = struct{}{}
+ }
+ var overrideHealthCheck *manifest.Schema2HealthConfig
+ if config.Healthcheck != nil {
+ overrideHealthCheck = &manifest.Schema2HealthConfig{
+ Test: config.Healthcheck.Test,
+ StartPeriod: config.Healthcheck.StartPeriod,
+ Interval: config.Healthcheck.Interval,
+ Timeout: config.Healthcheck.Timeout,
+ Retries: config.Healthcheck.Retries,
+ }
+ }
+ labels := make(map[string]string)
+ for k, v := range config.Labels {
+ labels[k] = v
+ }
+ volumes := make(map[string]struct{})
+ for v := range config.Volumes {
+ volumes[v] = struct{}{}
+ }
+ s2config := &manifest.Schema2Config{
+ Hostname: config.Hostname,
+ Domainname: config.Domainname,
+ User: config.User,
+ AttachStdin: config.AttachStdin,
+ AttachStdout: config.AttachStdout,
+ AttachStderr: config.AttachStderr,
+ ExposedPorts: overrideExposedPorts,
+ Tty: config.Tty,
+ OpenStdin: config.OpenStdin,
+ StdinOnce: config.StdinOnce,
+ Env: append([]string{}, config.Env...),
+ Cmd: append([]string{}, config.Cmd...),
+ Healthcheck: overrideHealthCheck,
+ ArgsEscaped: config.ArgsEscaped,
+ Image: config.Image,
+ Volumes: volumes,
+ WorkingDir: config.WorkingDir,
+ Entrypoint: append([]string{}, config.Entrypoint...),
+ NetworkDisabled: config.NetworkDisabled,
+ MacAddress: config.MacAddress,
+ OnBuild: append([]string{}, config.OnBuild...),
+ Labels: labels,
+ StopSignal: config.StopSignal,
+ Shell: config.Shell,
+ }
+ if config.StopTimeout != 0 {
+ s2config.StopTimeout = &config.StopTimeout
+ }
+ return s2config
+}
+
+// GoDockerclientConfigFromSchema2Config converts a manifest Schema2Config
+// to a go-dockerclient config structure.
+func GoDockerclientConfigFromSchema2Config(s2config *manifest.Schema2Config) *dockerclient.Config {
+ overrideExposedPorts := make(map[dockerclient.Port]struct{})
+ for port := range s2config.ExposedPorts {
+ overrideExposedPorts[dockerclient.Port(port)] = struct{}{}
+ }
+ var healthCheck *dockerclient.HealthConfig
+ if s2config.Healthcheck != nil {
+ healthCheck = &dockerclient.HealthConfig{
+ Test: s2config.Healthcheck.Test,
+ StartPeriod: s2config.Healthcheck.StartPeriod,
+ Interval: s2config.Healthcheck.Interval,
+ Timeout: s2config.Healthcheck.Timeout,
+ Retries: s2config.Healthcheck.Retries,
+ }
+ }
+ labels := make(map[string]string)
+ for k, v := range s2config.Labels {
+ labels[k] = v
+ }
+ volumes := make(map[string]struct{})
+ for v := range s2config.Volumes {
+ volumes[v] = struct{}{}
+ }
+ config := &dockerclient.Config{
+ Hostname: s2config.Hostname,
+ Domainname: s2config.Domainname,
+ User: s2config.User,
+ AttachStdin: s2config.AttachStdin,
+ AttachStdout: s2config.AttachStdout,
+ AttachStderr: s2config.AttachStderr,
+ PortSpecs: nil,
+ ExposedPorts: overrideExposedPorts,
+ Tty: s2config.Tty,
+ OpenStdin: s2config.OpenStdin,
+ StdinOnce: s2config.StdinOnce,
+ Env: append([]string{}, s2config.Env...),
+ Cmd: append([]string{}, s2config.Cmd...),
+ Healthcheck: healthCheck,
+ ArgsEscaped: s2config.ArgsEscaped,
+ Image: s2config.Image,
+ Volumes: volumes,
+ WorkingDir: s2config.WorkingDir,
+ Entrypoint: append([]string{}, s2config.Entrypoint...),
+ NetworkDisabled: s2config.NetworkDisabled,
+ MacAddress: s2config.MacAddress,
+ OnBuild: append([]string{}, s2config.OnBuild...),
+ Labels: labels,
+ StopSignal: s2config.StopSignal,
+ Shell: s2config.Shell,
+ }
+ if s2config.StopTimeout != nil {
+ config.StopTimeout = *s2config.StopTimeout
+ }
+ return config
+}
diff --git a/internal/config/convert_test.go b/internal/config/convert_test.go
new file mode 100644
index 0000000..589bced
--- /dev/null
+++ b/internal/config/convert_test.go
@@ -0,0 +1,166 @@
+package config
+
+import (
+ "reflect"
+ "strconv"
+ "testing"
+
+ "github.com/containers/buildah/util"
+ "github.com/containers/image/v5/manifest"
+ dockerclient "github.com/fsouza/go-dockerclient"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// fillAllFields recursively fills in 1 or "1" for every field in the passed-in
+// structure, and that slices and maps have at least one value in them.
+func fillAllFields[pStruct any](t *testing.T, st pStruct) {
+ v := reflect.ValueOf(st)
+ if v.Kind() == reflect.Pointer {
+ v = reflect.Indirect(v)
+ }
+ fillAllValueFields(t, v)
+}
+
// fillAllValueFields populates every visible, non-embedded field of the
// struct value v with a nonzero value via fillValue.
func fillAllValueFields(t *testing.T, v reflect.Value) {
	fields := reflect.VisibleFields(v.Type())
	for _, field := range fields {
		if field.Anonymous {
			// all right, fine, keep your secrets
			continue
		}
		f := v.FieldByName(field.Name)
		// Maps need both key and element types; other container-like kinds
		// only need the element type.
		var keyType, elemType reflect.Type
		if field.Type.Kind() == reflect.Map {
			keyType = field.Type.Key()
		}
		switch field.Type.Kind() {
		case reflect.Array, reflect.Chan, reflect.Map, reflect.Pointer, reflect.Slice:
			elemType = field.Type.Elem()
		}
		fillValue(t, f, field.Name, field.Type.Kind(), keyType, elemType)
	}
}
+
// fillValue stores a nonzero value of the appropriate kind into value: true
// for bools, 1 for integer kinds, "1" for strings; maps and slices get one
// recursively-filled entry; pointers get a freshly-allocated, recursively-
// filled pointee; structs are filled field by field.  Kinds the conversion
// tests don't use (floats, channels, funcs, ...) fail the test so they get
// handled when they first appear.
func fillValue(t *testing.T, value reflect.Value, name string, kind reflect.Kind, keyType, elemType reflect.Type) {
	switch kind {
	case reflect.Invalid,
		reflect.Array, reflect.Chan, reflect.Func, reflect.Interface, reflect.UnsafePointer,
		reflect.Float32, reflect.Float64,
		reflect.Complex64, reflect.Complex128:
		// NotEqualf(kind, kind) always fails: this is a deliberate
		// unconditional failure flagging the unhandled field.
		require.NotEqualf(t, kind, kind, "unhandled %s field %s: tests require updating", kind, name)
	case reflect.Bool:
		value.SetBool(true)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		value.SetInt(1)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		value.SetUint(1)
	case reflect.Map:
		if value.IsNil() {
			value.Set(reflect.MakeMap(value.Type()))
		}
		// Build one filled key and one filled element, then insert them.
		keyPtr := reflect.New(keyType)
		key := reflect.Indirect(keyPtr)
		fillValue(t, key, name, keyType.Kind(), nil, nil)
		elemPtr := reflect.New(elemType)
		elem := reflect.Indirect(elemPtr)
		fillValue(t, elem, name, elemType.Kind(), nil, nil)
		value.SetMapIndex(key, reflect.Indirect(elem))
	case reflect.Slice:
		// Replace the slice with a single-element slice holding a filled value.
		vPtr := reflect.New(elemType)
		v := reflect.Indirect(vPtr)
		fillValue(t, v, name, elemType.Kind(), nil, nil)
		value.Set(reflect.Append(reflect.MakeSlice(value.Type(), 0, 1), v))
	case reflect.String:
		value.SetString("1")
	case reflect.Struct:
		fillAllValueFields(t, value)
	case reflect.Pointer:
		// Allocate a pointee, fill it, and point the field at it.
		p := reflect.New(elemType)
		fillValue(t, reflect.Indirect(p), name, elemType.Kind(), nil, nil)
		value.Set(p)
	}
}
+
+// checkAllFields recursively checks that every field not listed in allowZeroed
+// is not set to its zero value, that every slice is not empty, and that every
+// map has at least one entry. It makes an additional exception for structs
+// which have no defined fields.
+func checkAllFields[pStruct any](t *testing.T, st pStruct, allowZeroed []string) {
+ v := reflect.ValueOf(st)
+ if v.Kind() == reflect.Pointer {
+ v = reflect.Indirect(v)
+ }
+ checkAllValueFields(t, v, "", allowZeroed)
+}
+
// checkAllValueFields checks every visible, non-embedded field of the
// struct value v via checkValue.  name is the dotted path built up so far
// ("" at the top level); fields whose full dotted path appears in
// allowedToBeZero are skipped.
func checkAllValueFields(t *testing.T, v reflect.Value, name string, allowedToBeZero []string) {
	fields := reflect.VisibleFields(v.Type())
	for _, field := range fields {
		if field.Anonymous {
			// all right, fine, keep your secrets
			continue
		}
		fieldName := field.Name
		if name != "" {
			fieldName = name + "." + field.Name
		}
		if util.StringInSlice(fieldName, allowedToBeZero) {
			continue
		}
		f := v.FieldByName(field.Name)
		// Container-like kinds need their element type for recursion.
		var elemType reflect.Type
		switch field.Type.Kind() {
		case reflect.Array, reflect.Chan, reflect.Map, reflect.Pointer, reflect.Slice:
			elemType = field.Type.Elem()
		}
		checkValue(t, f, fieldName, field.Type.Kind(), elemType, allowedToBeZero)
	}
}
+
+func checkValue(t *testing.T, value reflect.Value, name string, kind reflect.Kind, elemType reflect.Type, allowedToBeZero []string) {
+ if kind != reflect.Invalid {
+ switch kind {
+ case reflect.Map:
+ assert.Falsef(t, value.IsZero(), "map field %s not set when it was not already expected to be left unpopulated by conversion", name)
+ keys := value.MapKeys()
+ for i := 0; i < len(keys); i++ {
+ v := value.MapIndex(keys[i])
+ checkValue(t, v, name+"{"+keys[i].String()+"}", elemType.Kind(), nil, allowedToBeZero)
+ }
+ case reflect.Slice:
+ assert.Falsef(t, value.IsZero(), "slice field %s not set when it was not already expected to be left unpopulated by conversion", name)
+ for i := 0; i < value.Len(); i++ {
+ v := value.Index(i)
+ checkValue(t, v, name+"["+strconv.Itoa(i)+"]", elemType.Kind(), nil, allowedToBeZero)
+ }
+ case reflect.Struct:
+ if fields := reflect.VisibleFields(value.Type()); len(fields) != 0 {
+ // structs which are defined with no fields are okay
+ assert.Falsef(t, value.IsZero(), "slice field %s not set when it was not already expected to be left unpopulated by conversion", name)
+ }
+ checkAllValueFields(t, value, name, allowedToBeZero)
+ case reflect.Pointer:
+ assert.Falsef(t, value.IsZero(), "pointer field %s not set when it was not already expected to be left unpopulated by conversion", name)
+ checkValue(t, reflect.Indirect(value), name, elemType.Kind(), nil, allowedToBeZero)
+ }
+ }
+}
+
// TestGoDockerclientConfigFromSchema2Config converts a fully-populated
// Schema2Config and verifies that every field of the resulting
// go-dockerclient Config was set, except for the ones with no counterpart.
func TestGoDockerclientConfigFromSchema2Config(t *testing.T) {
	var input manifest.Schema2Config
	fillAllFields(t, &input)
	output := GoDockerclientConfigFromSchema2Config(&input)
	// make exceptions for fields in "output" which have no corresponding field in "input"
	notInSchema2Config := []string{"CPUSet", "CPUShares", "DNS", "Memory", "KernelMemory", "MemorySwap", "MemoryReservation", "Mounts", "PortSpecs", "PublishService", "SecurityOpts", "VolumeDriver", "VolumesFrom"}
	checkAllFields(t, output, notInSchema2Config)
}
+
// TestSchema2ConfigFromGoDockerclientConfig converts a fully-populated
// go-dockerclient Config and verifies that every field of the resulting
// Schema2Config was set; all dockerclient fields have counterparts.
func TestSchema2ConfigFromGoDockerclientConfig(t *testing.T) {
	var input dockerclient.Config
	fillAllFields(t, &input)
	output := Schema2ConfigFromGoDockerclientConfig(&input)
	// make exceptions for fields in "output" which have no corresponding field in "input"
	notInDockerConfig := []string{}
	checkAllFields(t, output, notInDockerConfig)
}
diff --git a/internal/config/executor.go b/internal/config/executor.go
new file mode 100644
index 0000000..19b1429
--- /dev/null
+++ b/internal/config/executor.go
@@ -0,0 +1,45 @@
+package config
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ dockerclient "github.com/fsouza/go-dockerclient"
+ "github.com/openshift/imagebuilder"
+)
+
// configOnlyExecutor implements the Executor interface that an
// imagebuilder.Builder expects to be able to call to do some heavy lifting,
// but it just refuses to do the work of ADD, COPY, or RUN. It also doesn't
// care if the working directory exists in a container, because it's really
// only concerned with letting the Builder's RunConfig get updated by changes
// from a Dockerfile. Try anything more than that and it'll return an error.
//
// The type is stateless, so a zero value (&configOnlyExecutor{}) is ready to
// use.
type configOnlyExecutor struct{}
+
// Preserve always fails: it is only ever needed in support of ADD/COPY/RUN,
// none of which this executor supports as changes.
func (g *configOnlyExecutor) Preserve(path string) error {
	return errors.New("ADD/COPY/RUN not supported as changes")
}
+
// EnsureContainerPath reports success without doing anything: this executor
// only tracks configuration, so the path doesn't need to exist anywhere.
func (g *configOnlyExecutor) EnsureContainerPath(path string) error {
	return nil
}
+
// EnsureContainerPathAs, like EnsureContainerPath, reports success without
// doing anything; the requested ownership and mode are ignored.
func (g *configOnlyExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
	return nil
}
+
+func (g *configOnlyExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
+ if len(copies) == 0 {
+ return nil
+ }
+ return errors.New("ADD/COPY not supported as changes")
+}
+
// Run always fails: RUN is not supported as a change.
func (g *configOnlyExecutor) Run(run imagebuilder.Run, config dockerclient.Config) error {
	return errors.New("RUN not supported as changes")
}
+
// UnrecognizedInstruction rejects any instruction the builder didn't
// recognize, quoting the instruction's original text in the error.
func (g *configOnlyExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
	return fmt.Errorf("did not understand change instruction %q", step.Original)
}
diff --git a/internal/config/executor_test.go b/internal/config/executor_test.go
new file mode 100644
index 0000000..1fbc263
--- /dev/null
+++ b/internal/config/executor_test.go
@@ -0,0 +1,5 @@
+package config
+
+import "github.com/openshift/imagebuilder"
+
// Compile-time check that configOnlyExecutor satisfies imagebuilder.Executor.
var _ imagebuilder.Executor = &configOnlyExecutor{}
diff --git a/internal/config/override.go b/internal/config/override.go
new file mode 100644
index 0000000..a1dfebf
--- /dev/null
+++ b/internal/config/override.go
@@ -0,0 +1,181 @@
+package config
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/buildah/docker"
+ "github.com/containers/image/v5/manifest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/openshift/imagebuilder"
+)
+
// firstStringElseSecondString takes two strings, and returns the first
// string if it isn't empty, else the second string
func firstStringElseSecondString(first, second string) string {
	if first == "" {
		return second
	}
	return first
}
+
// firstSliceElseSecondSlice takes two string slices, and returns a copy of
// the first slice if it has contents, else a copy of the second slice.
// The result is always a freshly allocated (and therefore non-nil) slice.
func firstSliceElseSecondSlice(first, second []string) []string {
	chosen := second
	if len(first) > 0 {
		chosen = first
	}
	return append([]string{}, chosen...)
}
+
// firstSlicePairElseSecondSlicePair takes two pairs of string slices, and
// returns copies of the first pair of slices if either has contents, else
// copies of the second pair. Both returned slices are freshly allocated.
func firstSlicePairElseSecondSlicePair(firstA, firstB, secondA, secondB []string) ([]string, []string) {
	a, b := secondA, secondB
	if len(firstA)+len(firstB) > 0 {
		a, b = firstA, firstB
	}
	return append([]string{}, a...), append([]string{}, b...)
}
+
// mergeEnv combines variables from a and b into a single environment slice. if
// a and b both provide values for the same variable, the value from b is
// preferred. The first occurrence of a variable fixes its position in the
// result; later occurrences only update the value in place.
func mergeEnv(a, b []string) []string {
	merged := make([]string, 0, len(a)+len(b))
	position := make(map[string]int)
	combined := make([]string, 0, len(a)+len(b))
	combined = append(combined, a...)
	combined = append(combined, b...)
	for _, entry := range combined {
		name, _, hasValue := strings.Cut(entry, "=")
		if !hasValue {
			// A bare variable name pulls its value from our own
			// environment, defaulting to the empty string if it
			// isn't set there either.
			if value, ok := os.LookupEnv(entry); ok {
				entry = entry + "=" + value
			} else {
				entry = entry + "="
			}
		}
		if at, dup := position[name]; dup {
			merged[at] = entry
		} else {
			position[name] = len(merged)
			merged = append(merged, entry)
		}
	}
	return merged
}
+
// Override takes a buildah docker config and an OCI ImageConfig, and applies a
// mixture of a slice of Dockerfile-style instructions and fields from a config
// blob to them both
//
// The overrideChanges are applied first: they are parsed as a Dockerfile
// snippet and replayed against overrideConfig using a dummy builder, and the
// resulting config is then merged into both dconfig and oconfig in place.
// String fields only replace the targets when the override's value is
// non-empty; boolean fields are copied unconditionally; port/volume/label
// maps and environments are merged.
func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error {
	if len(overrideChanges) > 0 {
		if overrideConfig == nil {
			overrideConfig = &manifest.Schema2Config{}
		}
		// Parse the set of changes as we would a Dockerfile.
		changes := strings.Join(overrideChanges, "\n")
		parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(changes))
		if err != nil {
			return fmt.Errorf("parsing change set %+v: %w", changes, err)
		}
		// Create a dummy builder object to process configuration-related
		// instructions.
		subBuilder := imagebuilder.NewBuilder(nil)
		// Convert the incoming data into an initial RunConfig.
		subBuilder.RunConfig = *GoDockerclientConfigFromSchema2Config(overrideConfig)
		// Process the change instructions one by one.
		for _, node := range parsed.Children {
			var step imagebuilder.Step
			if err := step.Resolve(node); err != nil {
				return fmt.Errorf("resolving change %q: %w", node.Original, err)
			}
			// configOnlyExecutor rejects ADD/COPY/RUN, so only
			// configuration-affecting instructions get through.
			if err := subBuilder.Run(&step, &configOnlyExecutor{}, true); err != nil {
				return fmt.Errorf("processing change %q: %w", node.Original, err)
			}
		}
		// Pull settings out of the dummy builder's RunConfig.
		overrideConfig = Schema2ConfigFromGoDockerclientConfig(&subBuilder.RunConfig)
	}
	if overrideConfig != nil {
		// Apply changes from a possibly-provided possibly-changed config struct.
		dconfig.Hostname = firstStringElseSecondString(overrideConfig.Hostname, dconfig.Hostname)
		dconfig.Domainname = firstStringElseSecondString(overrideConfig.Domainname, dconfig.Domainname)
		dconfig.User = firstStringElseSecondString(overrideConfig.User, dconfig.User)
		oconfig.User = firstStringElseSecondString(overrideConfig.User, oconfig.User)
		// Boolean fields are copied unconditionally: a false in the
		// override wins over a true in the target.
		dconfig.AttachStdin = overrideConfig.AttachStdin
		dconfig.AttachStdout = overrideConfig.AttachStdout
		dconfig.AttachStderr = overrideConfig.AttachStderr
		if len(overrideConfig.ExposedPorts) > 0 {
			// Merge the port sets rather than replacing them.
			dexposedPorts := make(map[docker.Port]struct{})
			oexposedPorts := make(map[string]struct{})
			for port := range dconfig.ExposedPorts {
				dexposedPorts[port] = struct{}{}
			}
			for port := range overrideConfig.ExposedPorts {
				dexposedPorts[docker.Port(port)] = struct{}{}
			}
			for port := range oconfig.ExposedPorts {
				oexposedPorts[port] = struct{}{}
			}
			for port := range overrideConfig.ExposedPorts {
				oexposedPorts[string(port)] = struct{}{}
			}
			dconfig.ExposedPorts = dexposedPorts
			oconfig.ExposedPorts = oexposedPorts
		}
		dconfig.Tty = overrideConfig.Tty
		dconfig.OpenStdin = overrideConfig.OpenStdin
		dconfig.StdinOnce = overrideConfig.StdinOnce
		if len(overrideConfig.Env) > 0 {
			// Values from the override win on conflicting names.
			dconfig.Env = mergeEnv(dconfig.Env, overrideConfig.Env)
			oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env)
		}
		// Entrypoint and Cmd are replaced as a pair: setting either one
		// in the override replaces both.
		dconfig.Entrypoint, dconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, dconfig.Entrypoint, dconfig.Cmd)
		oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd)
		if overrideConfig.Healthcheck != nil {
			dconfig.Healthcheck = &docker.HealthConfig{
				Test:        append([]string{}, overrideConfig.Healthcheck.Test...),
				Interval:    overrideConfig.Healthcheck.Interval,
				Timeout:     overrideConfig.Healthcheck.Timeout,
				StartPeriod: overrideConfig.Healthcheck.StartPeriod,
				Retries:     overrideConfig.Healthcheck.Retries,
			}
		}
		dconfig.ArgsEscaped = overrideConfig.ArgsEscaped
		dconfig.Image = firstStringElseSecondString(overrideConfig.Image, dconfig.Image)
		if len(overrideConfig.Volumes) > 0 {
			if dconfig.Volumes == nil {
				dconfig.Volumes = make(map[string]struct{})
			}
			if oconfig.Volumes == nil {
				oconfig.Volumes = make(map[string]struct{})
			}
			for volume := range overrideConfig.Volumes {
				dconfig.Volumes[volume] = struct{}{}
				oconfig.Volumes[volume] = struct{}{}
			}
		}
		dconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, dconfig.WorkingDir)
		oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir)
		dconfig.NetworkDisabled = overrideConfig.NetworkDisabled
		dconfig.MacAddress = overrideConfig.MacAddress
		dconfig.OnBuild = overrideConfig.OnBuild
		if len(overrideConfig.Labels) > 0 {
			if dconfig.Labels == nil {
				dconfig.Labels = make(map[string]string)
			}
			if oconfig.Labels == nil {
				oconfig.Labels = make(map[string]string)
			}
			for k, v := range overrideConfig.Labels {
				dconfig.Labels[k] = v
				oconfig.Labels[k] = v
			}
		}
		dconfig.StopSignal = overrideConfig.StopSignal
		oconfig.StopSignal = overrideConfig.StopSignal
		dconfig.StopTimeout = overrideConfig.StopTimeout
		dconfig.Shell = firstSliceElseSecondSlice(overrideConfig.Shell, dconfig.Shell)
	}
	return nil
}
diff --git a/internal/mkcw/archive.go b/internal/mkcw/archive.go
new file mode 100644
index 0000000..a0677e4
--- /dev/null
+++ b/internal/mkcw/archive.go
@@ -0,0 +1,464 @@
+package mkcw
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/luksy"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/go-units"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
// minimumImageSize is the floor (10 MiB) applied to the computed disk image
// size in Archive.
const minimumImageSize = 10 * 1024 * 1024
+
// ArchiveOptions includes optional settings for generating an archive.
type ArchiveOptions struct {
	// If supplied, we'll register the workload with this server.
	// Practically necessary if DiskEncryptionPassphrase is not set, in
	// which case we'll generate one and throw it away after.
	AttestationURL string

	// Used to measure the environment. If left unset (0, ""), defaults will be applied.
	CPUs   int
	Memory int // NOTE(review): units appear to be MiB (default 512) — confirm

	// Can be manually set. If left unset ("", false, nil), reasonable values will be used.
	TempDir                 string
	TeeType                 TeeType
	IgnoreAttestationErrors bool
	// ImageSize is in bytes; Archive rounds it up to a multiple of 4096
	// and enforces minimumImageSize as a floor.
	ImageSize  int64
	WorkloadID string
	// Slop is extra space to add to the computed image size, e.g. "25%",
	// "1GB", or "10%+1GB" (see slop()).
	Slop                     string
	DiskEncryptionPassphrase string
	FirmwareLibrary          string
	Logger                   *logrus.Logger
}
+
// chainRetrievalError wraps a failure to run "sevctl export" to fetch the SEV
// certificate chain, keeping whatever the tool wrote to its stderr.
type chainRetrievalError struct {
	stderr string
	err    error
}

// Error includes the trimmed stderr output in the message when there was any.
func (c chainRetrievalError) Error() string {
	trimmed := strings.TrimSpace(c.stderr)
	if trimmed == "" {
		return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v", c.err)
	}
	return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v: %v", trimmed, c.err)
}
+
+// Archive generates a WorkloadConfig for a specified directory and produces a
+// tar archive of a container image's rootfs with the expected contents.
+// The input directory will have a ".krun_config.json" file added to it while
+// this function is running, but it will be removed on completion.
+func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) {
+ const (
+ teeDefaultCPUs = 2
+ teeDefaultMemory = 512
+ teeDefaultFilesystem = "ext4"
+ teeDefaultTeeType = SNP
+ )
+
+ if path == "" {
+ return nil, WorkloadConfig{}, fmt.Errorf("required path not specified")
+ }
+ logger := options.Logger
+ if logger == nil {
+ logger = logrus.StandardLogger()
+ }
+
+ teeType := options.TeeType
+ if teeType == "" {
+ teeType = teeDefaultTeeType
+ }
+ cpus := options.CPUs
+ if cpus == 0 {
+ cpus = teeDefaultCPUs
+ }
+ memory := options.Memory
+ if memory == 0 {
+ memory = teeDefaultMemory
+ }
+ filesystem := teeDefaultFilesystem
+ workloadID := options.WorkloadID
+ if workloadID == "" {
+ digestInput := path + filesystem + time.Now().String()
+ workloadID = digest.Canonical.FromString(digestInput).Encoded()
+ }
+ workloadConfig := WorkloadConfig{
+ Type: teeType,
+ WorkloadID: workloadID,
+ CPUs: cpus,
+ Memory: memory,
+ AttestationURL: options.AttestationURL,
+ }
+
+ // Do things which are specific to the type of TEE we're building for.
+ var chainBytes []byte
+ var chainBytesFile string
+ var chainInfo fs.FileInfo
+ switch teeType {
+ default:
+ return nil, WorkloadConfig{}, fmt.Errorf("don't know how to generate TeeData for TEE type %q", teeType)
+ case SEV, SEV_NO_ES:
+ // If we need a certificate chain, get it.
+ chain, err := os.CreateTemp(options.TempDir, "chain")
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ chain.Close()
+ defer func() {
+ if err := os.Remove(chain.Name()); err != nil {
+ logger.Warnf("error removing temporary file %q: %v", chain.Name(), err)
+ }
+ }()
+ logrus.Debugf("sevctl export -f %s", chain.Name())
+ cmd := exec.Command("sevctl", "export", "-f", chain.Name())
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout, cmd.Stderr = &stdout, &stderr
+ if err := cmd.Run(); err != nil {
+ if !options.IgnoreAttestationErrors {
+ return nil, WorkloadConfig{}, chainRetrievalError{stderr.String(), err}
+ }
+ logger.Warn(chainRetrievalError{stderr.String(), err}.Error())
+ }
+ if chainBytes, err = os.ReadFile(chain.Name()); err != nil {
+ chainBytes = []byte{}
+ }
+ var teeData SevWorkloadData
+ if len(chainBytes) > 0 {
+ chainBytesFile = "sev.chain"
+ chainInfo, err = os.Stat(chain.Name())
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ teeData.VendorChain = "/" + chainBytesFile
+ }
+ encodedTeeData, err := json.Marshal(teeData)
+ if err != nil {
+ return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err)
+ }
+ workloadConfig.TeeData = string(encodedTeeData)
+ case SNP:
+ teeData := SnpWorkloadData{
+ Generation: "milan",
+ }
+ encodedTeeData, err := json.Marshal(teeData)
+ if err != nil {
+ return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err)
+ }
+ workloadConfig.TeeData = string(encodedTeeData)
+ }
+
+ // Write part of the config blob where the krun init process will be
+ // looking for it. The oci2cw tool used `buildah inspect` output, but
+ // init is just looking for fields that have the right names in any
+ // object, and the image's config will have that, so let's try encoding
+ // it directly.
+ krunConfigPath := filepath.Join(path, ".krun_config.json")
+ krunConfigBytes, err := json.Marshal(ociConfig)
+ if err != nil {
+ return nil, WorkloadConfig{}, fmt.Errorf("creating .krun_config from image configuration: %w", err)
+ }
+ if err := ioutils.AtomicWriteFile(krunConfigPath, krunConfigBytes, 0o600); err != nil {
+ return nil, WorkloadConfig{}, fmt.Errorf("saving krun config: %w", err)
+ }
+ defer func() {
+ if err := os.Remove(krunConfigPath); err != nil {
+ logger.Warnf("removing krun configuration file: %v", err)
+ }
+ }()
+
+ // Encode the workload config, in case it fails for any reason.
+ cleanedUpWorkloadConfig := workloadConfig
+ switch cleanedUpWorkloadConfig.Type {
+ default:
+ return nil, WorkloadConfig{}, fmt.Errorf("don't know how to canonicalize TEE type %q", cleanedUpWorkloadConfig.Type)
+ case SEV, SEV_NO_ES:
+ cleanedUpWorkloadConfig.Type = SEV
+ case SNP:
+ cleanedUpWorkloadConfig.Type = SNP
+ }
+ workloadConfigBytes, err := json.Marshal(cleanedUpWorkloadConfig)
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+
+ // Make sure we have the passphrase to use for encrypting the disk image.
+ diskEncryptionPassphrase := options.DiskEncryptionPassphrase
+ if diskEncryptionPassphrase == "" {
+ diskEncryptionPassphrase, err = GenerateDiskEncryptionPassphrase()
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ }
+
+ // If we weren't told how big the image should be, get a rough estimate
+ // of the input data size, then add a hedge to it.
+ imageSize := slop(options.ImageSize, options.Slop)
+ if imageSize == 0 {
+ var sourceSize int64
+ if err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error {
+ if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) {
+ return err
+ }
+ info, err := d.Info()
+ if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) {
+ return err
+ }
+ sourceSize += info.Size()
+ return nil
+ }); err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ imageSize = slop(sourceSize, options.Slop)
+ }
+ if imageSize%4096 != 0 {
+ imageSize += (4096 - (imageSize % 4096))
+ }
+ if imageSize < minimumImageSize {
+ imageSize = minimumImageSize
+ }
+
+ // Create a file to use as the unencrypted version of the disk image.
+ plain, err := os.CreateTemp(options.TempDir, "plain.img")
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ removePlain := true
+ defer func() {
+ if removePlain {
+ if err := os.Remove(plain.Name()); err != nil {
+ logger.Warnf("removing temporary file %q: %v", plain.Name(), err)
+ }
+ }
+ }()
+
+ // Lengthen the plaintext disk image file.
+ if err := plain.Truncate(imageSize); err != nil {
+ plain.Close()
+ return nil, WorkloadConfig{}, err
+ }
+ plainInfo, err := plain.Stat()
+ plain.Close()
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+
+ // Format the disk image with the filesystem contents.
+ if _, stderr, err := MakeFS(path, plain.Name(), filesystem); err != nil {
+ if strings.TrimSpace(stderr) != "" {
+ return nil, WorkloadConfig{}, fmt.Errorf("%s: %w", strings.TrimSpace(stderr), err)
+ }
+ return nil, WorkloadConfig{}, err
+ }
+
+ // If we're registering the workload, we can do that now.
+ if workloadConfig.AttestationURL != "" {
+ if err := SendRegistrationRequest(workloadConfig, diskEncryptionPassphrase, options.FirmwareLibrary, options.IgnoreAttestationErrors, logger); err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ }
+
+ // Try to encrypt on the fly.
+ pipeReader, pipeWriter := io.Pipe()
+ removePlain = false
+ go func() {
+ var err error
+ defer func() {
+ if err := os.Remove(plain.Name()); err != nil {
+ logger.Warnf("removing temporary file %q: %v", plain.Name(), err)
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ } else {
+ pipeWriter.Close()
+ }
+ }()
+ plain, err := os.Open(plain.Name())
+ if err != nil {
+ logrus.Errorf("opening unencrypted disk image %q: %v", plain.Name(), err)
+ return
+ }
+ defer plain.Close()
+ tw := tar.NewWriter(pipeWriter)
+ defer tw.Flush()
+
+ // Write /entrypoint
+ var decompressedEntrypoint bytes.Buffer
+ decompressor, err := gzip.NewReader(bytes.NewReader(entrypointCompressedBytes))
+ if err != nil {
+ logrus.Errorf("decompressing copy of entrypoint: %v", err)
+ return
+ }
+ defer decompressor.Close()
+ if _, err = io.Copy(&decompressedEntrypoint, decompressor); err != nil {
+ logrus.Errorf("decompressing copy of entrypoint: %v", err)
+ return
+ }
+ entrypointHeader, err := tar.FileInfoHeader(plainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for entrypoint: %v", err)
+ return
+ }
+ entrypointHeader.Name = "entrypoint"
+ entrypointHeader.Mode = 0o755
+ entrypointHeader.Uname, entrypointHeader.Gname = "", ""
+ entrypointHeader.Uid, entrypointHeader.Gid = 0, 0
+ entrypointHeader.Size = int64(decompressedEntrypoint.Len())
+ if err = tw.WriteHeader(entrypointHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", entrypointHeader.Name, err)
+ return
+ }
+ if _, err = io.Copy(tw, &decompressedEntrypoint); err != nil {
+ logrus.Errorf("writing %q: %v", entrypointHeader.Name, err)
+ return
+ }
+
+ // Write /sev.chain
+ if chainInfo != nil {
+ chainHeader, err := tar.FileInfoHeader(chainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for %q: %v", chainInfo.Name(), err)
+ return
+ }
+ chainHeader.Name = chainBytesFile
+ chainHeader.Mode = 0o600
+ chainHeader.Uname, chainHeader.Gname = "", ""
+ chainHeader.Uid, chainHeader.Gid = 0, 0
+ chainHeader.Size = int64(len(chainBytes))
+ if err = tw.WriteHeader(chainHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", chainHeader.Name, err)
+ return
+ }
+ if _, err = tw.Write(chainBytes); err != nil {
+ logrus.Errorf("writing %q: %v", chainHeader.Name, err)
+ return
+ }
+ }
+
+ // Write /krun-sev.json.
+ workloadConfigHeader, err := tar.FileInfoHeader(plainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
+ return
+ }
+ workloadConfigHeader.Name = "krun-sev.json"
+ workloadConfigHeader.Mode = 0o600
+ workloadConfigHeader.Uname, workloadConfigHeader.Gname = "", ""
+ workloadConfigHeader.Uid, workloadConfigHeader.Gid = 0, 0
+ workloadConfigHeader.Size = int64(len(workloadConfigBytes))
+ if err = tw.WriteHeader(workloadConfigHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", workloadConfigHeader.Name, err)
+ return
+ }
+ if _, err = tw.Write(workloadConfigBytes); err != nil {
+ logrus.Errorf("writing %q: %v", workloadConfigHeader.Name, err)
+ return
+ }
+
+ // Write /tmp.
+ tmpHeader, err := tar.FileInfoHeader(plainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
+ return
+ }
+ tmpHeader.Name = "tmp/"
+ tmpHeader.Typeflag = tar.TypeDir
+ tmpHeader.Mode = 0o1777
+ tmpHeader.Uname, workloadConfigHeader.Gname = "", ""
+ tmpHeader.Uid, workloadConfigHeader.Gid = 0, 0
+ tmpHeader.Size = 0
+ if err = tw.WriteHeader(tmpHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", tmpHeader.Name, err)
+ return
+ }
+
+ // Now figure out the footer that we'll append to the encrypted disk.
+ var footer bytes.Buffer
+ lengthBuffer := make([]byte, 8)
+ footer.Write(workloadConfigBytes)
+ footer.WriteString("KRUN")
+ binary.LittleEndian.PutUint64(lengthBuffer, uint64(len(workloadConfigBytes)))
+ footer.Write(lengthBuffer)
+
+ // Start encrypting and write /disk.img.
+ header, encrypt, blockSize, err := luksy.EncryptV1([]string{diskEncryptionPassphrase}, "")
+ paddingBoundary := int64(4096)
+ paddingNeeded := (paddingBoundary - ((int64(len(header)) + imageSize + int64(footer.Len())) % paddingBoundary)) % paddingBoundary
+ diskHeader := workloadConfigHeader
+ diskHeader.Name = "disk.img"
+ diskHeader.Mode = 0o600
+ diskHeader.Size = int64(len(header)) + imageSize + paddingNeeded + int64(footer.Len())
+ if err = tw.WriteHeader(diskHeader); err != nil {
+ logrus.Errorf("writing archive header for disk.img: %v", err)
+ return
+ }
+ if _, err = io.Copy(tw, bytes.NewReader(header)); err != nil {
+ logrus.Errorf("writing encryption header for disk.img: %v", err)
+ return
+ }
+ encryptWrapper := luksy.EncryptWriter(encrypt, tw, blockSize)
+ if _, err = io.Copy(encryptWrapper, plain); err != nil {
+ logrus.Errorf("encrypting disk.img: %v", err)
+ return
+ }
+ encryptWrapper.Close()
+ if _, err = tw.Write(make([]byte, paddingNeeded)); err != nil {
+ logrus.Errorf("writing padding for disk.img: %v", err)
+ return
+ }
+ if _, err = io.Copy(tw, &footer); err != nil {
+ logrus.Errorf("writing footer for disk.img: %v", err)
+ return
+ }
+ tw.Close()
+ }()
+
+ return pipeReader, workloadConfig, nil
+}
+
+func slop(size int64, slop string) int64 {
+ if slop == "" {
+ return size * 5 / 4
+ }
+ for _, factor := range strings.Split(slop, "+") {
+ factor = strings.TrimSpace(factor)
+ if factor == "" {
+ continue
+ }
+ if strings.HasSuffix(factor, "%") {
+ percentage := strings.TrimSuffix(factor, "%")
+ percent, err := strconv.ParseInt(percentage, 10, 8)
+ if err != nil {
+ logrus.Warnf("parsing percentage %q: %v", factor, err)
+ } else {
+ size *= (percent + 100)
+ size /= 100
+ }
+ } else {
+ more, err := units.RAMInBytes(factor)
+ if err != nil {
+ logrus.Warnf("parsing %q as a size: %v", factor, err)
+ } else {
+ size += more
+ }
+ }
+ }
+ return size
+}
diff --git a/internal/mkcw/archive_test.go b/internal/mkcw/archive_test.go
new file mode 100644
index 0000000..c2e06fc
--- /dev/null
+++ b/internal/mkcw/archive_test.go
@@ -0,0 +1,181 @@
+package mkcw
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestSlop checks the size-padding arithmetic: an empty slop means +25%,
// "%"-suffixed terms scale the size, other terms are parsed as byte
// quantities (1024-based) and added, and "+"-separated terms are applied
// left to right.
func TestSlop(t *testing.T) {
	testCases := []struct {
		input  int64
		slop   string
		output int64
	}{
		{100, "", 125},
		{100, "10%", 110},
		{100, "100%", 200},
		{100, "10GB", 10*1024*1024*1024 + 100},
		{100, "10%+10GB", 10*1024*1024*1024 + 110},
		{100, "10% + 10GB", 10*1024*1024*1024 + 110},
	}
	for _, testCase := range testCases {
		t.Run(testCase.slop, func(t *testing.T) {
			assert.Equal(t, testCase.output, slop(testCase.input, testCase.slop))
		})
	}
}
+
// dummyAttestationHandler replies with a fixed response code to requests to
// the right path, and caches passphrases indexed by workload ID
type dummyAttestationHandler struct {
	t *testing.T
	// status is the HTTP status to return for registration requests;
	// 0 means http.StatusOK
	status int
	// passphrases maps workload IDs to registered passphrases; guarded by
	// passphrasesLock because registrations can arrive concurrently
	passphrases     map[string]string
	passphrasesLock sync.Mutex
}
+
// ServeHTTP handles workload registration requests at the expected endpoint
// by caching the submitted passphrase under its workload ID and replying with
// the handler's preset status; every other request gets a 500.
func (d *dummyAttestationHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	// Drain the request body first, whatever the request is.
	var body bytes.Buffer
	if req.Body != nil {
		if _, err := io.Copy(&body, req.Body); err != nil {
			d.t.Logf("reading request body: %v", err)
			return
		}
		req.Body.Close()
	}
	if req.URL != nil && req.URL.Path == "/kbs/v0/register_workload" {
		var registrationRequest RegistrationRequest
		// if we can't decode the client request, bail
		if err := json.Unmarshal(body.Bytes(), &registrationRequest); err != nil {
			rw.WriteHeader(http.StatusInternalServerError)
			return
		}
		// cache the passphrase
		d.passphrasesLock.Lock()
		if d.passphrases == nil {
			d.passphrases = make(map[string]string)
		}
		d.passphrases[registrationRequest.WorkloadID] = registrationRequest.Passphrase
		d.passphrasesLock.Unlock()
		// return the predetermined status
		status := d.status
		if status == 0 {
			status = http.StatusOK
		}
		rw.WriteHeader(status)
		return
	}
	// no such handler
	rw.WriteHeader(http.StatusInternalServerError)
}
+
// TestArchive exercises Archive end to end against a dummy attestation
// server, across combinations of server status and the two
// error-tolerance flags, then verifies the produced archive's contents:
// the krun-sev.json entry, the LUKS disk image with its workload-config
// footer, and that the passphrase reached the attestation server.
func TestArchive(t *testing.T) {
	// A fixed image configuration that gets embedded in the archive's
	// .krun_config.json.
	ociConfig := &v1.Image{
		Config: v1.ImageConfig{
			User:       "root",
			Env:        []string{"PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/usr/sbin:/sbin:/usr/sbin:/sbin"},
			Cmd:        []string{"/bin/bash"},
			WorkingDir: "/root",
			Labels: map[string]string{
				"label_a": "b",
				"label_c": "d",
			},
		},
	}
	for _, status := range []int{http.StatusOK, http.StatusInternalServerError} {
		for _, ignoreChainRetrievalErrors := range []bool{false, true} {
			for _, ignoreAttestationErrors := range []bool{false, true} {
				t.Run(fmt.Sprintf("status=%d,ignoreChainRetrievalErrors=%v,ignoreAttestationErrors=%v", status, ignoreChainRetrievalErrors, ignoreAttestationErrors), func(t *testing.T) {
					// listen on a system-assigned port
					listener, err := net.Listen("tcp", ":0")
					require.NoError(t, err)
					// keep track of our listener address
					addr := listener.Addr()
					// serve requests on that listener
					handler := &dummyAttestationHandler{t: t, status: status}
					server := http.Server{
						Handler: handler,
					}
					go func() {
						if err := server.Serve(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
							t.Logf("serve: %v", err)
						}
					}()
					// clean up at the end of this test
					t.Cleanup(func() { assert.NoError(t, server.Close()) })
					// generate the container rootfs using a temporary empty directory
					archiveOptions := ArchiveOptions{
						CPUs:                    4,
						Memory:                  256,
						TempDir:                 t.TempDir(),
						AttestationURL:          "http://" + addr.String(),
						IgnoreAttestationErrors: ignoreAttestationErrors,
					}
					inputPath := t.TempDir()
					rc, workloadConfig, err := Archive(inputPath, ociConfig, archiveOptions)
					// bail now if we got an error we didn't expect
					if err != nil {
						if errors.As(err, &chainRetrievalError{}) {
							if !ignoreChainRetrievalErrors {
								return
							}
						}
						if errors.As(err, &attestationError{}) {
							if !ignoreAttestationErrors {
								// fail the test: this error should
								// have been returned only when not
								// ignored
								require.NoError(t, err)
							}
						}
						return
					}
					if err == nil {
						// NOTE(review): err is always nil here (the
						// branch above returns otherwise), so this
						// guard is redundant.
						defer rc.Close()
					}
					// read each archive entry's contents into a map
					contents := make(map[string][]byte)
					tr := tar.NewReader(rc)
					hdr, err := tr.Next()
					for hdr != nil {
						contents[hdr.Name], err = io.ReadAll(tr)
						require.NoError(t, err)
						hdr, err = tr.Next()
					}
					if err != nil {
						// io.EOF is the only acceptable way for the
						// walk over the archive to end
						require.ErrorIs(t, err, io.EOF)
					}
					// check that krun-sev.json is a JSON-encoded copy of the workload config
					var writtenWorkloadConfig WorkloadConfig
					err = json.Unmarshal(contents["krun-sev.json"], &writtenWorkloadConfig)
					require.NoError(t, err)
					assert.Equal(t, workloadConfig, writtenWorkloadConfig)
					// save the disk image to a file
					encryptedFile := filepath.Join(t.TempDir(), "encrypted.img")
					err = os.WriteFile(encryptedFile, contents["disk.img"], 0o600)
					require.NoError(t, err)
					// check that we have a configuration footer in there
					_, err = ReadWorkloadConfigFromImage(encryptedFile)
					require.NoError(t, err)
					// check that the attestation server got the encryption passphrase
					handler.passphrasesLock.Lock()
					passphrase := handler.passphrases[workloadConfig.WorkloadID]
					handler.passphrasesLock.Unlock()
					err = CheckLUKSPassphrase(encryptedFile, passphrase)
					require.NoError(t, err)
				})
			}
		}
	}
}
diff --git a/internal/mkcw/attest.go b/internal/mkcw/attest.go
new file mode 100644
index 0000000..91362d3
--- /dev/null
+++ b/internal/mkcw/attest.go
@@ -0,0 +1,250 @@
+package mkcw
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/buildah/internal/mkcw/types"
+ "github.com/sirupsen/logrus"
+)
+
// Aliases for the request/configuration types shared with the attestation
// server code in internal/mkcw/types, re-exported for convenience.
type (
	RegistrationRequest = types.RegistrationRequest
	TeeConfig = types.TeeConfig
	TeeConfigFlags = types.TeeConfigFlags
	TeeConfigMinFW = types.TeeConfigMinFW
)
+
// measurementError wraps a failure to generate the launch measurement for
// attestation.
type measurementError struct {
	err error
}

func (m measurementError) Error() string {
	return fmt.Sprintf("generating measurement for attestation: %v", m.err)
}

// Unwrap exposes the wrapped error so that errors.Is/errors.As can reach it.
func (m measurementError) Unwrap() error {
	return m.err
}

// attestationError wraps a failure to register the workload with the
// attestation server.
type attestationError struct {
	err error
}

func (a attestationError) Error() string {
	return fmt.Sprintf("registering workload: %v", a.err)
}

// Unwrap exposes the wrapped error so that errors.Is/errors.As can reach it.
func (a attestationError) Unwrap() error {
	return a.err
}
+
// httpError records an unexpected HTTP status code received from the
// attestation server.
type httpError struct {
	statusCode int
}

// Error renders the status code, appending the standard status text when the
// code is one that net/http knows about.
func (h httpError) Error() string {
	statusText := http.StatusText(h.statusCode)
	if statusText == "" {
		return fmt.Sprintf("received server status %d", h.statusCode)
	}
	return fmt.Sprintf("received server status %d (%q)", h.statusCode, statusText)
}
+
+// SendRegistrationRequest registers a workload with the specified decryption
+// passphrase with the service whose location is part of the WorkloadConfig.
+func SendRegistrationRequest(workloadConfig WorkloadConfig, diskEncryptionPassphrase, firmwareLibrary string, ignoreAttestationErrors bool, logger *logrus.Logger) error {
+ if workloadConfig.AttestationURL == "" {
+ return errors.New("attestation URL not provided")
+ }
+
+ // Measure the execution environment.
+ measurement, err := GenerateMeasurement(workloadConfig, firmwareLibrary)
+ if err != nil {
+ if !ignoreAttestationErrors {
+ return &measurementError{err}
+ }
+ logger.Warnf("generating measurement for attestation: %v", err)
+ }
+
+ // Build the workload registration (attestation) request body.
+ var teeConfigBytes []byte
+ switch workloadConfig.Type {
+ case SEV, SEV_NO_ES, SNP:
+ var cbits types.TeeConfigFlagBits
+ switch workloadConfig.Type {
+ case SEV:
+ cbits = types.SEV_CONFIG_NO_DEBUG |
+ types.SEV_CONFIG_NO_KEY_SHARING |
+ types.SEV_CONFIG_ENCRYPTED_STATE |
+ types.SEV_CONFIG_NO_SEND |
+ types.SEV_CONFIG_DOMAIN |
+ types.SEV_CONFIG_SEV
+ case SEV_NO_ES:
+ cbits = types.SEV_CONFIG_NO_DEBUG |
+ types.SEV_CONFIG_NO_KEY_SHARING |
+ types.SEV_CONFIG_NO_SEND |
+ types.SEV_CONFIG_DOMAIN |
+ types.SEV_CONFIG_SEV
+ case SNP:
+ cbits = types.SNP_CONFIG_SMT |
+ types.SNP_CONFIG_MANDATORY |
+ types.SNP_CONFIG_MIGRATE_MA |
+ types.SNP_CONFIG_DEBUG
+ default:
+ panic("internal error") // shouldn't happen
+ }
+ teeConfig := TeeConfig{
+ Flags: TeeConfigFlags{
+ Bits: cbits,
+ },
+ MinFW: TeeConfigMinFW{
+ Major: 0,
+ Minor: 0,
+ },
+ }
+ teeConfigBytes, err = json.Marshal(teeConfig)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("don't know how to generate tee_config for %q TEEs", workloadConfig.Type)
+ }
+
+ registrationRequest := RegistrationRequest{
+ WorkloadID: workloadConfig.WorkloadID,
+ LaunchMeasurement: measurement,
+ TeeConfig: string(teeConfigBytes),
+ Passphrase: diskEncryptionPassphrase,
+ }
+ registrationRequestBytes, err := json.Marshal(registrationRequest)
+ if err != nil {
+ return err
+ }
+
+ // Register the workload.
+ parsedURL, err := url.Parse(workloadConfig.AttestationURL)
+ if err != nil {
+ return err
+ }
+ parsedURL.Path = path.Join(parsedURL.Path, "/kbs/v0/register_workload")
+ if err != nil {
+ return err
+ }
+ url := parsedURL.String()
+ requestContentType := "application/json"
+ requestBody := bytes.NewReader(registrationRequestBytes)
+ defer http.DefaultClient.CloseIdleConnections()
+ resp, err := http.Post(url, requestContentType, requestBody)
+ if resp != nil {
+ if resp.Body != nil {
+ resp.Body.Close()
+ }
+ switch resp.StatusCode {
+ default:
+ if !ignoreAttestationErrors {
+ return &attestationError{&httpError{resp.StatusCode}}
+ }
+ logger.Warn(attestationError{&httpError{resp.StatusCode}}.Error())
+ case http.StatusOK, http.StatusAccepted:
+ // great!
+ }
+ }
+ if err != nil {
+ if !ignoreAttestationErrors {
+ return &attestationError{err}
+ }
+ logger.Warn(attestationError{err}.Error())
+ }
+ return nil
+}
+
// GenerateMeasurement generates the runtime measurement using the CPU count,
// memory size, and the firmware shared library, whatever it's called, wherever
// it is.
// If firmwareLibrary is a path, it will be the only one checked.
// If firmwareLibrary is a filename, it will be checked for in a hard-coded set
// of directories.
// If firmwareLibrary is empty, both the filename and the directory it is in
// will be taken from a hard-coded set of candidates.
func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string) (string, error) {
	// krunfw_measurement takes the CPU count and memory size as string arguments.
	cpuString := fmt.Sprintf("%d", workloadConfig.CPUs)
	memoryString := fmt.Sprintf("%d", workloadConfig.Memory)
	// Pick the label whose line we want from krunfw_measurement's output.
	// Note the apparent swap: the SEV TEE type selects the "SEV-ES" line
	// (SEV here implies encrypted state), while SEV_NO_ES selects "SEV".
	var prefix string
	switch workloadConfig.Type {
	case SEV:
		prefix = "SEV-ES"
	case SEV_NO_ES:
		prefix = "SEV"
	case SNP:
		prefix = "SNP"
	default:
		return "", fmt.Errorf("don't know which measurement to use for TEE type %q", workloadConfig.Type)
	}

	// Hard-coded directories to search for the firmware library, with any
	// LD_LIBRARY_PATH entries appended as lower-priority candidates.
	sharedLibraryDirs := []string{
		"/usr/local/lib64",
		"/usr/local/lib",
		"/lib64",
		"/lib",
		"/usr/lib64",
		"/usr/lib",
	}
	if llp, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
		sharedLibraryDirs = append(sharedLibraryDirs, strings.Split(llp, ":")...)
	}
	// Candidate firmware filenames, newest soname first.
	libkrunfwNames := []string{
		"libkrunfw-sev.so.4",
		"libkrunfw-sev.so.3",
		"libkrunfw-sev.so",
	}
	// Build the ordered list of paths to try, per the rules in the doc comment.
	var pathsToCheck []string
	if firmwareLibrary == "" {
		for _, sharedLibraryDir := range sharedLibraryDirs {
			if sharedLibraryDir == "" {
				// e.g. empty entries produced by splitting LD_LIBRARY_PATH
				continue
			}
			for _, libkrunfw := range libkrunfwNames {
				candidate := filepath.Join(sharedLibraryDir, libkrunfw)
				pathsToCheck = append(pathsToCheck, candidate)
			}
		}
	} else {
		if filepath.IsAbs(firmwareLibrary) {
			// An absolute path is the only candidate.
			pathsToCheck = append(pathsToCheck, firmwareLibrary)
		} else {
			// A bare filename is looked for in each directory.
			for _, sharedLibraryDir := range sharedLibraryDirs {
				if sharedLibraryDir == "" {
					continue
				}
				candidate := filepath.Join(sharedLibraryDir, firmwareLibrary)
				pathsToCheck = append(pathsToCheck, candidate)
			}
		}
	}
	// Run krunfw_measurement against the first candidate that exists, and
	// extract the measurement line for the TEE type we care about.
	for _, candidate := range pathsToCheck {
		if _, err := os.Lstat(candidate); err == nil {
			var stdout, stderr bytes.Buffer
			logrus.Debugf("krunfw_measurement -c %s -m %s %s", cpuString, memoryString, candidate)
			cmd := exec.Command("krunfw_measurement", "-c", cpuString, "-m", memoryString, candidate)
			cmd.Stdout = &stdout
			cmd.Stderr = &stderr
			if err := cmd.Run(); err != nil {
				// Surface the tool's stderr in the error, if it produced any.
				if stderr.Len() > 0 {
					err = fmt.Errorf("krunfw_measurement: %s: %w", strings.TrimSpace(stderr.String()), err)
				}
				return "", err
			}
			// Output is expected to contain lines of the form "PREFIX: measurement".
			scanner := bufio.NewScanner(&stdout)
			for scanner.Scan() {
				line := scanner.Text()
				if strings.HasPrefix(line, prefix+":") {
					return strings.TrimSpace(strings.TrimPrefix(line, prefix+":")), nil
				}
			}
			return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":")
		}
	}
	return "", fmt.Errorf("generating measurement: none of %v found: %w", pathsToCheck, os.ErrNotExist)
}
diff --git a/internal/mkcw/embed/entrypoint.gz b/internal/mkcw/embed/entrypoint.gz
new file mode 100755
index 0000000..0e00351
--- /dev/null
+++ b/internal/mkcw/embed/entrypoint.gz
Binary files differ
diff --git a/internal/mkcw/embed/entrypoint.s b/internal/mkcw/embed/entrypoint.s
new file mode 100644
index 0000000..0e4429c
--- /dev/null
+++ b/internal/mkcw/embed/entrypoint.s
@@ -0,0 +1,16 @@
# Minimal static entrypoint: writes a usage notice to stderr and exits
	# with status 1, for images that are only meant to be run as
	# confidential workloads under libkrun.
	.section .rodata.1,"aMS",@progbits,1
msg:
	.string "This image is designed to be run as a confidential workload using libkrun.\n"
	.section .text._start,"ax",@progbits
	.globl _start
	.type _start,@function
_start:
	movq $1, %rax	# write
	movq $2, %rdi	# fd=stderr_fileno
	movq $75, %rdx	# length of msg, including the trailing newline
	movq $msg, %rsi	# message
	syscall
	movq $60, %rax	# exit
	movq $1, %rdi	# status=1
	syscall
	.section .note.GNU-stack,"",@progbits
diff --git a/internal/mkcw/entrypoint.go b/internal/mkcw/entrypoint.go
new file mode 100644
index 0000000..d720321
--- /dev/null
+++ b/internal/mkcw/entrypoint.go
@@ -0,0 +1,6 @@
+package mkcw
+
+import _ "embed"
+
// entrypointCompressedBytes holds the gzip-compressed default entrypoint
// binary (presumably built from embed/entrypoint.s, which prints a notice to
// stderr and exits — TODO confirm the build provenance).
//go:embed "embed/entrypoint.gz"
var entrypointCompressedBytes []byte
diff --git a/internal/mkcw/luks.go b/internal/mkcw/luks.go
new file mode 100644
index 0000000..0d795e6
--- /dev/null
+++ b/internal/mkcw/luks.go
@@ -0,0 +1,51 @@
+package mkcw
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "os"
+
+ "github.com/containers/luksy"
+)
+
// CheckLUKSPassphrase checks that the specified LUKS-encrypted file can be
// decrypted using the specified passphrase.
//
// Both LUKS v1 and v2 headers are handled; for v2, each header copy that is
// present is checked.  The decrypted values are discarded — only the ability
// to decrypt is verified.
func CheckLUKSPassphrase(path, decryptionPassphrase string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	// Parse whichever generation of headers the file carries; v2json is
	// only meaningful alongside the v2 headers.
	v1header, v2headerA, v2headerB, v2json, err := luksy.ReadHeaders(f, luksy.ReadHeaderOptions{})
	if err != nil {
		return err
	}
	if v1header != nil {
		// v1: a nil error from Decrypt means the passphrase worked.
		_, _, _, _, err = v1header.Decrypt(decryptionPassphrase, f)
		return err
	}
	if v2headerA == nil && v2headerB == nil {
		return fmt.Errorf("no LUKS headers read from %q", path)
	}
	if v2headerA != nil {
		if _, _, _, _, err = v2headerA.Decrypt(decryptionPassphrase, f, *v2json); err != nil {
			return err
		}
	}
	if v2headerB != nil {
		if _, _, _, _, err = v2headerB.Decrypt(decryptionPassphrase, f, *v2json); err != nil {
			return err
		}
	}
	return nil
}
+
// GenerateDiskEncryptionPassphrase generates a random disk encryption
// password: 32 bytes from the system CSPRNG, hex-encoded into a 64-character
// string.
func GenerateDiskEncryptionPassphrase() (string, error) {
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		return "", err
	}
	return hex.EncodeToString(key[:]), nil
}
diff --git a/internal/mkcw/luks_test.go b/internal/mkcw/luks_test.go
new file mode 100644
index 0000000..3df723f
--- /dev/null
+++ b/internal/mkcw/luks_test.go
@@ -0,0 +1,66 @@
+package mkcw
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/luksy"
+ "github.com/stretchr/testify/require"
+)
+
// TestCheckLUKSPassphrase builds throwaway LUKS v1 and v2 images, each
// enrolled with two passphrases, and verifies that CheckLUKSPassphrase
// accepts both enrolled passphrases and rejects an arbitrary wrong one.
func TestCheckLUKSPassphrase(t *testing.T) {
	passphrase, err := GenerateDiskEncryptionPassphrase()
	require.NoError(t, err)
	secondPassphrase, err := GenerateDiskEncryptionPassphrase()
	require.NoError(t, err)

	t.Run("v1", func(t *testing.T) {
		// Build a LUKS v1 image: the header, then ten blocks of
		// encrypted zero bytes.
		header, encrypter, blockSize, err := luksy.EncryptV1([]string{secondPassphrase, passphrase}, "")
		require.NoError(t, err)
		f, err := os.Create(filepath.Join(t.TempDir(), "v1"))
		require.NoError(t, err)
		n, err := f.Write(header)
		require.NoError(t, err)
		require.Equal(t, len(header), n)
		wrapper := luksy.EncryptWriter(encrypter, f, blockSize)
		_, err = wrapper.Write(make([]byte, blockSize*10))
		require.NoError(t, err)
		wrapper.Close()
		f.Close()

		// Either enrolled passphrase should work; a random string should not.
		err = CheckLUKSPassphrase(f.Name(), passphrase)
		require.NoError(t, err)
		err = CheckLUKSPassphrase(f.Name(), secondPassphrase)
		require.NoError(t, err)
		err = CheckLUKSPassphrase(f.Name(), "nope, this is not a correct passphrase")
		require.Error(t, err)
	})

	t.Run("v2", func(t *testing.T) {
		// v2 supports multiple sector sizes; exercise each one.
		for _, sectorSize := range []int{512, 1024, 2048, 4096} {
			t.Run(fmt.Sprintf("sectorSize=%d", sectorSize), func(t *testing.T) {
				header, encrypter, blockSize, err := luksy.EncryptV2([]string{secondPassphrase, passphrase}, "", sectorSize)
				require.NoError(t, err)
				f, err := os.Create(filepath.Join(t.TempDir(), "v2"))
				require.NoError(t, err)
				n, err := f.Write(header)
				require.NoError(t, err)
				require.Equal(t, len(header), n)
				wrapper := luksy.EncryptWriter(encrypter, f, blockSize)
				_, err = wrapper.Write(make([]byte, blockSize*10))
				require.NoError(t, err)
				wrapper.Close()
				f.Close()

				err = CheckLUKSPassphrase(f.Name(), passphrase)
				require.NoError(t, err)
				err = CheckLUKSPassphrase(f.Name(), secondPassphrase)
				require.NoError(t, err)
				err = CheckLUKSPassphrase(f.Name(), "nope, this is not one of the correct passphrases")
				require.Error(t, err)
			})
		}
	})
}
diff --git a/internal/mkcw/makefs.go b/internal/mkcw/makefs.go
new file mode 100644
index 0000000..308f2a9
--- /dev/null
+++ b/internal/mkcw/makefs.go
@@ -0,0 +1,38 @@
+package mkcw
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+)
+
+// MakeFS formats the imageFile as a filesystem of the specified type,
+// populating it with the contents of the directory at sourcePath.
+// Recognized filesystem types are "ext2", "ext3", "ext4", and "btrfs".
+// Note that krun's init is currently hard-wired to assume "ext4".
+// Returns the stdout, stderr, and any error returned by the mkfs command.
+func MakeFS(sourcePath, imageFile, filesystem string) (string, string, error) {
+ var stdout, stderr strings.Builder
+ // N.B. mkfs.xfs can accept a protofile via its -p option, but the
+ // protofile format doesn't allow us to supply timestamp information or
+ // specify that files are hard linked
+ switch filesystem {
+ case "ext2", "ext3", "ext4":
+ logrus.Debugf("mkfs -t %s --rootdir %q %q", filesystem, sourcePath, imageFile)
+ cmd := exec.Command("mkfs", "-t", filesystem, "-d", sourcePath, imageFile)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ err := cmd.Run()
+ return stdout.String(), stderr.String(), err
+ case "btrfs":
+ logrus.Debugf("mkfs -t %s --rootdir %q %q", filesystem, sourcePath, imageFile)
+ cmd := exec.Command("mkfs", "-t", filesystem, "--rootdir", sourcePath, imageFile)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ err := cmd.Run()
+ return stdout.String(), stderr.String(), err
+ }
+ return "", "", fmt.Errorf("don't know how to make a %q filesystem with contents", filesystem)
+}
diff --git a/internal/mkcw/types/attest.go b/internal/mkcw/types/attest.go
new file mode 100644
index 0000000..276c7f0
--- /dev/null
+++ b/internal/mkcw/types/attest.go
@@ -0,0 +1,47 @@
+package types
+
// RegistrationRequest is the body of the request which we use for registering
// this confidential workload with the attestation server.
// https://github.com/virtee/reference-kbs/blob/10b2a4c0f8caf78a077210b172863bbae54f66aa/src/main.rs#L83
type RegistrationRequest struct {
	WorkloadID string `json:"workload_id"`
	LaunchMeasurement string `json:"launch_measurement"`
	Passphrase string `json:"passphrase"`
	TeeConfig string `json:"tee_config"` // JSON-encoded teeConfig? or specific to the type of TEE?
}

// TeeConfig contains information about a trusted execution environment.
type TeeConfig struct {
	Flags TeeConfigFlags `json:"flags"` // runtime requirement bits
	MinFW TeeConfigMinFW `json:"minfw"` // minimum platform firmware version
}

// TeeConfigFlags is a bit field containing policy flags specific to the environment.
// https://github.com/virtee/sev/blob/d3e40917fd8531c69f47c2498e9667fe8a5303aa/src/launch/sev.rs#L172
// https://github.com/virtee/sev/blob/d3e40917fd8531c69f47c2498e9667fe8a5303aa/src/launch/snp.rs#L114
type TeeConfigFlags struct {
	Bits TeeConfigFlagBits `json:"bits"`
}

// TeeConfigFlagBits are bits representing run-time expectations.
type TeeConfigFlagBits int

// SEV_CONFIG_* are policy bits for SEV guests; SNP_CONFIG_* are the
// independently-numbered policy bits for SNP guests.
const (
	SEV_CONFIG_NO_DEBUG TeeConfigFlagBits = 0b00000001 //revive:disable-line:var-naming no debugging of guests
	SEV_CONFIG_NO_KEY_SHARING TeeConfigFlagBits = 0b00000010 //revive:disable-line:var-naming no sharing keys between guests
	SEV_CONFIG_ENCRYPTED_STATE TeeConfigFlagBits = 0b00000100 //revive:disable-line:var-naming requires SEV-ES
	SEV_CONFIG_NO_SEND TeeConfigFlagBits = 0b00001000 //revive:disable-line:var-naming no transferring the guest to another platform
	SEV_CONFIG_DOMAIN TeeConfigFlagBits = 0b00010000 //revive:disable-line:var-naming no transferring the guest out of the domain (?)
	SEV_CONFIG_SEV TeeConfigFlagBits = 0b00100000 //revive:disable-line:var-naming no transferring the guest to non-SEV platforms
	SNP_CONFIG_SMT TeeConfigFlagBits = 0b00000001 //revive:disable-line:var-naming SMT is enabled on the host machine
	SNP_CONFIG_MANDATORY TeeConfigFlagBits = 0b00000010 //revive:disable-line:var-naming reserved bit which should always be set
	SNP_CONFIG_MIGRATE_MA TeeConfigFlagBits = 0b00000100 //revive:disable-line:var-naming allowed to use a migration agent
	SNP_CONFIG_DEBUG TeeConfigFlagBits = 0b00001000 //revive:disable-line:var-naming allow debugging
)

// TeeConfigMinFW corresponds to a minimum version of the kernel+initrd
// combination that should be booted.
type TeeConfigMinFW struct {
	Major int `json:"major"`
	Minor int `json:"minor"`
}
diff --git a/internal/mkcw/types/workload.go b/internal/mkcw/types/workload.go
new file mode 100644
index 0000000..9036485
--- /dev/null
+++ b/internal/mkcw/types/workload.go
@@ -0,0 +1,34 @@
+package types
+
+import "github.com/containers/buildah/define"
+
// WorkloadConfig is the data type which is encoded and stored in /krun-sev.json in a container
// image, and included directly in the disk image.
// https://github.com/containers/libkrun/blob/57c59dc5359bdeeb8260b3493e9f63d3708f9ab9/src/vmm/src/resources.rs#L57
type WorkloadConfig struct {
	Type define.TeeType `json:"tee"`
	TeeData string `json:"tee_data"` // Type == SEV: JSON-encoded SevWorkloadData, SNP: JSON-encoded SnpWorkloadData, others?
	WorkloadID string `json:"workload_id"`
	CPUs int `json:"cpus"`
	Memory int `json:"ram_mib"` // memory size; per the json tag, in MiB
	AttestationURL string `json:"attestation_url"`
}

// SevWorkloadData contains the path to the SEV certificate chain and optionally,
// the attestation server's public key(?)
// https://github.com/containers/libkrun/blob/d31747aa92cf83df2abaeb87e2a83311c135d003/src/vmm/src/linux/tee/amdsev.rs#L222
type SevWorkloadData struct {
	VendorChain string `json:"vendor_chain"`
	AttestationServerPubkey string `json:"attestation_server_pubkey"`
}

// SnpWorkloadData contains the required CPU generation name.
// https://github.com/virtee/oci2cw/blob/1502d5be33c2fa82d49aaa95781bbab2aa932781/examples/tee-config-snp.json
type SnpWorkloadData struct {
	Generation string `json:"gen"` // "milan" (naples=1, rome=2, milan=3, genoa/bergamo/siena=4, turin=5)
}

const (
	// SEV_NO_ES is a known trusted execution environment type: AMD-SEV (secure encrypted virtualization without encrypted state, requires epyc 1000 "naples")
	SEV_NO_ES define.TeeType = "sev_no_es" //revive:disable-line:var-naming
)
diff --git a/internal/mkcw/workload.go b/internal/mkcw/workload.go
new file mode 100644
index 0000000..4109ce9
--- /dev/null
+++ b/internal/mkcw/workload.go
@@ -0,0 +1,223 @@
+package mkcw
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal/mkcw/types"
+)
+
type (
	// WorkloadConfig is the data type which is encoded and stored in an image.
	WorkloadConfig = types.WorkloadConfig
	// SevWorkloadData is the type of data in WorkloadConfig.TeeData when the type is SEV.
	SevWorkloadData = types.SevWorkloadData
	// SnpWorkloadData is the type of data in WorkloadConfig.TeeData when the type is SNP.
	SnpWorkloadData = types.SnpWorkloadData
	// TeeType is one of the known types of trusted execution environments for which we
	// can generate suitable image contents.
	TeeType = define.TeeType
)

const (
	// maxWorkloadConfigSize is a sanity bound on the length field of an
	// image's configuration trailer.
	maxWorkloadConfigSize = 1024 * 1024
	// preferredPaddingBoundary is the default alignment for the padded
	// configuration blob appended to a disk image.
	preferredPaddingBoundary = 4096
	// SEV is a known trusted execution environment type: AMD-SEV
	SEV = define.SEV
	// SEV_NO_ES is a known trusted execution environment type: AMD-SEV without encrypted state
	SEV_NO_ES = types.SEV_NO_ES //revive:disable-line:var-naming
	// SNP is a known trusted execution environment type: AMD-SNP
	SNP = define.SNP
	// krun looks for its configuration JSON directly in a disk image if the last twelve bytes
	// of the disk image are this magic value followed by a little-endian 64-bit
	// length-of-the-configuration
	krunMagic = "KRUN"
)
+
+// ReadWorkloadConfigFromImage reads the workload configuration from the
+// specified disk image file
+func ReadWorkloadConfigFromImage(path string) (WorkloadConfig, error) {
+ // Read the last 12 bytes, which should be "KRUN" followed by a 64-bit
+ // little-endian length. The (length) bytes immediately preceding
+ // these hold the JSON-encoded workloadConfig.
+ var wc WorkloadConfig
+ f, err := os.Open(path)
+ if err != nil {
+ return wc, err
+ }
+ defer f.Close()
+
+ // Read those last 12 bytes.
+ finalTwelve := make([]byte, 12)
+ if _, err = f.Seek(-12, io.SeekEnd); err != nil {
+ return wc, fmt.Errorf("checking for workload config signature: %w", err)
+ }
+ if n, err := f.Read(finalTwelve); err != nil || n != len(finalTwelve) {
+ if err != nil && !errors.Is(err, io.EOF) {
+ return wc, fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err)
+ }
+ if n != len(finalTwelve) {
+ return wc, fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", path, n)
+ }
+ }
+ if magic := string(finalTwelve[0:4]); magic != "KRUN" {
+ return wc, fmt.Errorf("expected magic string KRUN in %q, found %q)", path, magic)
+ }
+ length := binary.LittleEndian.Uint64(finalTwelve[4:])
+ if length > maxWorkloadConfigSize {
+ return wc, fmt.Errorf("workload config in %q is %d bytes long, which seems unreasonable (max allowed %d)", path, length, maxWorkloadConfigSize)
+ }
+
+ // Read and decode the config.
+ configBytes := make([]byte, length)
+ if _, err = f.Seek(-(int64(length) + 12), io.SeekEnd); err != nil {
+ return wc, fmt.Errorf("looking for workload config from disk image: %w", err)
+ }
+ if n, err := f.Read(configBytes); err != nil || n != len(configBytes) {
+ if err != nil {
+ return wc, fmt.Errorf("reading workload config from disk image: %w", err)
+ }
+ return wc, fmt.Errorf("short read (expected %d bytes near the end of %q, got %d)", len(configBytes), path, n)
+ }
+ err = json.Unmarshal(configBytes, &wc)
+ if err != nil {
+ err = fmt.Errorf("unmarshaling configuration %q: %w", string(configBytes), err)
+ }
+ return wc, err
+}
+
// WriteWorkloadConfigToImage writes the workload configuration to the
// specified disk image file, overwriting a previous configuration if it's
// asked to and it finds one.
//
// The configuration is appended as a trailer — zero padding, the encoded
// configuration, the "KRUN" magic, and a little-endian 64-bit length — and
// the file is truncated immediately after the new trailer.
func WriteWorkloadConfigToImage(imageFile *os.File, workloadConfigBytes []byte, overwrite bool) error {
	// Read those last 12 bytes to check if there's a configuration there already, which we should overwrite.
	var overwriteOffset int64
	if overwrite {
		finalTwelve := make([]byte, 12)
		if _, err := imageFile.Seek(-12, io.SeekEnd); err != nil {
			return fmt.Errorf("checking for workload config signature: %w", err)
		}
		if n, err := imageFile.Read(finalTwelve); err != nil || n != len(finalTwelve) {
			// EOF with a full read is fine; anything else is an error.
			if err != nil && !errors.Is(err, io.EOF) {
				return fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err)
			}
			if n != len(finalTwelve) {
				return fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", imageFile.Name(), n)
			}
		}
		if magic := string(finalTwelve[0:4]); magic == "KRUN" {
			length := binary.LittleEndian.Uint64(finalTwelve[4:])
			// NOTE(review): the read side rejects only length > max, so it
			// accepts length == maxWorkloadConfigSize, which this check
			// skips — confirm whether the boundary case is intentional.
			if length < maxWorkloadConfigSize {
				// Replace the old config plus its 12-byte trailer.
				overwriteOffset = int64(length + 12)
			}
		}
	}
	// If we found a configuration in the file, try to figure out how much padding was used.
	paddingSize := int64(preferredPaddingBoundary)
	if overwriteOffset != 0 {
		st, err := imageFile.Stat()
		if err != nil {
			return err
		}
		// Try plausible padding sizes, largest first.  A candidate is only
		// viable if the old trailer fits inside it and the file size is a
		// multiple of it.
		for _, possiblePaddingLength := range []int64{0x100000, 0x10000, 0x1000, 0x200, 0x100} {
			if overwriteOffset > possiblePaddingLength {
				continue
			}
			if st.Size()%possiblePaddingLength != 0 {
				continue
			}
			if _, err := imageFile.Seek(-possiblePaddingLength, io.SeekEnd); err != nil {
				return fmt.Errorf("checking size of padding at end of file: %w", err)
			}
			buf := make([]byte, possiblePaddingLength)
			n, err := imageFile.Read(buf)
			if err != nil {
				return fmt.Errorf("reading possible padding at end of file: %w", err)
			}
			if n != len(buf) {
				return fmt.Errorf("short read checking size of padding at end of file: %d != %d", n, len(buf))
			}
			if bytes.Equal(buf[:possiblePaddingLength-overwriteOffset], make([]byte, possiblePaddingLength-overwriteOffset)) {
				// everything up to the configuration was zero bytes, so it was padding
				overwriteOffset = possiblePaddingLength
				paddingSize = possiblePaddingLength
				break
			}
		}
	}

	// Append the krun configuration to a new buffer.
	var formatted bytes.Buffer
	nWritten, err := formatted.Write(workloadConfigBytes)
	if err != nil {
		return fmt.Errorf("building workload config: %w", err)
	}
	if nWritten != len(workloadConfigBytes) {
		return fmt.Errorf("short write appending configuration to buffer: %d != %d", nWritten, len(workloadConfigBytes))
	}
	// Append the magic string to the buffer.
	nWritten, err = formatted.WriteString(krunMagic)
	if err != nil {
		return fmt.Errorf("building workload config signature: %w", err)
	}
	if nWritten != len(krunMagic) {
		return fmt.Errorf("short write appending krun magic to buffer: %d != %d", nWritten, len(krunMagic))
	}
	// Append the 64-bit little-endian length of the workload configuration to the buffer.
	workloadConfigLengthBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(workloadConfigLengthBytes, uint64(len(workloadConfigBytes)))
	nWritten, err = formatted.Write(workloadConfigLengthBytes)
	if err != nil {
		return fmt.Errorf("building workload config signature size: %w", err)
	}
	if nWritten != len(workloadConfigLengthBytes) {
		return fmt.Errorf("short write appending configuration length to buffer: %d != %d", nWritten, len(workloadConfigLengthBytes))
	}

	// Build a copy of that data, with padding preceding it, so that the
	// padded trailer's total length is a multiple of paddingSize.
	var padded bytes.Buffer
	if int64(formatted.Len())%paddingSize != 0 {
		extra := paddingSize - (int64(formatted.Len()) % paddingSize)
		nWritten, err := padded.Write(make([]byte, extra))
		if err != nil {
			return fmt.Errorf("buffering padding: %w", err)
		}
		if int64(nWritten) != extra {
			return fmt.Errorf("short write buffering padding for disk image: %d != %d", nWritten, extra)
		}
	}
	extra := int64(formatted.Len())
	nWritten, err = padded.Write(formatted.Bytes())
	if err != nil {
		return fmt.Errorf("buffering workload config: %w", err)
	}
	if int64(nWritten) != extra {
		return fmt.Errorf("short write buffering workload config: %d != %d", nWritten, extra)
	}

	// Write the buffer to the file, starting with padding.  overwriteOffset
	// is 0 (plain append) or the size of the old padded trailer (replace).
	if _, err = imageFile.Seek(-overwriteOffset, io.SeekEnd); err != nil {
		return fmt.Errorf("preparing to write workload config: %w", err)
	}
	nWritten, err = imageFile.Write(padded.Bytes())
	if err != nil {
		return fmt.Errorf("writing workload config: %w", err)
	}
	if nWritten != padded.Len() {
		return fmt.Errorf("short write writing configuration to disk image: %d != %d", nWritten, padded.Len())
	}
	// Truncate away anything that might remain after the new trailer.
	offset, err := imageFile.Seek(0, io.SeekCurrent)
	if err != nil {
		return fmt.Errorf("preparing mark end of disk image: %w", err)
	}
	if err = imageFile.Truncate(offset); err != nil {
		return fmt.Errorf("marking end of disk image: %w", err)
	}
	return nil
}
diff --git a/internal/mkcw/workload_test.go b/internal/mkcw/workload_test.go
new file mode 100644
index 0000000..2de766f
--- /dev/null
+++ b/internal/mkcw/workload_test.go
@@ -0,0 +1,62 @@
+package mkcw
+
+import (
+ "crypto/rand"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
// TestReadWriteWorkloadConfig exercises WriteWorkloadConfigToImage and
// ReadWorkloadConfigFromImage against a scratch file, checking how the file's
// size changes as configurations are appended or overwritten in place.
func TestReadWriteWorkloadConfig(t *testing.T) {
	// Create a temporary file to stand in for a disk image.
	temp := filepath.Join(t.TempDir(), "disk.img")
	f, err := os.OpenFile(temp, os.O_CREATE|os.O_RDWR, 0o600)
	require.NoError(t, err)
	err = f.Truncate(0x1000000)
	require.NoError(t, err)
	defer f.Close()

	// Generate a random "encoded workload config".
	workloadConfig := make([]byte, 0x100)
	n, err := rand.Read(workloadConfig)
	require.NoError(t, err)
	require.Equal(t, len(workloadConfig), n)

	// Read the size of our temporary file.
	st, err := f.Stat()
	require.NoError(t, err)
	originalSize := st.Size()

	// Should get an error, since there's no workloadConfig in there to read.
	_, err = ReadWorkloadConfigFromImage(f.Name())
	require.Error(t, err)

	// File should grow, even though we looked for an old config to overwrite.
	err = WriteWorkloadConfigToImage(f, workloadConfig, true)
	require.NoError(t, err)
	st, err = f.Stat()
	require.NoError(t, err)
	require.Greater(t, st.Size(), originalSize)
	originalSize = st.Size()

	// File shouldn't grow, even overwriting the config with a slightly
	// larger one, because it fits in the old trailer's padding.
	err = WriteWorkloadConfigToImage(f, append([]byte("slightly longer"), workloadConfig...), true)
	require.NoError(t, err)
	st, err = f.Stat()
	require.NoError(t, err)
	require.Equal(t, originalSize, st.Size())
	originalSize = st.Size()

	// File should grow if we're not trying to replace an old one config with a new one.
	err = WriteWorkloadConfigToImage(f, []byte("{\"comment\":\"quite a bit shorter\"}"), false)
	require.NoError(t, err)
	st, err = f.Stat()
	require.NoError(t, err)
	require.Greater(t, st.Size(), originalSize)

	// Should read successfully now that a valid JSON config was written.
	_, err = ReadWorkloadConfigFromImage(f.Name())
	require.NoError(t, err)
}
diff --git a/internal/parse/parse.go b/internal/parse/parse.go
new file mode 100644
index 0000000..89ff7d3
--- /dev/null
+++ b/internal/parse/parse.go
@@ -0,0 +1,79 @@
+package parse
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/common/pkg/parse"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// ValidateVolumeMountHostDir validates the host path of buildah --volume:
// it must be absolute, and it must exist.
func ValidateVolumeMountHostDir(hostDir string) error {
	if !filepath.IsAbs(hostDir) {
		return fmt.Errorf("invalid host path, must be an absolute path %q", hostDir)
	}
	_, err := os.Stat(hostDir)
	return err
}
+
// RevertEscapedColon converts "\:" to ":"
func RevertEscapedColon(source string) string {
	return strings.ReplaceAll(source, "\\:", ":")
}

// SplitStringWithColonEscape splits string into slice by colon.
// A backslash-escaped colon (i.e. "\:") is treated as a literal colon rather
// than as a separator.
func SplitStringWithColonEscape(str string) []string {
	result := make([]string, 0, 3)
	sb := &strings.Builder{}
	for idx, r := range str {
		if r == ':' {
			// The colon is backslash-escaped: keep it in the current
			// segment (the escaping backslash, already buffered, is
			// stripped later by RevertEscapedColon).  Note: the guard is
			// idx > 0, not the previous idx-1 > 0, so that an escaped
			// colon at string index 1 is honored too.
			if idx > 0 && str[idx-1] == '\\' {
				sb.WriteRune(r)
			} else {
				// os.Stat will fail if path contains escaped colon
				result = append(result, RevertEscapedColon(sb.String()))
				sb.Reset()
			}
		} else {
			sb.WriteRune(r)
		}
	}
	if sb.Len() > 0 {
		result = append(result, RevertEscapedColon(sb.String()))
	}
	return result
}
+
// Volume parses the input of --volume, which must be of the form
// "host-dir:ctr-dir[:options]" (with "\:" usable to escape a colon inside a
// path), into a bind-style mount spec.
func Volume(volume string) (specs.Mount, error) {
	mount := specs.Mount{}
	arr := SplitStringWithColonEscape(volume)
	if len(arr) < 2 {
		return mount, fmt.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
	}
	// Host path must be absolute and exist; the container path and any
	// options are validated by the shared containers/common parse helpers.
	if err := ValidateVolumeMountHostDir(arr[0]); err != nil {
		return mount, err
	}
	if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil {
		return mount, err
	}
	mountOptions := ""
	if len(arr) > 2 {
		mountOptions = arr[2]
		if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil {
			return mount, err
		}
	}
	// NOTE(review): when no options are given this produces Options == [""]
	// (splitting the empty string); presumably downstream consumers tolerate
	// the empty option entry — verify.
	mountOpts := strings.Split(mountOptions, ",")
	mount.Source = arr[0]
	mount.Destination = arr[1]
	mount.Type = "rbind"
	mount.Options = mountOpts
	return mount, nil
}
diff --git a/internal/source/add.go b/internal/source/add.go
new file mode 100644
index 0000000..8363c62
--- /dev/null
+++ b/internal/source/add.go
@@ -0,0 +1,133 @@
+package source
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/archive"
+ specV1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// AddOptions include data to alter certain knobs when adding a source artifact
+// to a source image.
+type AddOptions struct {
+	// Annotations for the source artifact, each in "key=value" format.
+	// A given key may appear only once; duplicates are rejected at parse time.
+	Annotations []string
+}
+
+// annotations parses the specified annotations and transforms them into a map.
+// A given annotation can be specified only once.  An error is returned if an
+// entry is not in "key=value" format or if a key is repeated.
+func (o *AddOptions) annotations() (map[string]string, error) {
+	annotations := make(map[string]string)
+
+	for _, unparsed := range o.Annotations {
+		// SplitN with limit 2 so values may themselves contain "=".
+		parsed := strings.SplitN(unparsed, "=", 2)
+		if len(parsed) != 2 {
+			return nil, fmt.Errorf("invalid annotation %q (expected format is \"key=value\")", unparsed)
+		}
+		if _, exists := annotations[parsed[0]]; exists {
+			return nil, fmt.Errorf("annotation %q specified more than once", parsed[0])
+		}
+		annotations[parsed[0]] = parsed[1]
+	}
+
+	return annotations, nil
+}
+
+// Add adds the specified source artifact at `artifactPath` to the source image
+// at `sourcePath`. Note that the artifact will be added as a gzip-compressed
+// tar ball. Add attempts to auto-tar and auto-compress only if necessary.
+//
+// The sequence is: tar+gzip the artifact, put it as a blob, append a layer
+// descriptor (with the parsed annotations) to the manifest, write the new
+// manifest, remove the old manifest blob, and repoint index.json.
+func Add(ctx context.Context, sourcePath string, artifactPath string, options AddOptions) error {
+	// Let's first make sure `sourcePath` exists and that we can access it.
+	if _, err := os.Stat(sourcePath); err != nil {
+		return err
+	}
+
+	// Parse user-provided annotations up front so we fail fast on
+	// malformed input before touching the image.
+	annotations, err := options.annotations()
+	if err != nil {
+		return err
+	}
+
+	ociDest, err := openOrCreateSourceImage(ctx, sourcePath)
+	if err != nil {
+		return err
+	}
+	defer ociDest.Close()
+
+	// Stream the artifact as a gzip-compressed tarball; PutBlob below
+	// consumes the stream.
+	tarStream, err := archive.TarWithOptions(artifactPath, &archive.TarOptions{Compression: archive.Gzip})
+	if err != nil {
+		return fmt.Errorf("creating compressed tar stream: %w", err)
+	}
+
+	info := types.BlobInfo{
+		Size: -1, // "unknown": we'll get that information *after* adding
+	}
+	addedBlob, err := ociDest.PutBlob(ctx, tarStream, info, nil, false)
+	if err != nil {
+		return fmt.Errorf("adding source artifact: %w", err)
+	}
+
+	// Add the new layers to the source image's manifest.
+	manifest, oldManifestDigest, _, err := readManifestFromOCIPath(ctx, sourcePath)
+	if err != nil {
+		return err
+	}
+	manifest.Layers = append(manifest.Layers,
+		specV1.Descriptor{
+			MediaType:   specV1.MediaTypeImageLayerGzip,
+			Digest:      addedBlob.Digest,
+			Size:        addedBlob.Size,
+			Annotations: annotations,
+		},
+	)
+	manifestDigest, manifestSize, err := writeManifest(ctx, manifest, ociDest)
+	if err != nil {
+		return err
+	}
+
+	// Now, as we've written the updated manifest, we can delete the
+	// previous one. `types.ImageDestination` doesn't expose a high-level
+	// API to manage multi-manifest destination, so we need to do it
+	// manually. Not an issue, since paths are predictable for an OCI
+	// layout.
+	if err := removeBlob(oldManifestDigest, sourcePath); err != nil {
+		return fmt.Errorf("removing old manifest: %w", err)
+	}
+
+	// Finally, point the image index at the freshly written manifest.
+	manifestDescriptor := specV1.Descriptor{
+		MediaType: specV1.MediaTypeImageManifest,
+		Digest:    *manifestDigest,
+		Size:      manifestSize,
+	}
+	if err := updateIndexWithNewManifestDescriptor(&manifestDescriptor, sourcePath); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// updateIndexWithNewManifestDescriptor rewrites the OCI layout's index.json at
+// `sourcePath` so that it lists exactly the one specified `manifest`
+// descriptor.  Note that this assumes the layout holds a single image: any
+// previously listed manifests are dropped, not merged.
+func updateIndexWithNewManifestDescriptor(manifest *specV1.Descriptor, sourcePath string) error {
+	index := specV1.Index{}
+	indexPath := filepath.Join(sourcePath, "index.json")
+
+	rawData, err := os.ReadFile(indexPath)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(rawData, &index); err != nil {
+		return err
+	}
+
+	// Replace (not append) the manifest list with the new descriptor.
+	index.Manifests = []specV1.Descriptor{*manifest}
+	rawData, err = json.Marshal(&index)
+	if err != nil {
+		return err
+	}
+
+	// 0644: world-readable, matching typical OCI layout file permissions.
+	return os.WriteFile(indexPath, rawData, 0644)
+}
diff --git a/internal/source/create.go b/internal/source/create.go
new file mode 100644
index 0000000..c335cd0
--- /dev/null
+++ b/internal/source/create.go
@@ -0,0 +1,70 @@
+package source
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ spec "github.com/opencontainers/image-spec/specs-go"
+ specV1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// CreateOptions includes data to alter certain knobs when creating a source
+// image.
+type CreateOptions struct {
+	// Author is the author of the source image.
+	Author string
+	// TimeStamp controls whether a "created" timestamp is set or not.
+	// When false, the config's Created field is left nil (reproducible builds).
+	TimeStamp bool
+}
+
+// createdTime returns `time.Now()` if the options are configured to include a
+// time stamp, and nil otherwise (so the "created" field is omitted entirely).
+func (o *CreateOptions) createdTime() *time.Time {
+	if !o.TimeStamp {
+		return nil
+	}
+	now := time.Now()
+	return &now
+}
+
+// Create creates an empty source image at the specified `sourcePath`. Note
+// that `sourcePath` must not exist.  The resulting OCI layout contains a
+// source-image config and a manifest referencing it, but no layers yet.
+func Create(ctx context.Context, sourcePath string, options CreateOptions) error {
+	// Refuse to clobber an existing path.
+	if _, err := os.Stat(sourcePath); err == nil {
+		return fmt.Errorf("creating source image: %q already exists", sourcePath)
+	}
+
+	ociDest, err := openOrCreateSourceImage(ctx, sourcePath)
+	if err != nil {
+		return err
+	}
+	defer ociDest.Close()
+
+	// Create and add a config.
+	config := ImageConfig{
+		Author:  options.Author,
+		Created: options.createdTime(),
+	}
+	configBlob, err := addConfig(ctx, &config, ociDest)
+	if err != nil {
+		return err
+	}
+
+	// Create and write the manifest.
+	manifest := specV1.Manifest{
+		Versioned: spec.Versioned{SchemaVersion: 2},
+		MediaType: specV1.MediaTypeImageManifest,
+		Config: specV1.Descriptor{
+			MediaType: MediaTypeSourceImageConfig,
+			Digest:    configBlob.Digest,
+			Size:      configBlob.Size,
+		},
+	}
+	if _, _, err := writeManifest(ctx, &manifest, ociDest); err != nil {
+		return err
+	}
+
+	// Commit finalizes the OCI layout (index, oci-layout file).
+	return ociDest.Commit(ctx, nil)
+}
diff --git a/internal/source/pull.go b/internal/source/pull.go
new file mode 100644
index 0000000..f8743dd
--- /dev/null
+++ b/internal/source/pull.go
@@ -0,0 +1,110 @@
+package source
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/pkg/shortnames"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+)
+
+// PullOptions includes data to alter certain knobs when pulling a source
+// image.
+type PullOptions struct {
+	// Require HTTPS and verify certificates when accessing the registry.
+	TLSVerify bool
+	// username[:password] to use when connecting to the registry.
+	Credentials string
+	// Quiet the progress bars when pulling.
+	Quiet bool
+}
+
+// Pull `imageInput` from a container registry to `sourcePath`.  The
+// destination path must not exist yet; the image is validated to be a source
+// image (by config media type) before it is copied into a local OCI layout.
+func Pull(ctx context.Context, imageInput string, sourcePath string, options PullOptions) error {
+	// Refuse to clobber an existing path.
+	if _, err := os.Stat(sourcePath); err == nil {
+		return fmt.Errorf("%q already exists", sourcePath)
+	}
+
+	srcRef, err := stringToImageReference(imageInput)
+	if err != nil {
+		return err
+	}
+	destRef, err := layout.ParseReference(sourcePath)
+	if err != nil {
+		return err
+	}
+
+	// TLSVerify=false maps to "insecure skip verify" for the registry.
+	sysCtx := &types.SystemContext{
+		DockerInsecureSkipTLSVerify: types.NewOptionalBool(!options.TLSVerify),
+	}
+	if options.Credentials != "" {
+		authConf, err := parse.AuthConfig(options.Credentials)
+		if err != nil {
+			return err
+		}
+		sysCtx.DockerAuthConfig = authConf
+	}
+
+	// Make sure the remote image really is a source image before copying.
+	if err := validateSourceImageReference(ctx, srcRef, sysCtx); err != nil {
+		return err
+	}
+
+	policy, err := signature.DefaultPolicy(sysCtx)
+	if err != nil {
+		return fmt.Errorf("obtaining default signature policy: %w", err)
+	}
+	// NOTE(review): policyContext is never destroyed; consider
+	// `defer policyContext.Destroy()` per the containers/image signature
+	// API — confirm against upstream usage.
+	policyContext, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		return fmt.Errorf("creating new signature policy context: %w", err)
+	}
+
+	copyOpts := copy.Options{
+		SourceCtx: sysCtx,
+	}
+	// Progress output goes to stderr unless quieted.
+	if !options.Quiet {
+		copyOpts.ReportWriter = os.Stderr
+	}
+	if _, err := copy.Image(ctx, policyContext, destRef, srcRef, &copyOpts); err != nil {
+		return fmt.Errorf("pulling source image: %w", err)
+	}
+
+	return nil
+}
+
+// stringToImageReference parses `imageInput` into a docker:// image reference.
+// Short names are rejected so that registry resolution is never ambiguous.
+func stringToImageReference(imageInput string) (types.ImageReference, error) {
+	if shortnames.IsShortName(imageInput) {
+		return nil, fmt.Errorf("pulling source images by short name (%q) is not supported, please use a fully-qualified name", imageInput)
+	}
+
+	ref, err := alltransports.ParseImageName("docker://" + imageInput)
+	if err != nil {
+		return nil, fmt.Errorf("parsing image name: %w", err)
+	}
+
+	return ref, nil
+}
+
+// validateSourceImageReference verifies that `ref` points at a source image,
+// i.e., an OCI manifest whose config media type is MediaTypeSourceImageConfig.
+func validateSourceImageReference(ctx context.Context, ref types.ImageReference, sysCtx *types.SystemContext) error {
+	src, err := ref.NewImageSource(ctx, sysCtx)
+	if err != nil {
+		return fmt.Errorf("creating image source from reference: %w", err)
+	}
+	defer src.Close()
+
+	ociManifest, _, _, err := readManifestFromImageSource(ctx, src)
+	if err != nil {
+		return err
+	}
+
+	// The config media type is what distinguishes a source image from a
+	// regular container image.
+	if ociManifest.Config.MediaType != MediaTypeSourceImageConfig {
+		return fmt.Errorf("invalid media type of image config %q (expected: %q)", ociManifest.Config.MediaType, MediaTypeSourceImageConfig)
+	}
+
+	return nil
+}
diff --git a/internal/source/push.go b/internal/source/push.go
new file mode 100644
index 0000000..799912c
--- /dev/null
+++ b/internal/source/push.go
@@ -0,0 +1,69 @@
+package source
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/types"
+)
+
+// PushOptions includes data to alter certain knobs when pushing a source
+// image.
+type PushOptions struct {
+	// Require HTTPS and verify certificates when accessing the registry.
+	TLSVerify bool
+	// username[:password] to use when connecting to the registry.
+	Credentials string
+	// Quiet the progress bars when pushing.
+	Quiet bool
+}
+
+// Push the source image at `sourcePath` to `imageInput` at a container
+// registry.  `sourcePath` must be a local OCI layout; `imageInput` must be a
+// fully-qualified image name.
+func Push(ctx context.Context, sourcePath string, imageInput string, options PushOptions) error {
+	srcRef, err := layout.ParseReference(sourcePath)
+	if err != nil {
+		return err
+	}
+	destRef, err := stringToImageReference(imageInput)
+	if err != nil {
+		return err
+	}
+
+	// TLSVerify=false maps to "insecure skip verify" for the registry.
+	sysCtx := &types.SystemContext{
+		DockerInsecureSkipTLSVerify: types.NewOptionalBool(!options.TLSVerify),
+	}
+	if options.Credentials != "" {
+		authConf, err := parse.AuthConfig(options.Credentials)
+		if err != nil {
+			return err
+		}
+		sysCtx.DockerAuthConfig = authConf
+	}
+
+	policy, err := signature.DefaultPolicy(sysCtx)
+	if err != nil {
+		return fmt.Errorf("obtaining default signature policy: %w", err)
+	}
+	// NOTE(review): policyContext is never destroyed; consider
+	// `defer policyContext.Destroy()` per the containers/image signature
+	// API — confirm against upstream usage.
+	policyContext, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		return fmt.Errorf("creating new signature policy context: %w", err)
+	}
+
+	copyOpts := &copy.Options{
+		DestinationCtx: sysCtx,
+	}
+	// Progress output goes to stderr unless quieted.
+	if !options.Quiet {
+		copyOpts.ReportWriter = os.Stderr
+	}
+	if _, err := copy.Image(ctx, policyContext, destRef, srcRef, copyOpts); err != nil {
+		return fmt.Errorf("pushing source image: %w", err)
+	}
+
+	return nil
+}
diff --git a/internal/source/source.go b/internal/source/source.go
new file mode 100644
index 0000000..b44a903
--- /dev/null
+++ b/internal/source/source.go
@@ -0,0 +1,121 @@
+package source
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ specV1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// MediaTypeSourceImageConfig specifies the media type of a source-image config.
+// It is what identifies an image as a source image (see validateSourceImageReference).
+const MediaTypeSourceImageConfig = "application/vnd.oci.source.image.config.v1+json"
+
+// ImageConfig specifies the config of a source image.
+type ImageConfig struct {
+	// Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
+	// A nil value omits the field from the serialized config.
+	Created *time.Time `json:"created,omitempty"`
+
+	// Author is the author of the source image.
+	Author string `json:"author,omitempty"`
+}
+
+// writeManifest writes the specified OCI `manifest` to the source image at
+// `ociDest`, returning the digest and size (in bytes) of the serialized
+// manifest.
+func writeManifest(ctx context.Context, manifest *specV1.Manifest, ociDest types.ImageDestination) (*digest.Digest, int64, error) {
+	rawData, err := json.Marshal(&manifest)
+	if err != nil {
+		return nil, -1, fmt.Errorf("marshalling manifest: %w", err)
+	}
+
+	// nil instance digest: write as the destination's primary manifest.
+	if err := ociDest.PutManifest(ctx, rawData, nil); err != nil {
+		return nil, -1, fmt.Errorf("writing manifest: %w", err)
+	}
+
+	// Digest is computed over the exact bytes just written.
+	manifestDigest := digest.FromBytes(rawData)
+	return &manifestDigest, int64(len(rawData)), nil
+}
+
+// readManifestFromImageSource reads the manifest from the specified image
+// source and returns it along with its digest and size in bytes.  Note that
+// the manifest is expected to be an OCI v1 manifest; any other media type is
+// rejected.
+func readManifestFromImageSource(ctx context.Context, src types.ImageSource) (*specV1.Manifest, *digest.Digest, int64, error) {
+	// nil instance digest: read the manifest the reference points at.
+	rawData, mimeType, err := src.GetManifest(ctx, nil)
+	if err != nil {
+		return nil, nil, -1, err
+	}
+	if mimeType != specV1.MediaTypeImageManifest {
+		return nil, nil, -1, fmt.Errorf("image %q is of type %q (expected: %q)", strings.TrimPrefix(src.Reference().StringWithinTransport(), "//"), mimeType, specV1.MediaTypeImageManifest)
+	}
+
+	manifest := specV1.Manifest{}
+	if err := json.Unmarshal(rawData, &manifest); err != nil {
+		return nil, nil, -1, fmt.Errorf("reading manifest: %w", err)
+	}
+
+	// Digest over the raw manifest bytes, so it can locate the blob on disk.
+	manifestDigest := digest.FromBytes(rawData)
+	return &manifest, &manifestDigest, int64(len(rawData)), nil
+}
+
+// readManifestFromOCIPath returns the manifest of the specified source image
+// at `sourcePath` along with its digest and size.  The digest can later on be
+// used to locate the manifest on the file system (see removeBlob).
+func readManifestFromOCIPath(ctx context.Context, sourcePath string) (*specV1.Manifest, *digest.Digest, int64, error) {
+	ociRef, err := layout.ParseReference(sourcePath)
+	if err != nil {
+		return nil, nil, -1, err
+	}
+
+	ociSource, err := ociRef.NewImageSource(ctx, &types.SystemContext{})
+	if err != nil {
+		return nil, nil, -1, err
+	}
+	defer ociSource.Close()
+
+	return readManifestFromImageSource(ctx, ociSource)
+}
+
+// openOrCreateSourceImage returns an OCI types.ImageDestination of the
+// specified `sourcePath`.  Note that if the path doesn't exist, it'll be
+// created along with the OCI directory layout.
+func openOrCreateSourceImage(ctx context.Context, sourcePath string) (types.ImageDestination, error) {
+	ociRef, err := layout.ParseReference(sourcePath)
+	if err != nil {
+		return nil, err
+	}
+
+	// This will implicitly create an OCI directory layout at `path`.
+	return ociRef.NewImageDestination(ctx, &types.SystemContext{})
+}
+
+// addConfig marshals `config` to JSON, adds it to `ociDest` as a blob, and
+// returns the corresponding blob info (digest and size).
+func addConfig(ctx context.Context, config *ImageConfig, ociDest types.ImageDestination) (*types.BlobInfo, error) {
+	rawData, err := json.Marshal(config)
+	if err != nil {
+		return nil, fmt.Errorf("marshalling config: %w", err)
+	}
+
+	info := types.BlobInfo{
+		Size: -1, // "unknown": we'll get that information *after* adding
+	}
+	// isConfig=true, unlike layer blobs added elsewhere in this package.
+	addedBlob, err := ociDest.PutBlob(ctx, bytes.NewReader(rawData), info, nil, true)
+	if err != nil {
+		return nil, fmt.Errorf("adding config: %w", err)
+	}
+
+	return &addedBlob, nil
+}
+
+// removeBlob removes the specified `blob` from the source image at `sourcePath`.
+// NOTE(review): the path hard-codes the sha256 algorithm directory — confirm
+// callers never pass digests of another algorithm.
+func removeBlob(blob *digest.Digest, sourcePath string) error {
+	// OCI layout: blobs live at <sourcePath>/blobs/sha256/<encoded digest>.
+	blobPath := filepath.Join(filepath.Join(sourcePath, "blobs/sha256"), blob.Encoded())
+	return os.Remove(blobPath)
+}
diff --git a/internal/tmpdir/tmpdir.go b/internal/tmpdir/tmpdir.go
new file mode 100644
index 0000000..ff966b2
--- /dev/null
+++ b/internal/tmpdir/tmpdir.go
@@ -0,0 +1,26 @@
+package tmpdir
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/sirupsen/logrus"
+)
+
+// GetTempDir returns the path of the preferred temporary directory on the host.
+// Resolution order:
+//  1. $TMPDIR from the environment (converted to an absolute path);
+//  2. image_copy_tmp_dir from the containers.conf configuration;
+//  3. the hard-coded fallback "/var/tmp".
+func GetTempDir() string {
+	if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
+		abs, err := filepath.Abs(tmpdir)
+		if err == nil {
+			return abs
+		}
+		// A TMPDIR we cannot resolve is ignored rather than fatal.
+		logrus.Warnf("ignoring TMPDIR from environment, evaluating it: %v", err)
+	}
+	if containerConfig, err := config.Default(); err == nil {
+		if tmpdir, err := containerConfig.ImageCopyTmpDir(); err == nil {
+			return tmpdir
+		}
+	}
+	return "/var/tmp"
+}
diff --git a/internal/tmpdir/tmpdir_test.go b/internal/tmpdir/tmpdir_test.go
new file mode 100644
index 0000000..ea7d673
--- /dev/null
+++ b/internal/tmpdir/tmpdir_test.go
@@ -0,0 +1,58 @@
+package tmpdir
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestGetTempDir exercises GetTempDir's resolution order: built-in default,
+// $TMPDIR override, relative-$TMPDIR normalization, and containers.conf's
+// image_copy_tmp_dir setting.
+func TestGetTempDir(t *testing.T) {
+	// test default: no TMPDIR, and CONTAINERS_CONF pointed at /dev/null so
+	// no config value can interfere.
+	err := os.Unsetenv("TMPDIR")
+	require.NoError(t, err)
+	err = os.Setenv("CONTAINERS_CONF", "/dev/null")
+	require.NoError(t, err)
+	tmpdir := GetTempDir()
+	assert.Equal(t, "/var/tmp", tmpdir)
+
+	// test TMPDIR Environment
+	err = os.Setenv("TMPDIR", "/tmp/bogus")
+	require.NoError(t, err)
+	tmpdir = GetTempDir()
+	assert.Equal(t, tmpdir, "/tmp/bogus")
+	err = os.Unsetenv("TMPDIR")
+	require.NoError(t, err)
+
+	// relative TMPDIR should be automatically converted to absolute
+	err = os.Setenv("TMPDIR", ".")
+	require.NoError(t, err)
+	tmpdir = GetTempDir()
+	assert.True(t, filepath.IsAbs(tmpdir), "path from GetTempDir should always be absolute")
+	err = os.Unsetenv("TMPDIR")
+	require.NoError(t, err)
+
+	// Write a throwaway containers.conf that sets image_copy_tmp_dir.
+	f, err := os.CreateTemp("", "containers.conf-")
+	require.NoError(t, err)
+	// close and remove the temporary file at the end of the program
+	defer f.Close()
+	defer os.Remove(f.Name())
+	data := []byte("[engine]\nimage_copy_tmp_dir=\"/mnt\"\n")
+	_, err = f.Write(data)
+	require.NoError(t, err)
+
+	err = os.Setenv("CONTAINERS_CONF", f.Name())
+	require.NoError(t, err)
+	// force config reset of default containers.conf
+	options := config.Options{
+		SetDefault: true,
+	}
+	_, err = config.New(&options)
+	require.NoError(t, err)
+	tmpdir = GetTempDir()
+	assert.Equal(t, "/mnt", tmpdir)
+
+}
diff --git a/internal/types.go b/internal/types.go
new file mode 100644
index 0000000..ee87eca
--- /dev/null
+++ b/internal/types.go
@@ -0,0 +1,18 @@
+package internal
+
+const (
+	// BuildahExternalArtifactsDir is the temp directory which stores external
+	// artifacts that are downloaded for a build.
+	// Example: tar files from external sources.
+	BuildahExternalArtifactsDir = "buildah-external-artifacts"
+)
+
+// Types in internal packages are subject to change between releases; avoid
+// using these outside of buildah.
+
+// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor.
+// StageExecutor has the ability to mount stages/images in the current context and
+// automatically clean them up.
+type StageMountDetails struct {
+	DidExecute bool   // tells if the stage which is being mounted was freshly executed or was part of older cache
+	IsStage    bool   // tells if mountpoint returned from stage executor is a stage or an image
+	MountPoint string // mountpoint of stage/image
+}
diff --git a/internal/util/util.go b/internal/util/util.go
new file mode 100644
index 0000000..01f4b10
--- /dev/null
+++ b/internal/util/util.go
@@ -0,0 +1,99 @@
+package util
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/common/libimage"
+ lplatform "github.com/containers/common/libimage/platform"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/unshare"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// LookupImage returns the *libimage.Image corresponding to the given image
+// name or ID in the store, or an error if no matching local image is found.
+// A nil SystemContext is accepted and replaced with an empty one.
+func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (*libimage.Image, error) {
+	systemContext := ctx
+	if systemContext == nil {
+		systemContext = &types.SystemContext{}
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return nil, err
+	}
+	// nil options: default lookup behavior.
+	localImage, _, err := runtime.LookupImage(image, nil)
+	if err != nil {
+		return nil, err
+	}
+	return localImage, nil
+}
+
+// NormalizePlatform validates and translate the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+//
+// Wrapper around libimage.NormalizePlatform to return and consume
+// v1.Platform instead of independent os, arch and variant.
+func NormalizePlatform(platform v1.Platform) v1.Platform {
+	// Named platformOS (not "os") so the local does not shadow the
+	// imported os package.
+	platformOS, arch, variant := lplatform.Normalize(platform.OS, platform.Architecture, platform.Variant)
+	return v1.Platform{
+		OS:           platformOS,
+		Architecture: arch,
+		Variant:      variant,
+	}
+}
+
+// ExportFromReader reads a tar stream from the given reader and exports it to
+// an external tar file, a directory (untarred), or stdout, as selected by
+// opts (IsDir / IsStdout / Path).
+func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
+	var err error
+	// Normalize the destination to an absolute path.
+	if !filepath.IsAbs(opts.Path) {
+		opts.Path, err = filepath.Abs(opts.Path)
+		if err != nil {
+			return err
+		}
+	}
+	if opts.IsDir {
+		// In order to keep this feature as close as possible to
+		// buildkit it was decided to preserve ownership when
+		// invoked as root since caller already has access to artifacts
+		// therefore we can preserve ownership as is, however for rootless users
+		// ownership has to be changed so exported artifacts can still
+		// be accessible by unprivileged users.
+		// See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
+		noLChown := false
+		if unshare.IsRootless() {
+			noLChown = true
+		}
+
+		// 0700: the export directory is private to the invoking user.
+		err = os.MkdirAll(opts.Path, 0700)
+		if err != nil {
+			return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err)
+		}
+
+		err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown})
+		if err != nil {
+			return fmt.Errorf("failed while performing untar at %q: %w", opts.Path, err)
+		}
+	} else {
+		// Tar output: either stdout or a file at opts.Path.
+		outFile := os.Stdout
+		if !opts.IsStdout {
+			outFile, err = os.Create(opts.Path)
+			if err != nil {
+				return fmt.Errorf("failed while creating destination tar at %q: %w", opts.Path, err)
+			}
+			defer outFile.Close()
+		}
+		_, err = io.Copy(outFile, input)
+		if err != nil {
+			return fmt.Errorf("failed while performing copy to %q: %w", opts.Path, err)
+		}
+	}
+	return nil
+}
diff --git a/internal/volumes/volumes.go b/internal/volumes/volumes.go
new file mode 100644
index 0000000..a79b8df
--- /dev/null
+++ b/internal/volumes/volumes.go
@@ -0,0 +1,637 @@
+package volumes
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "errors"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal"
+ internalParse "github.com/containers/buildah/internal/parse"
+ "github.com/containers/buildah/internal/tmpdir"
+ internalUtil "github.com/containers/buildah/internal/util"
+ "github.com/containers/common/pkg/parse"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/containers/storage/pkg/unshare"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ selinux "github.com/opencontainers/selinux/go-selinux"
+)
+
+const (
+	// TypeTmpfs is the type for mounting tmpfs
+	TypeTmpfs = "tmpfs"
+	// TypeCache is the type for mounting a common persistent cache from host
+	TypeCache = "cache"
+	// mount=type=cache must create a persistent directory on host so it's
+	// available for all consecutive builds.
+	// The lifecycle of this directory is inherited from how the host machine
+	// treats its temporary directory.
+	buildahCacheDir = "buildah-cache"
+	// BuildahCacheLockfile — mount=type=cache allows users to lock a cache
+	// store while it's being used by another build.
+	BuildahCacheLockfile = "buildah-cache-lockfile"
+	// BuildahCacheLockfileDir — all the lockfiles are stored in a separate
+	// directory inside `BuildahCacheDir`.
+	// Example: `/var/tmp/buildah-cache/<target>/buildah-cache-lockfile`
+	BuildahCacheLockfileDir = "buildah-cache-lockfiles"
+)
+
+// Sentinel errors returned while parsing --mount / --volume flag values;
+// callers may match them with errors.Is.
+var (
+	errBadMntOption  = errors.New("invalid mount option")
+	errBadOptionArg  = errors.New("must provide an argument for option")
+	errBadVolDest    = errors.New("must set volume destination")
+	errBadVolSrc     = errors.New("must set volume source")
+	errDuplicateDest = errors.New("duplicate mount destination")
+)
+
+// CacheParent returns the host directory under which --mount=type=cache
+// caches are created: <host temp dir>/buildah-cache-<rootless UID>, so each
+// user gets a distinct cache tree.
+func CacheParent() string {
+	return filepath.Join(tmpdir.GetTempDir(), buildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID()))
+}
+
+// GetBindMount parses a single bind mount entry from the --mount flag.
+// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty.
+// Caller is expected to perform unmount of any mounted images.
+//
+// Arguments: the key=value tokens of the --mount flag, the build context
+// directory (used to anchor relative sources), the storage store and SELinux
+// label for image mounts, any already-mounted stages/images, and the working
+// directory (used to anchor relative destinations).
+func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) {
+	newMount := specs.Mount{
+		Type: define.TypeBind,
+	}
+
+	// State tracked while scanning the options.
+	setRelabel := false
+	mountReadability := false
+	setDest := false
+	bindNonRecursive := false
+	fromImage := ""
+
+	for _, val := range args {
+		kv := strings.SplitN(val, "=", 2)
+		switch kv[0] {
+		case "type":
+			// This is already processed
+			continue
+		case "bind-nonrecursive":
+			// Plain "bind" instead of the default "rbind" (see below).
+			newMount.Options = append(newMount.Options, "bind")
+			bindNonRecursive = true
+		case "ro", "nosuid", "nodev", "noexec":
+			// TODO: detect duplication of these options.
+			// (Is this necessary?)
+			newMount.Options = append(newMount.Options, kv[0])
+			mountReadability = true
+		case "rw", "readwrite":
+			newMount.Options = append(newMount.Options, "rw")
+			mountReadability = true
+		case "readonly":
+			// Alias for "ro"
+			newMount.Options = append(newMount.Options, "ro")
+			mountReadability = true
+		case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
+			newMount.Options = append(newMount.Options, kv[0])
+		case "from":
+			if len(kv) == 1 {
+				return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+			}
+			fromImage = kv[1]
+		case "bind-propagation":
+			if len(kv) == 1 {
+				return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+			}
+			newMount.Options = append(newMount.Options, kv[1])
+		case "src", "source":
+			if len(kv) == 1 {
+				return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+			}
+			newMount.Source = kv[1]
+		case "target", "dst", "destination":
+			if len(kv) == 1 {
+				return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+			}
+			// Relative destinations are resolved against workDir.
+			targetPath := kv[1]
+			if !path.IsAbs(targetPath) {
+				targetPath = filepath.Join(workDir, targetPath)
+			}
+			if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
+				return newMount, "", err
+			}
+			newMount.Destination = targetPath
+			setDest = true
+		case "relabel":
+			if setRelabel {
+				return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
+			}
+			setRelabel = true
+			if len(kv) != 2 {
+				return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
+			}
+			// Map relabel=private/shared onto SELinux Z/z options.
+			switch kv[1] {
+			case "private":
+				newMount.Options = append(newMount.Options, "Z")
+			case "shared":
+				newMount.Options = append(newMount.Options, "z")
+			default:
+				return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
+			}
+		case "consistency":
+			// Option for OS X only, has no meaning on other platforms
+			// and can thus be safely ignored.
+			// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
+		default:
+			return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
+		}
+	}
+
+	// default mount readability is always readonly
+	if !mountReadability {
+		newMount.Options = append(newMount.Options, "ro")
+	}
+
+	// Following variable ensures that we return imagename only if we did additional mount
+	isImageMounted := false
+	if fromImage != "" {
+		mountPoint := ""
+		if additionalMountPoints != nil {
+			if val, ok := additionalMountPoints[fromImage]; ok {
+				mountPoint = val.MountPoint
+			}
+		}
+		// if mountPoint of image was not found in additionalMap
+		// or additionalMap was nil, try mounting image
+		if mountPoint == "" {
+			image, err := internalUtil.LookupImage(ctx, store, fromImage)
+			if err != nil {
+				return newMount, "", err
+			}
+
+			mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel)
+			if err != nil {
+				return newMount, "", err
+			}
+			isImageMounted = true
+		}
+		// The mounted image's root becomes the context the source is
+		// resolved against.
+		contextDir = mountPoint
+	}
+
+	// buildkit parity: default bind option must be `rbind`
+	// unless specified
+	if !bindNonRecursive {
+		newMount.Options = append(newMount.Options, "rbind")
+	}
+
+	if !setDest {
+		return newMount, fromImage, errBadVolDest
+	}
+
+	// buildkit parity: support absolute path for sources from current build context
+	if contextDir != "" {
+		// path should be /contextDir/specified path
+		// Clean with a leading separator confines the source inside contextDir.
+		newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source))
+	} else {
+		// looks like its coming from `build run --mount=type=bind` allow using absolute path
+		// error out if no source is set
+		if newMount.Source == "" {
+			return newMount, "", errBadVolSrc
+		}
+		if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
+			return newMount, "", err
+		}
+	}
+
+	opts, err := parse.ValidateVolumeOpts(newMount.Options)
+	if err != nil {
+		return newMount, fromImage, err
+	}
+	newMount.Options = opts
+
+	if !isImageMounted {
+		// we don't want any cleanups if image was not mounted explicitly
+		// so dont return anything
+		fromImage = ""
+	}
+
+	return newMount, fromImage, nil
+}
+
+// GetCacheMount parses a single cache mount entry from the --mount flag.
+//
+// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
+func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) {
+ var err error
+ var mode uint64
+ var buildahLockFilesDir string
+ var (
+ setDest bool
+ setShared bool
+ setReadOnly bool
+ foundSElinuxLabel bool
+ )
+ fromStage := ""
+ newMount := specs.Mount{
+ Type: define.TypeBind,
+ }
+ // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id
+ id := ""
+ //buidkit parity: cache directory defaults to 755
+ mode = 0o755
+ //buidkit parity: cache directory defaults to uid 0 if not specified
+ uid := 0
+ //buidkit parity: cache directory defaults to gid 0 if not specified
+ gid := 0
+ // sharing mode
+ sharing := "shared"
+
+ for _, val := range args {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
+ case "nosuid", "nodev", "noexec":
+ // TODO: detect duplication of these options.
+ // (Is this necessary?)
+ newMount.Options = append(newMount.Options, kv[0])
+ case "rw", "readwrite":
+ newMount.Options = append(newMount.Options, "rw")
+ case "readonly", "ro":
+ // Alias for "ro"
+ newMount.Options = append(newMount.Options, "ro")
+ setReadOnly = true
+ case "Z", "z":
+ newMount.Options = append(newMount.Options, kv[0])
+ foundSElinuxLabel = true
+ case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U":
+ newMount.Options = append(newMount.Options, kv[0])
+ setShared = true
+ case "sharing":
+ sharing = kv[1]
+ case "bind-propagation":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Options = append(newMount.Options, kv[1])
+ case "id":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ id = kv[1]
+ case "from":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ fromStage = kv[1]
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ targetPath := kv[1]
+ if !path.IsAbs(targetPath) {
+ targetPath = filepath.Join(workDir, targetPath)
+ }
+ if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
+ return newMount, nil, err
+ }
+ newMount.Destination = targetPath
+ setDest = true
+ case "src", "source":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Source = kv[1]
+ case "mode":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ mode, err = strconv.ParseUint(kv[1], 8, 32)
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err)
+ }
+ case "uid":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ uid, err = strconv.Atoi(kv[1])
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err)
+ }
+ case "gid":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ gid, err = strconv.Atoi(kv[1])
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err)
+ }
+ default:
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
+ }
+ }
+
+ // If selinux is enabled and no selinux option was configured
+ // default to `z` i.e shared content label.
+ if !foundSElinuxLabel && (selinux.EnforceMode() != selinux.Disabled) && fromStage == "" {
+ newMount.Options = append(newMount.Options, "z")
+ }
+
+ if !setDest {
+ return newMount, nil, errBadVolDest
+ }
+
+ if fromStage != "" {
+ // do not create cache on host
+ // instead use read-only mounted stage as cache
+ mountPoint := ""
+ if additionalMountPoints != nil {
+ if val, ok := additionalMountPoints[fromStage]; ok {
+ if val.IsStage {
+ mountPoint = val.MountPoint
+ }
+ }
+ }
+ // Cache does not supports using image so if not stage found
+ // return with error
+ if mountPoint == "" {
+ return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage)
+ }
+ // path should be /contextDir/specified path
+ newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
+ } else {
+ // we need to create cache on host if no image is being used
+
+ // since type is cache and cache can be reused by consecutive builds
+ // create a common cache directory, which persists on hosts within temp lifecycle
+ // add subdirectory if specified
+
+ // cache parent directory: creates separate cache parent for each user.
+ cacheParent := CacheParent()
+ // create cache on host if not present
+ err = os.MkdirAll(cacheParent, os.FileMode(0755))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err)
+ }
+
+ if id != "" {
+ newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
+ buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id))
+ } else {
+ newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
+ buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination))
+ }
+ idPair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ //buildkit parity: change uid and gid if specified otheriwise keep `0`
+ err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
+ }
+
+ // create a subdirectory inside `cacheParent` just to store lockfiles
+ buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir)
+ err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
+ }
+ }
+
+ var targetLock *lockfile.LockFile // = nil
+ succeeded := false
+ defer func() {
+ if !succeeded && targetLock != nil {
+ targetLock.Unlock()
+ }
+ }()
+ switch sharing {
+ case "locked":
+ // lock parent cache
+ lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
+ }
+ // Will be unlocked after the RUN step is executed.
+ lockfile.Lock()
+ targetLock = lockfile
+ case "shared":
+ // do nothing since default is `shared`
+ break
+ default:
+ // error out for unknown values
+ return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
+ }
+
+ // buildkit parity: default sharing should be shared
+ // unless specified
+ if !setShared {
+ newMount.Options = append(newMount.Options, "shared")
+ }
+
+ // buildkit parity: cache must writable unless `ro` or `readonly` is configured explicitly
+ if !setReadOnly {
+ newMount.Options = append(newMount.Options, "rw")
+ }
+
+ newMount.Options = append(newMount.Options, "bind")
+
+ opts, err := parse.ValidateVolumeOpts(newMount.Options)
+ if err != nil {
+ return newMount, nil, err
+ }
+ newMount.Options = opts
+
+ succeeded = true
+ return newMount, targetLock, nil
+}
+
+func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
+ finalVolumeMounts := make(map[string]specs.Mount)
+
+ for _, volume := range volumes {
+ volumeMount, err := internalParse.Volume(volume)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := finalVolumeMounts[volumeMount.Destination]; ok {
+ return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest)
+ }
+ finalVolumeMounts[volumeMount.Destination] = volumeMount
+ }
+ return finalVolumeMounts, nil
+}
+
+// UnlockLockArray is a helper for cleaning up after GetVolumes and the like.
+func UnlockLockArray(locks []*lockfile.LockFile) {
+ for _, lock := range locks {
+ lock.Unlock()
+ }
+}
+
+// GetVolumes gets the volumes from --volume and --mount
+//
+// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
+func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) {
+ unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ UnlockLockArray(targetLocks)
+ }
+ }()
+ volumeMounts, err := getVolumeMounts(volumes)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ for dest, mount := range volumeMounts {
+ if _, ok := unifiedMounts[dest]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
+ }
+ unifiedMounts[dest] = mount
+ }
+
+ finalMounts := make([]specs.Mount, 0, len(unifiedMounts))
+ for _, mount := range unifiedMounts {
+ finalMounts = append(finalMounts, mount)
+ }
+ succeeded = true
+ return finalMounts, mountedImages, targetLocks, nil
+}
+
+// getMounts takes user-provided input from the --mount flag and creates OCI
+// spec mounts.
+// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
+// buildah run --mount type=tmpfs,target=/dev/shm ...
+//
+// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??).
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) {
+ // If `type` is not set default to "bind"
+ mountType := define.TypeBind
+ finalMounts := make(map[string]specs.Mount)
+ mountedImages := make([]string, 0)
+ targetLocks := make([]*lockfile.LockFile, 0)
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ UnlockLockArray(targetLocks)
+ }
+ }()
+
+ errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
+
+ // TODO(vrothberg): the manual parsing can be replaced with a regular expression
+ // to allow a more robust parsing of the mount format and to give
+ // precise errors regarding supported format versus supported options.
+ for _, mount := range mounts {
+ tokens := strings.Split(mount, ",")
+ if len(tokens) < 2 {
+ return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+ }
+ for _, field := range tokens {
+ if strings.HasPrefix(field, "type=") {
+ kv := strings.Split(field, "=")
+ if len(kv) != 2 {
+ return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+ }
+ mountType = kv[1]
+ }
+ }
+ switch mountType {
+ case define.TypeBind:
+ mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ mountedImages = append(mountedImages, image)
+ case TypeCache:
+ mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if tl != nil {
+ targetLocks = append(targetLocks, tl)
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ case TypeTmpfs:
+ mount, err := GetTmpfsMount(tokens)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ default:
+ return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType)
+ }
+ }
+
+ succeeded = true
+ return finalMounts, mountedImages, targetLocks, nil
+}
+
+// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
+func GetTmpfsMount(args []string) (specs.Mount, error) {
+ newMount := specs.Mount{
+ Type: TypeTmpfs,
+ Source: TypeTmpfs,
+ }
+
+ setDest := false
+
+ for _, val := range args {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
+ case "ro", "nosuid", "nodev", "noexec":
+ newMount.Options = append(newMount.Options, kv[0])
+ case "readonly":
+ // Alias for "ro"
+ newMount.Options = append(newMount.Options, "ro")
+ case "tmpcopyup":
+ //the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
+ newMount.Options = append(newMount.Options, kv[0])
+ case "tmpfs-mode":
+ if len(kv) == 1 {
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
+ case "tmpfs-size":
+ if len(kv) == 1 {
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
+ case "src", "source":
+ return newMount, errors.New("source is not supported with tmpfs mounts")
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+ return newMount, err
+ }
+ newMount.Destination = kv[1]
+ setDest = true
+ default:
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
+ }
+ }
+
+ if !setDest {
+ return newMount, errBadVolDest
+ }
+
+ return newMount, nil
+}
diff --git a/logos/buildah-logo-source.svg b/logos/buildah-logo-source.svg
new file mode 100644
index 0000000..31afdcf
--- /dev/null
+++ b/logos/buildah-logo-source.svg
@@ -0,0 +1,2888 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="267.22922mm"
+ height="373.62955mm"
+ viewBox="0 0 267.22922 373.62956"
+ version="1.1"
+ id="svg8"
+ inkscape:version="0.92.2 5c3e80d, 2017-08-06"
+ sodipodi:docname="buildah-logo-source.svg"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/buildah-logo-reverse_sm.png"
+ inkscape:export-xdpi="30"
+ inkscape:export-ydpi="30">
+ <title
+ id="title81241">Buildah Logo</title>
+ <defs
+ id="defs2">
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163655"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163651"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163633"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163629"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163619"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163615"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163611"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163605"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163597"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163593"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect80935"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect80931"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect80913"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect80909"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect80899"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect80895"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect80891"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect80885"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect80877"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect80873"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect627"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect623"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect617"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect605"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect602"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5327"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5323"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5319"
+ effect="spiro" />
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient5241">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop5237" />
+ <stop
+ style="stop-color:#666666;stop-opacity:1"
+ offset="1"
+ id="stop5239" />
+ </linearGradient>
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5036"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5011"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5007"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5003"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4989"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4977"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4971"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4967"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4959"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4955"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4950"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4947"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4927"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4922"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4883"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4879"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4871"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4865"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4861"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4853"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4843"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4240"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4232"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4229"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4226"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4220"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4217"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4214"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2264"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2229"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2225"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2191"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2188"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1161"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1153"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1149"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1143"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1139"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1135"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1127"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1123"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1119"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1111"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1107"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1099"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1087"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1081"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1077"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1073"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1069"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1065"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1057"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1053"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1049"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1045"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1041"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1033"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1029"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1017"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1013"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect1009"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1005"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1001"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect987"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect983"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect975"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect971"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect967"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect963"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect959"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect744"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect740"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect736"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect732"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect728"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect724"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect716"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect712"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect708"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect650"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect646"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect642"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect638"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect634"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect630"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect622"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect618"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect614"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect561"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect557"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect553"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect549"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect545"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect541"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect533"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect529"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect525"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect426"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect422"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect418"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect414"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect410"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect406"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect398"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect394"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect390"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3218"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect3214"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect3210"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect3206"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect3202"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect3198"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect3190"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect3186"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect3182"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2808"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2804"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2800"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2796"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2792"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2784"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2780"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2768"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2764"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2760"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2742"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2738"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2734"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2730"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2726"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2722"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2718"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2706"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2702"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2698"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2688"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2684"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2680"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2676"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2672"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2668"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2664"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2629"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2625"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2621"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2617"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2613"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2609"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2605"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2589"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2585"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2581"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2577"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2573"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2569"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2565"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2561"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2557"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2553"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2549"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2545"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2541"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect2537"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2533"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2419"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect2397"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1570"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1566"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect1562"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4921"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4907"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="bspline"
+ id="path-effect4875"
+ is_visible="true"
+ weight="33.333333"
+ steps="2"
+ helper_size="0"
+ apply_no_weight="true"
+ apply_with_weight="true"
+ only_selected="false" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5878"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5876"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5874"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5872"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5870"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5868"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5866"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5864"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5862"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5858"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5854"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5828"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5824"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5798"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5794"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5790"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5786"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5782"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5778"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5774"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5770"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5628"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5624"
+ is_visible="true" />
+ <marker
+ inkscape:stockid="DiamondS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker5584"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5582"
+ d="M 0,-7.0710768 -7.0710894,0 0,7.0710589 7.0710462,0 Z"
+ style="fill:#4d4d4d;fill-opacity:1;fill-rule:evenodd;stroke:#4d4d4d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="DiamondS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="DiamondS"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5359"
+ d="M 0,-7.0710768 -7.0710894,0 0,7.0710589 7.0710462,0 Z"
+ style="fill:#4d4d4d;fill-opacity:1;fill-rule:evenodd;stroke:#4d4d4d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="scale(0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="DotL"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="DotL"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5335"
+ d="m -2.5,-1 c 0,2.76 -2.24,5 -5,5 -2.76,0 -5,-2.24 -5,-5 0,-2.76 2.24,-5 5,-5 2.76,0 5,2.24 5,5 z"
+ style="fill:#4d4d4d;fill-opacity:1;fill-rule:evenodd;stroke:#4d4d4d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,5.92,0.8)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5272"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5270"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5268"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5266"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5264"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5262"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5260"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5258"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5256"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5252"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5238"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5226"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5222"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5126"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5122"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5118"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5106"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5102"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5098"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5094"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5090"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5082"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5078"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5074"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5070"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5066"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5034"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5030"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5026"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5022"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5018"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5008"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5004"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5000"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4996"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4992"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4980"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4976"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4972"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4968"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4964"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4952"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4948"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4944"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4940"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4928"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4920"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4916"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4912"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4908"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4904"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4900"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4896"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4892"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="powerstroke"
+ id="path-effect4888"
+ is_visible="true"
+ offset_points="0,0.13229166"
+ sort_points="true"
+ interpolator_type="CubicBezierJohan"
+ interpolator_beta="0.2"
+ start_linecap_type="zerowidth"
+ linejoin_type="extrp_arc"
+ miter_limit="4"
+ end_linecap_type="zerowidth" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4886"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4854"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4850"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4846"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4842"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4838"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4834"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4830"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4826"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4822"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4818"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4814"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5624-3"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5256-6"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5226-7"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5222-5"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5268-3"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5260-5"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5270-6"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5272-2"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5262-9"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5258-1"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5264-2"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5266-7"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4921-9"
+ is_visible="true" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient5241"
+ id="linearGradient5243"
+ x1="732.84601"
+ y1="-159.4493"
+ x2="732.84601"
+ y2="-60.495129"
+ gradientUnits="userSpaceOnUse" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5036-3"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5003-6"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4214-7"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4232-5"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4229-3"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4959-5"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect4220-6"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect4217-2"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect5007-9"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect5011-1"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163593-3"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163597-6"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163605-7"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163611-5"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163615-3"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163619-5"
+ is_visible="true" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163629-6"
+ is_visible="true" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163633-2"
+ effect="spiro" />
+ <inkscape:path-effect
+ is_visible="true"
+ id="path-effect163651-9"
+ effect="spiro" />
+ <inkscape:path-effect
+ effect="spiro"
+ id="path-effect163655-1"
+ is_visible="true" />
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1"
+ inkscape:cx="388.45664"
+ inkscape:cy="938.70261"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:snap-global="false"
+ showguides="false"
+ inkscape:guide-bbox="true"
+ inkscape:window-width="2560"
+ inkscape:window-height="1376"
+ inkscape:window-x="0"
+ inkscape:window-y="27"
+ inkscape:window-maximized="1"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:lockguides="true"
+ inkscape:measure-start="0,0"
+ inkscape:measure-end="0,0">
+ <sodipodi:guide
+ position="1.9644311,145.18081"
+ orientation="1,0"
+ id="guide1489"
+ inkscape:locked="true" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata5">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title>Buildah Logo</dc:title>
+ <dc:date>12 December 2017</dc:date>
+ <dc:creator>
+ <cc:Agent>
+ <dc:title>Máirín Duffy &lt;duffy@redhat.com&gt;</dc:title>
+ </cc:Agent>
+ </dc:creator>
+ <dc:rights>
+ <cc:Agent>
+ <dc:title>Copyright Red Hat, Inc.</dc:title>
+ </cc:Agent>
+ </dc:rights>
+ <dc:publisher>
+ <cc:Agent>
+ <dc:title>Red Hat, Inc.</dc:title>
+ </cc:Agent>
+ </dc:publisher>
+ <dc:contributor>
+ <cc:Agent>
+ <dc:title>Daniel Walsh &lt;dwalsh@redhat.com&gt;, Tom Sweeney &lt;tsweeney@redhat.com&gt;</dc:title>
+ </cc:Agent>
+ </dc:contributor>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-619.60435,199.89276)">
+ <g
+ id="g164054"
+ transform="translate(-1.0583333,-251.19926)">
+ <g
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;stroke:#000000"
+ id="g163990"
+ transform="translate(-118.1493,-10.074071)"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png">
+ <path
+ id="path164069"
+ d="m 796.57913,145.63255 -19.29817,-9.23285 -4.82036,-20.8616 13.2871,-16.780616 21.38926,-0.06408 13.38485,16.701146 -4.69887,20.8897 z"
+ style="opacity:1;fill:#c9c8c6;fill-opacity:1;stroke:#000000;stroke-width:1.0583334;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
+ d="m 185.87625,59.397934 c -0.86973,0.217518 -2.57155,-0.134058 -2.52494,1.185647 0.29798,1.534184 0.51469,3.25265 -0.0756,4.733265 -2.59999,1.640261 -6.20252,6.520936 -9.25204,6.909269 l 43.94309,20.562507 42.71289,-19.994542 c 1.02895,-0.723043 2.47728,0.557072 1.54478,0.673163 -3.35476,-0.649977 -5.60495,-3.545794 -7.89139,-5.779104 -1.32714,-1.221465 -1.85144,-3.043527 -1.61413,-4.807152 -0.0512,-1.079531 1.14073,-2.378109 0.11981,-3.2134 -1.25435,-0.06792 -2.90233,-0.67841 -3.75435,0.61017 -3.09923,3.191828 -7.98829,4.311297 -12.1138,2.887779 -1.55682,-0.854291 -3.06748,0.550296 -4.47291,1.067862 -6.87259,3.170321 -14.6714,4.278483 -21.96511,2.268758 -3.27701,-0.820909 -6.47851,-1.975772 -9.37028,-3.683317 -1.34887,-0.137072 -2.59577,0.774552 -3.96402,0.837618 -3.77142,0.600908 -7.75015,-0.913634 -10.36088,-3.74839 -0.24699,-0.267363 -0.5888,-0.503792 -0.96111,-0.510133 z m 32.50205,15.39649 c 1.43219,-0.301225 0.54304,1.655686 0.79688,2.500732 0.0233,4.281784 0.0465,8.563566 0.0697,12.845351 -1.3554,0.293932 -1.91251,-0.210418 -1.5934,-1.590991 v -13.77926 c 0.24226,0.0081 0.48452,0.01611 0.72678,0.02417 z"
+ id="path164067"
+ inkscape:connector-curvature="0"
+ transform="matrix(0.26458333,0,0,0.26458333,738.81198,118.53055)"
+ sodipodi:nodetypes="cccccccccccccccccccccccccc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79374993;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
+ d="m 799.53748,117.44571 c -0.14618,0.005 -0.2976,0.0274 -0.42892,0.009 -2.03273,0.0428 -4.06359,0.0892 -6.0942,0.031 -0.28926,0.11428 -0.24836,0.50228 -0.35967,0.75138 -0.22736,0.98319 -0.0226,1.99923 0.0233,2.99155 0.07,0.84001 0.14093,1.67976 0.21136,2.51974 -1.27629,1.32719 -2.44789,2.78357 -3.30058,4.42195 -0.45703,0.87705 -0.62335,1.88496 -0.49712,2.86391 0.0203,0.83713 0.14089,1.79437 -0.44494,2.48357 -0.2045,0.17656 -0.15243,0.47737 0.0667,0.6134 0.61884,0.5474 1.48745,0.80041 2.29805,0.60306 0.99978,-0.16365 1.63821,-1.03284 2.31768,-1.6955 0.15864,-0.2272 0.63711,-0.45408 0.38551,-0.76998 -0.7675,-1.08459 -1.32972,-2.44507 -1.0697,-3.79098 0.12353,-0.6101 0.64173,-1.02068 1.16892,-1.28675 0.28556,-0.28377 -0.2066,-0.53663 -0.23823,-0.83509 0.0848,-0.56057 0.18974,-1.14181 0.44597,-1.65209 0.25811,-0.42031 0.80937,-0.32417 1.21078,-0.48731 0.63192,-0.22035 1.33566,-0.35672 1.98024,-0.10284 0.44879,0.16718 0.94179,0.19308 1.38079,0.37569 0.40792,0.36381 0.44788,0.95927 0.5917,1.45728 0.0844,0.26471 0.0795,0.56128 -0.12093,0.77463 -0.15859,0.17909 -0.12732,0.47628 0.12661,0.54311 0.63811,0.32204 1.10695,0.97435 1.09296,1.70481 0.0246,1.20969 -0.45512,2.37707 -1.16789,3.33726 -0.10802,0.32476 0.32009,0.49853 0.46974,0.73794 0.65863,0.70094 1.34012,1.50616 2.33267,1.70584 0.88352,0.181 1.86515,-0.13458 2.46135,-0.80718 0.0949,-0.27388 -0.28759,-0.40886 -0.36122,-0.65371 -0.42704,-0.7126 -0.29638,-1.55962 -0.2806,-2.34869 0.15195,-1.12447 -0.16703,-2.25995 -0.74207,-3.22358 -0.84528,-1.44611 -1.89414,-2.76858 -3.0608,-3.96823 0.10988,-1.60331 0.32998,-3.20245 0.33383,-4.81108 -0.019,-0.47827 -0.13757,-0.95613 -0.33435,-1.39061 -0.10996,-0.0918 -0.25069,-0.10579 -0.39687,-0.10129 z m -0.81339,15.38149 c -0.0464,-0.003 -0.0974,0.0351 -0.14366,0.0372 -0.54357,0.44869 -1.198,0.81023 -1.92236,0.80047 -0.71698,0.0539 -1.45907,-0.0997 -2.0345,-0.54881 -0.19522,-0.13527 -0.4874,-0.43793 -0.68678,-0.14831 -0.47322,0.53276 -0.98364,1.03938 -1.4764,1.54771 
-0.15575,0.31923 0.32053,0.38287 0.51935,0.50023 1.12198,0.45684 2.32628,0.77692 3.54397,0.77773 1.30395,-0.0584 2.61944,-0.35107 3.78271,-0.9524 0.27683,-0.2308 -0.0777,-0.51977 -0.23306,-0.70125 -0.43157,-0.4316 -0.83203,-0.91444 -1.30431,-1.29242 -0.0141,-0.0134 -0.0295,-0.019 -0.045,-0.0202 z"
+ id="path164056" />
+ </g>
+ <path
+ style="fill:#fde385;fill-opacity:1;stroke:none;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 669.39724,96.367584 1.02899,-1.957235 15.43482,-0.460526 1.59025,2.993416 -2.61924,6.332231 -5.42557,0.80592 -3.92887,0.34539 -4.20949,-2.76315 z"
+ id="path163992"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 668.72873,96.705238 c 1.29471,-1.620631 2.29404,-2.396806 3.34528,-2.88108 l 0.51322,0.669933 m 14.83217,1.608651 c -1.2947,-1.620635 -2.15245,-2.246185 -3.06212,-2.579834 l -0.52526,0.706599"
+ id="path163994"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccccc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#cccccc;fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 677.98853,116.49938 c -0.34914,-0.002 -0.69863,0.044 -1.03586,0.14075 -2.10393,0.68594 -2.93218,1.30639 -2.55731,3.44971 0.43873,2.20092 2.18697,4.04843 3.96734,3.90808 1.78019,0.13991 3.5281,-1.70739 3.96677,-3.90808 0.37487,-2.14332 -0.45337,-2.76377 -2.5573,-3.44971 -0.33722,-0.0967 -0.68672,-0.14308 -1.03586,-0.14075 -0.12482,0.001 -0.24946,0.0116 -0.37361,0.0253 -0.12433,-0.014 -0.24916,-0.0242 -0.37417,-0.0253 z"
+ id="path163996"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 682.08117,113.78901 c 0.91986,0.9492 1.75512,1.98035 2.49266,3.07724 0.68351,1.01654 1.29935,2.144 1.34344,3.36817 0.0249,0.69116 -0.135,1.38133 -0.0846,2.07111 0.0252,0.34488 0.10516,0.69066 0.27705,0.99072 0.17189,0.30006 0.44194,0.55171 0.77047,0.65966 0.22103,0.0726 0.46179,0.0784 0.68921,0.0293 0.22741,-0.0491 0.44164,-0.15193 0.6304,-0.28796 0.37751,-0.27205 0.64758,-0.66865 0.86173,-1.08176 0.52388,-1.01062 0.75221,-2.15202 0.83092,-3.28763 0.0787,-1.13561 0.0138,-2.27549 -0.0167,-3.41341 -0.0193,-0.72156 -0.0232,-1.45577 0.17755,-2.14911 0.23128,-0.79874 0.71878,-1.49341 1.15186,-2.20328 0.43309,-0.70988 0.82802,-1.48546 0.83801,-2.31695 0.007,-0.57479 -0.1711,-1.13308 -0.31075,-1.69069 -0.13965,-0.55761 -0.24144,-1.14927 -0.0841,-1.70214 0.10561,-0.371 0.32101,-0.69904 0.51086,-1.03483 0.95093,-1.68188 1.29523,-3.68638 1.02955,-5.600118 -0.26569,-1.91374 -1.12956,-3.730164 -2.39332,-5.191636 -0.0591,-0.06838 -0.11962,-0.136493 -0.19144,-0.191396 -0.0718,-0.0549 -0.15638,-0.09637 -0.24644,-0.104263 -0.0833,-0.0073 -0.16782,0.01471 -0.24093,0.05531 -0.0731,0.0406 -0.13525,0.09915 -0.18648,0.165253 -0.10245,0.132201 -0.16121,0.291958 -0.22941,0.444673 -0.34883,0.781052 -0.96388,1.408741 -1.59109,1.990395 -0.62722,0.581655 -1.28716,1.144393 -1.76597,1.853243 -0.51192,0.757873 -0.79212,1.647778 -0.99797,2.538874 -0.20585,0.8911 -0.34453,1.79879 -0.60719,2.67483 -0.0931,0.31033 -0.2015,0.61605 -0.32481,0.91564 -0.83771,-0.13943 -1.68628,-0.21352 -2.53548,-0.22138 -0.92151,-0.009 -1.84374,0.0609 -2.75357,0.20747 0.83081,0.40874 1.56474,1.01281 2.12562,1.74951 0.68638,0.90154 1.10965,2.00099 1.20503,3.13006 z"
+ id="path163998"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163593"
+ inkscape:original-d="m 682.08117,113.78901 c 0.99336,-0.47132 1.43537,2.00637 2.49266,3.07724 0.52864,0.53542 0.96775,1.92235 1.34344,3.36817 0.37569,1.44583 0.68796,2.95052 0.96292,3.72149 0.54992,1.54193 1.74318,-1.25801 2.18134,-1.34044 0.9632,-0.18121 0.58141,-6.38011 0.81425,-6.70104 0.2328,-0.32093 0.13498,0.0561 0.17755,-2.14911 0.0426,-2.20518 1.12284,-2.20798 1.98987,-4.52023 0.86703,-2.31224 -0.17616,-2.45687 -0.39481,-3.39283 -0.21864,-0.93597 0.34029,-0.69019 0.51086,-1.03483 0.17058,-0.34465 -1.3155,-7.194794 -1.36377,-10.791754 -0.0483,-3.596939 -0.29221,-0.197401 -0.43788,-0.295659 -0.14566,-0.09827 -0.43817,0.4432 -0.65682,0.665241 -0.21864,0.222049 -2.23834,2.562126 -3.35706,3.843638 -1.11874,1.28151 -0.59065,3.543314 -1.60516,5.213704 -1.0145,1.6704 -0.26164,0.57279 -0.32481,0.91564 -1.47046,-1.47997 -2.08084,-0.28081 -2.53548,-0.22138 -0.45464,0.0595 -1.5473,-1.02086 -2.75357,0.20747 0.25395,0.23578 1.36429,0.99982 2.12562,1.74951 0.76132,0.74967 1.58531,2.48745 1.20503,3.13006 -0.20459,1.02558 -1.79371,4.55524 -0.37418,4.55511 z"
+ sodipodi:nodetypes="csscscccscccccccscscc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="csc"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 665.98766,127.54111 c 1.45053,0.14024 2.01604,-0.91633 2.5656,-1.36858 1.09913,-0.90452 0.1113,-1.91988 0.23409,-2.39785"
+ id="path164002"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:connector-curvature="0"
+ id="path164004"
+ d="m 656.79097,107.36883 6.8301,1.80907"
+ style="fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="cc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="csscscccscccccccscscc"
+ inkscape:original-d="m 674.60667,113.78901 c -0.99336,-0.47132 -1.43537,2.00637 -2.49266,3.07724 -0.52864,0.53543 -0.96775,1.92236 -1.34344,3.36818 -0.37569,1.44582 -0.68796,2.95051 -0.96292,3.72148 -0.54992,1.54193 -1.74318,-1.25801 -2.18134,-1.34044 -0.9632,-0.18121 -0.58141,-6.38011 -0.81425,-6.70104 -0.2328,-0.32093 -0.13498,0.0561 -0.17755,-2.14911 -0.0426,-2.20518 -1.12284,-2.20798 -1.98987,-4.52023 -0.86703,-2.31224 0.17616,-2.45687 0.39481,-3.39283 0.21864,-0.93597 -0.34029,-0.69019 -0.51086,-1.03483 -0.17058,-0.34465 1.3155,-7.194794 1.36377,-10.791754 0.0483,-3.596939 0.29221,-0.197401 0.43788,-0.295659 0.14566,-0.09827 0.43817,0.4432 0.65682,0.665241 0.21864,0.222049 2.23834,2.562126 3.35706,3.843638 1.11874,1.28151 0.59065,3.543314 1.60516,5.213704 1.0145,1.6704 0.26164,0.57279 0.32481,0.91564 1.47046,-1.47997 2.08084,-0.28081 2.53548,-0.22138 0.45464,0.0595 1.5473,-1.02086 2.75357,0.20747 -0.25395,0.23578 -1.36429,0.99982 -2.12562,1.74951 -0.76132,0.74967 -1.58531,2.48745 -1.20503,3.13006 0.20459,1.02558 1.79371,4.55524 0.37418,4.55511 z"
+ inkscape:path-effect="#path-effect163605"
+ inkscape:connector-curvature="0"
+ id="path164006"
+ d="m 674.60667,113.78901 c -0.91986,0.9492 -1.75512,1.98035 -2.49266,3.07724 -0.68351,1.01654 -1.29935,2.14401 -1.34344,3.36818 -0.0249,0.69116 0.135,1.38133 0.0846,2.0711 -0.0252,0.34489 -0.10516,0.69066 -0.27704,0.99072 -0.17189,0.30007 -0.44195,0.55171 -0.77047,0.65966 -0.22103,0.0726 -0.46179,0.0784 -0.68921,0.0293 -0.22741,-0.0491 -0.44164,-0.15193 -0.6304,-0.28796 -0.37751,-0.27205 -0.64758,-0.66865 -0.86173,-1.08176 -0.52388,-1.01062 -0.75221,-2.15202 -0.83092,-3.28763 -0.0787,-1.13561 -0.0138,-2.27549 0.0167,-3.41341 0.0193,-0.72156 0.0232,-1.45577 -0.17755,-2.14911 -0.23128,-0.79874 -0.71878,-1.49341 -1.15186,-2.20328 -0.43309,-0.70988 -0.82802,-1.48546 -0.83801,-2.31695 -0.007,-0.57479 0.1711,-1.13308 0.31075,-1.69069 0.13965,-0.55761 0.24144,-1.14927 0.0841,-1.70214 -0.10561,-0.371 -0.32101,-0.69904 -0.51086,-1.03483 -0.95093,-1.68188 -1.29523,-3.68638 -1.02955,-5.600118 0.26569,-1.91374 1.12956,-3.730164 2.39332,-5.191636 0.0591,-0.06838 0.11962,-0.136493 0.19144,-0.191396 0.0718,-0.0549 0.15638,-0.09637 0.24644,-0.104263 0.0833,-0.0073 0.16782,0.01471 0.24093,0.05531 0.0731,0.0406 0.13525,0.09915 0.18648,0.165253 0.10245,0.132201 0.16121,0.291958 0.22941,0.444673 0.34883,0.781052 0.96388,1.408741 1.59109,1.990395 0.62722,0.581655 1.28716,1.144393 1.76597,1.853243 0.51192,0.757873 0.79212,1.647778 0.99797,2.538874 0.20585,0.8911 0.34453,1.79879 0.60719,2.67483 0.093,0.31033 0.2015,0.61605 0.32481,0.91564 0.83771,-0.13943 1.68628,-0.21352 2.53548,-0.22138 0.92151,-0.009 1.84374,0.0609 2.75357,0.20747 -0.83081,0.40874 -1.56474,1.01281 -2.12562,1.74951 -0.68638,0.90154 -1.10965,2.00099 -1.20503,3.13006 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <ellipse
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ ry="2.6315403"
+ rx="2.5586886"
+ cy="112.94144"
+ cx="-685.8017"
+ id="ellipse164008"
+ style="fill:#000000;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ transform="scale(-1,1)"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="csc"
+ inkscape:original-d="m 688.09564,109.52895 c -0.77869,-0.31413 -1.37767,-0.81293 -2.16284,-1.12668 -0.78518,-0.31375 -1.50033,0.61846 -2.15345,0.83557"
+ inkscape:path-effect="#path-effect163611"
+ inkscape:connector-curvature="0"
+ id="path164010"
+ d="m 688.09564,109.52895 c -0.51006,-0.66669 -1.32419,-1.09079 -2.16284,-1.12668 -0.79156,-0.0339 -1.5919,0.27667 -2.15345,0.83557"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 688.35038,105.60417 c -0.34291,-0.021 -0.67986,-0.13271 -0.96738,-0.32076 -0.28752,-0.18805 -0.52493,-0.45197 -0.68159,-0.75773 -0.22615,-0.44137 -0.27968,-0.95782 -0.21342,-1.4493 0.0663,-0.49149 0.24657,-0.96132 0.46711,-1.40551 0.52564,-1.05872 1.29001,-1.998073 2.21979,-2.727944"
+ id="path164012"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163615"
+ inkscape:original-d="m 688.35038,105.60417 c -1.12021,-0.88842 -0.5494,-0.14827 -1.64897,-1.07849 -1.09958,-0.9302 0.19053,-1.75544 0.25369,-2.85481 0.0632,-1.09938 1.77608,-2.516734 2.21979,-2.727944"
+ sodipodi:nodetypes="cccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="ccccc"
+ inkscape:original-d="m 678.17919,117.2974 0.0935,1.83339 c -0.0114,0.94605 -1.57444,3.19538 -2.23006,3.5441 -0.65561,0.34873 -1.64744,1.19432 -2.23963,2.09058 -0.5922,0.89625 -2.53928,0.86565 -3.63886,-0.70198"
+ inkscape:path-effect="#path-effect163619"
+ inkscape:connector-curvature="0"
+ id="path164014"
+ d="m 678.17919,117.2974 0.0935,1.83339 c 0.019,0.37335 0.03,0.75093 -0.0438,1.11741 -0.0738,0.36648 -0.23099,0.71566 -0.4563,1.01397 -0.22531,0.29831 -0.51338,0.54373 -0.81663,0.76235 -0.30325,0.21862 -0.62323,0.41457 -0.91332,0.65037 -0.3976,0.3232 -0.73099,0.71664 -1.07802,1.09362 -0.34704,0.37698 -0.71589,0.74422 -1.16161,0.99696 -0.58472,0.33156 -1.28881,0.44708 -1.94883,0.31976 -0.66001,-0.12733 -1.2706,-0.49647 -1.69003,-1.02174"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cszsccc"
+ inkscape:connector-curvature="0"
+ d="m 680.97887,116.32503 c -0.20725,-0.54651 -0.20725,-1.63953 -0.82897,-1.63953 -0.62171,0 -1.19648,-0.40988 -1.808,-0.40988 -0.61152,0 -1.18629,0.40988 -1.80801,0.40988 -0.62172,0 -0.62172,1.09302 -0.82896,1.63953 -0.20725,0.54651 2.63697,2.32265 2.63697,2.32265 0,0 2.84421,-1.77614 2.63697,-2.32265 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path164016"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <ellipse
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#000000;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="ellipse164018"
+ cx="670.97522"
+ cy="112.94144"
+ rx="2.5586886"
+ ry="2.6315403"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <ellipse
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ ry="1.3111838"
+ rx="1.2582382"
+ cy="112.2009"
+ cx="670.12122"
+ id="ellipse164020"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 668.68054,109.52895 c 0.51006,-0.66669 1.32419,-1.09079 2.16284,-1.12668 0.79156,-0.0339 1.5919,0.27667 2.15345,0.83557"
+ id="path164022"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163629"
+ inkscape:original-d="m 668.68054,109.52895 c 0.77869,-0.31413 1.37767,-0.81293 2.16284,-1.12668 0.78518,-0.31375 1.50033,0.61846 2.15345,0.83557"
+ sodipodi:nodetypes="csc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cccc"
+ inkscape:original-d="m 667.89668,105.60417 c 1.12021,-0.88842 0.5494,-0.14827 1.64897,-1.07849 1.09958,-0.9302 -0.19053,-1.75544 -0.25369,-2.85481 -0.0632,-1.09938 -1.77608,-2.516734 -2.21979,-2.727944"
+ inkscape:path-effect="#path-effect163633"
+ inkscape:connector-curvature="0"
+ id="path164024"
+ d="m 667.89668,105.60417 c 0.34291,-0.021 0.67986,-0.13271 0.96738,-0.32076 0.28752,-0.18805 0.52493,-0.45197 0.68159,-0.75773 0.22615,-0.44137 0.27968,-0.95782 0.21342,-1.4493 -0.0663,-0.49149 -0.24657,-0.96132 -0.46711,-1.40551 -0.52564,-1.05872 -1.29001,-1.998073 -2.21979,-2.727944"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path164026"
+ d="m 678.45158,127.93405 v 3.63912"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cscccc"
+ inkscape:connector-curvature="0"
+ id="path164028"
+ d="m 671.55002,100.46623 c 0,0 -2.51266,1.87089 -3.38243,2.15152 -0.86976,0.28063 -4.44548,0.74835 -4.44548,0.74835 l 0.0966,2.80633 c 0,0 1.54625,0.28064 3.28579,-0.0935 1.73954,-0.37418 14.20621,-0.46772 14.20621,-0.46772"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cc"
+ style="fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 700.13359,107.36883 -6.8301,1.80907"
+ id="path164030"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="czc"
+ inkscape:connector-curvature="0"
+ d="m 682.85977,124.87214 c -2.21493,1.35318 -4.81675,1.26558 -4.4536,1.26558 0.36315,0 -2.23867,0.0876 -4.4536,-1.26558"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path164032"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:connector-curvature="0"
+ id="path164034"
+ d="m 690.79253,127.7282 c -1.45053,0.14024 -2.01604,-0.91633 -2.5656,-1.36858 -1.09913,-0.90452 -0.1113,-1.91988 -0.23409,-2.39785"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="csc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:connector-curvature="0"
+ id="path164036"
+ d="m 660.77576,124.86583 4.67858,-3.21224"
+ style="fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="cc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <ellipse
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="ellipse164038"
+ cx="686.58099"
+ cy="112.2009"
+ rx="1.2582382"
+ ry="1.3111838"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:original-d="m 677.59167,116.05505 c -0.13203,0.41866 -0.35251,0.83758 -0.52917,1.25677"
+ inkscape:path-effect="#path-effect163651"
+ inkscape:connector-curvature="0"
+ id="path164040"
+ d="m 677.59167,116.05505 -0.52917,1.25677"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 679.17915,116.05505 0.52917,1.25677"
+ id="path164042"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163655"
+ inkscape:original-d="m 679.17915,116.05505 c 0.13203,0.41866 0.35251,0.83758 0.52917,1.25677"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cc"
+ style="fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 695.61685,124.86583 -4.67858,-3.21224"
+ id="path164044"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 685.11528,100.46623 c 0,0 2.51266,1.87089 3.38243,2.15152 0.86976,0.28063 4.44548,0.74835 4.44548,0.74835 l -0.0966,2.80633 c 0,0 -1.54625,0.28064 -3.28579,-0.0935 -1.73954,-0.37418 -14.20621,-0.46772 -14.20621,-0.46772"
+ id="path164046"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cscccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cccccccc"
+ inkscape:connector-curvature="0"
+ id="path164048"
+ d="m 685.05575,107.26614 5.96227,-0.7956 -9.18023,-1.59078 -8.88926,0.0497 -6.9128,1.24282 3.69017,1.09388 7.66492,0.19883 z"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path164050"
+ d="m 676.81873,99.653466 -2.18282,-0.0661 -1.91823,-2.38125 0.0661,-2.910414 3.16158,-0.842507 c 1.69501,-0.584128 3.19616,-0.406767 4.77192,0 2.72967,0.795783 3.24708,0.842507 3.24708,0.842507 l 0.0661,2.910414 -1.91823,2.38125 -2.18282,0.0661"
+ style="fill:#fcda5d;fill-opacity:1;fill-rule:evenodd;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="cccccccccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ style="fill:#fde385;fill-opacity:1;fill-rule:evenodd;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 681.51475,97.860416 0.46611,-5.424686 -3.46306,-0.577161 -3.46269,0.577161 0.46575,5.424686 2.99695,3.704164 z"
+ id="path164052"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 678.57794,117.2974 -0.0935,1.83339 c -0.019,0.37335 -0.03,0.75093 0.0438,1.11741 0.0738,0.36648 0.23099,0.71566 0.4563,1.01397 0.22531,0.29831 0.51338,0.54373 0.81663,0.76235 0.30325,0.21862 0.62323,0.41457 0.91332,0.65037 0.3976,0.3232 0.73099,0.71664 1.07802,1.09362 0.34704,0.37698 0.71589,0.74422 1.16161,0.99696 0.58472,0.33156 1.28881,0.44708 1.94883,0.31976 0.66001,-0.12733 1.2706,-0.49647 1.69003,-1.02174"
+ id="path164000"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163597"
+ inkscape:original-d="m 678.57794,117.2974 -0.0935,1.83339 c 0.0114,0.94605 1.57444,3.19538 2.23006,3.5441 0.65561,0.34873 1.64744,1.19432 2.23963,2.09058 0.5922,0.89625 2.53928,0.86565 3.63886,-0.70198"
+ sodipodi:nodetypes="ccccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ </g>
+ <rect
+ transform="scale(1,-1)"
+ ry="0.2606394"
+ y="-173.33992"
+ x="620.00122"
+ height="119.89413"
+ width="266.43546"
+ id="rect5042"
+ style="opacity:1;fill:url(#linearGradient5243);fill-opacity:1;stroke:#dedede;stroke-width:0.79374993;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ inkscape:export-xdpi="45.436005"
+ inkscape:export-ydpi="45.436005" />
+ <text
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:37.59195328px;line-height:22.55517006px;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3c6eb4;fill-opacity:1;stroke:none;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="711.80151"
+ y="124.85424"
+ id="text872"><tspan
+ sodipodi:role="line"
+ id="tspan870"
+ x="711.80151"
+ y="124.85424"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Montserrat;-inkscape-font-specification:Montserrat;fill:#3c6eb4;fill-opacity:1;stroke-width:0.26458332px"
+ dx="0 0 0 0"><tspan
+ id="tspan866"
+ style="fill:#ffffff;stroke-width:0.26458332px">build</tspan><tspan
+ style="fill:#ffcc00"
+ id="tspan4805">ah</tspan></tspan></text>
+ <rect
+ y="-18.590355"
+ x="343.46146"
+ height="92.608887"
+ width="72.777496"
+ id="rect1518"
+ style="opacity:1;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-miterlimit:4;stroke-dasharray:none" />
+ <path
+ sodipodi:nodetypes="ccccsccsccscccccccc"
+ inkscape:connector-curvature="0"
+ id="path1491"
+ d="m 380.68538,11.893462 c 0,0 9.26041,-3.3072896 12.30312,-3.7041696 3.04271,-0.39687 11.37709,2.2489596 11.37709,2.2489596 l 4.49791,7.40833 c 0,0 0.79375,11.24479 0.66146,12.7 -0.13229,1.45521 -2.51354,8.99584 -1.71979,10.45104 0.79375,1.45521 1.05833,11.2448 1.05833,11.2448 0,0 2.11667,3.30729 2.11667,4.10104 0,0.79375 -0.66146,2.57969 -1.98438,2.97656 -1.32291,0.39688 -3.83645,0 -3.83645,0 0,0 -1.7198,-2.3151 -0.92605,-5.22552 0.79375,-2.91042 0.92605,-5.29167 0.79376,-5.95313 -0.13229,-0.66145 -1.71979,-11.90625 -1.85209,-12.43541 -0.13229,-0.52917 -2.77812,-1.85208 -3.175,-2.38125 -0.39687,-0.52917 -5.68854,-5.15937 -5.68854,-5.15937 0,0 -1.5875,4.10104 -2.24896,4.49791 -0.66146,0.39688 -3.43958,2.38125 -3.43958,2.38125 0,0 -1.50186,3.01265 -2.53085,3.1062 -1.02899,0.0935 -2.89987,-0.65481 -2.89987,-0.65481 l -2.05798,-0.52091 -3.18051,-13.18975 0.28064,-7.95127 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="cccccccccccscccccccccsccccccccccc"
+ style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 359.53003,20.313802 c -0.12279,0.47797 0.86504,1.49333 -0.23409,2.39785 -2.46316,1.66439 -2.97507,2.37084 -4.01053,5.98286 -0.20307,5.23646 0.50134,8.87274 1.78594,10.78177 2.05229,3.9521 -0.6819,3.2593 0.99219,6.54844 0.52916,0.66146 3.9026,8.53282 4.16719,9.19427 0.26458,0.66146 0.59531,2.57969 0.19843,3.10885 -0.39687,0.52917 -2.77812,4.63021 -2.77812,4.63021 0,0 -1.5875,1.05834 -1.32292,2.38125 0.26458,1.32292 0.92604,2.31511 0.92604,2.31511 h 3.96875 c 0,0 3.57188,-2.71198 3.57188,-3.24115 0,-0.52917 -0.66146,-3.30729 -0.66146,-3.30729 l -0.46301,-4.36563 c 2.01397,-13.42842 0.50081,-10.95336 -0.33074,-12.83229 l 0.52917,-1.98437 3.55411,1.5875 3.02494,-1.5875 0.52917,1.98437 c -0.83155,1.87893 -2.34471,-0.59613 -0.33074,12.83229 l -0.46301,4.36563 c 0,0 -0.66146,2.77812 -0.66146,3.30729 0,0.52917 3.57188,3.24115 3.57188,3.24115 h 3.96875 c 0,0 0.66146,-0.99219 0.92604,-2.31511 0.26458,-1.32291 -1.32292,-2.38125 -1.32292,-2.38125 0,0 -2.38125,-4.10104 -2.77812,-4.63021 -0.39688,-0.52916 -0.0661,-2.44739 0.19843,-3.10885 0.26459,-0.66145 3.63803,-8.53281 4.16719,-9.19427 1.67409,-3.28914 -1.0601,-2.59634 0.99219,-6.54844 1.2846,-1.90903 1.98901,-5.54531 1.78594,-10.78177 -1.03546,-3.61202 -1.54737,-4.31847 -4.01053,-5.98286 -1.09913,-0.90452 -0.1113,-1.91988 -0.23409,-2.39785"
+ id="path5177"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:#cccccc;fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 368.73121,13.038502 c -0.34914,-0.002 -0.69863,0.044 -1.03586,0.14075 -2.10393,0.68594 -2.93218,1.30639 -2.55731,3.44971 0.43873,2.20092 2.18697,4.04843 3.96734,3.90808 1.78019,0.13991 3.5281,-1.70739 3.96677,-3.90808 0.37487,-2.14332 -0.45337,-2.76377 -2.5573,-3.44971 -0.33722,-0.0967 -0.68672,-0.14308 -1.03586,-0.14075 -0.12482,0.001 -0.24946,0.0116 -0.37361,0.0253 -0.12433,-0.014 -0.24916,-0.0242 -0.37417,-0.0253 z"
+ id="path5169"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 372.82385,10.328142 c 0.91986,0.949198 1.75512,1.980345 2.49266,3.07723 0.68351,1.016538 1.29935,2.144009 1.34344,3.36818 0.0249,0.691162 -0.135,1.381331 -0.0846,2.071101 0.0252,0.344885 0.10516,0.690663 0.27704,0.990724 0.17189,0.30006 0.44195,0.551707 0.77047,0.659655 0.22103,0.07263 0.46179,0.0784 0.68921,0.02928 0.22741,-0.04912 0.44164,-0.151933 0.6304,-0.287958 0.37751,-0.27205 0.64758,-0.668649 0.86173,-1.081763 0.52388,-1.010617 0.75221,-2.152021 0.83092,-3.287629 0.0787,-1.135607 0.0138,-2.275487 -0.0167,-3.413411 -0.0193,-0.721555 -0.0232,-1.455766 0.17755,-2.1491 0.23128,-0.7987447 0.71878,-1.4934069 1.15186,-2.2032813 0.43309,-0.7098743 0.82802,-1.4854539 0.83801,-2.3169483 0.007,-0.5747903 -0.1711,-1.1330804 -0.31075,-1.6906912 -0.13965,-0.5576108 -0.24144,-1.1492717 -0.0841,-1.7021388 0.10561,-0.3710012 0.32101,-0.6990447 0.51086,-1.03483 0.95093,-1.68187974 1.29524,-3.6863801 1.02955,-5.6001207 -0.26569,-1.9137406 -1.12956,-3.730166 -2.39332,-5.1916383 -0.0591,-0.068382 -0.11962,-0.1364931 -0.19144,-0.1913962 -0.0718,-0.054903 -0.15638,-0.096366 -0.24644,-0.1042628 -0.0833,-0.0073 -0.16782,0.014711 -0.24093,0.055315 -0.0731,0.040604 -0.13525,0.099152 -0.18648,0.1652524 -0.10245,0.1322011 -0.16121,0.2919586 -0.22941,0.4446733 -0.34883,0.7810518 -0.96388,1.4087409 -1.59109,1.9903954 -0.62722,0.5816545 -1.28716,1.144393 -1.76597,1.8532426 -0.51192,0.757874 -0.79212,1.6477797 -0.99797,2.5388806 -0.20585,0.8911008 -0.34453,1.79879041 -0.60719,2.67482837 -0.0931,0.3103316 -0.2015,0.61604527 -0.32481,0.91564 -0.83771,-0.13942617 -1.68628,-0.21351766 -2.53548,-0.22138 -0.92151,-0.008532 -1.84374,0.0609538 -2.75357,0.20747 0.83081,0.40873943 1.56474,1.01280813 2.12562,1.74951003 0.68638,0.9015429 1.10965,2.0009911 1.20503,3.13006 z"
+ id="path5171"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5036"
+ inkscape:original-d="m 372.82385,10.328142 c 0.99336,-0.4713196 1.43537,2.00637 2.49266,3.07723 0.52864,0.53543 0.96775,1.92236 1.34344,3.36818 0.37569,1.44582 0.68796,2.95051 0.96292,3.72148 0.54992,1.54193 1.74318,-1.25801 2.18134,-1.34044 0.9632,-0.18121 0.58141,-6.38011 0.81425,-6.70104 0.2328,-0.32092 0.13498,0.0561 0.17755,-2.1491 0.0426,-2.2051796 1.12284,-2.2079796 1.98987,-4.5202296 0.86703,-2.31224 -0.17616,-2.45687 -0.39481,-3.39283 -0.21864,-0.93597 0.34029,-0.69019 0.51086,-1.03483 0.17058,-0.34465 -1.3155,-7.194799 -1.36377,-10.791759 -0.0483,-3.5969404 -0.29221,-0.197401 -0.43788,-0.295659 -0.14566,-0.09827 -0.43817,0.4432 -0.65682,0.665241 -0.21864,0.222049 -2.23834,2.562126 -3.35706,3.843638 -1.11874,1.28151 -0.59065,3.543319 -1.60516,5.21370897 -1.0145,1.67040003 -0.26164,0.57279 -0.32481,0.91564 -1.47046,-1.47997 -2.08084,-0.28081 -2.53548,-0.22138 -0.45464,0.0595 -1.5473,-1.02086 -2.75357,0.20747 0.25395,0.23578003 1.36429,0.99982003 2.12562,1.74951003 0.76132,0.74967 1.58531,2.48745 1.20503,3.13006 -0.20459,1.02558 -1.79371,4.5552396 -0.37418,4.5551096 z"
+ sodipodi:nodetypes="csscscccscccccccscscc" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 369.32062,13.836522 -0.0935,1.83339 c -0.019,0.373351 -0.03,0.750935 0.0438,1.117412 0.0738,0.366477 0.23099,0.715653 0.4563,1.013964 0.22531,0.298311 0.51338,0.543732 0.81663,0.762351 0.30325,0.218619 0.62323,0.414571 0.91332,0.650373 0.3976,0.323198 0.73099,0.716638 1.07802,1.093618 0.34704,0.376981 0.71589,0.744223 1.16161,0.996962 0.58472,0.331557 1.28881,0.447081 1.94883,0.319756 0.66001,-0.127324 1.2706,-0.496466 1.69003,-1.021736"
+ id="path5173"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect5003"
+ inkscape:original-d="m 369.32062,13.836522 -0.0935,1.83339 c 0.0114,0.94605 1.57444,3.19538 2.23006,3.5441 0.65561,0.34873 1.64744,1.19432 2.23963,2.09058 0.5922,0.89625 2.53928,0.86565 3.63886,-0.70198"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="cccc"
+ inkscape:connector-curvature="0"
+ id="path5175"
+ d="m 360.32702,-6.4851386 c 2.35234,-2.846752 3.63135,-2.197639 6.36102,-2.993422 1.69501,-0.5841284 3.19616,-0.406767 4.77192,0 2.72967,0.795783 3.49419,0.14667 5.84653,2.993422"
+ style="fill:none;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="csscscccscccccccscscc"
+ inkscape:original-d="m 365.34935,10.328142 c -0.99336,-0.4713196 -1.43537,2.00637 -2.49266,3.07723 -0.52864,0.53543 -0.96775,1.92236 -1.34344,3.36818 -0.37569,1.44582 -0.68796,2.95051 -0.96292,3.72148 -0.54992,1.54193 -1.74318,-1.25801 -2.18134,-1.34044 -0.9632,-0.18121 -0.58141,-6.38011 -0.81425,-6.70104 -0.2328,-0.32092 -0.13498,0.0561 -0.17755,-2.1491 -0.0426,-2.2051796 -1.12284,-2.2079796 -1.98987,-4.5202296 -0.86703,-2.31224 0.17616,-2.45687 0.39481,-3.39283 0.21864,-0.93597 -0.34029,-0.69019 -0.51086,-1.03483 -0.17058,-0.34465 1.3155,-7.194799 1.36377,-10.791759 0.0483,-3.5969404 0.29221,-0.197401 0.43788,-0.295659 0.14566,-0.09827 0.43817,0.4432 0.65682,0.665241 0.21864,0.222049 2.23834,2.562126 3.35706,3.843638 1.11874,1.28151 0.59065,3.543319 1.60516,5.21370897 1.0145,1.67040003 0.26164,0.57279 0.32481,0.91564 1.47046,-1.47997 2.08084,-0.28081 2.53548,-0.22138 0.45464,0.0595 1.5473,-1.02086 2.75357,0.20747 -0.25395,0.23578003 -1.36429,0.99982003 -2.12562,1.74951003 -0.76132,0.74967 -1.58531,2.48745 -1.20503,3.13006 0.20459,1.02558 1.79371,4.5552396 0.37418,4.5551096 z"
+ inkscape:path-effect="#path-effect4214"
+ inkscape:connector-curvature="0"
+ id="path5181"
+ d="m 365.34935,10.328142 c -0.91986,0.949198 -1.75512,1.980345 -2.49266,3.07723 -0.68351,1.016538 -1.29935,2.144009 -1.34344,3.36818 -0.0249,0.691162 0.135,1.381331 0.0846,2.071101 -0.0252,0.344885 -0.10516,0.690663 -0.27704,0.990724 -0.17189,0.30006 -0.44195,0.551707 -0.77047,0.659655 -0.22103,0.07263 -0.46179,0.0784 -0.68921,0.02928 -0.22741,-0.04912 -0.44164,-0.151933 -0.6304,-0.287958 -0.37751,-0.27205 -0.64758,-0.668649 -0.86173,-1.081763 -0.52388,-1.010617 -0.75221,-2.152021 -0.83092,-3.287629 -0.0787,-1.135607 -0.0138,-2.275487 0.0167,-3.413411 0.0193,-0.721555 0.0232,-1.455766 -0.17755,-2.1491 -0.23128,-0.7987447 -0.71878,-1.4934069 -1.15186,-2.2032813 -0.43309,-0.7098743 -0.82802,-1.4854539 -0.83801,-2.3169483 -0.007,-0.5747903 0.1711,-1.1330804 0.31075,-1.6906912 0.13965,-0.5576108 0.24144,-1.1492717 0.0841,-1.7021388 -0.10561,-0.3710012 -0.32101,-0.6990447 -0.51086,-1.03483 -0.95093,-1.68187974 -1.29524,-3.6863801 -1.02955,-5.6001207 0.26569,-1.9137406 1.12956,-3.730166 2.39332,-5.1916383 0.0591,-0.068382 0.11962,-0.1364931 0.19144,-0.1913962 0.0718,-0.054903 0.15638,-0.096366 0.24644,-0.1042628 0.0833,-0.0073 0.16782,0.014711 0.24093,0.055315 0.0731,0.040604 0.13525,0.099152 0.18648,0.1652524 0.10245,0.1322011 0.16121,0.2919586 0.22941,0.4446733 0.34883,0.7810518 0.96388,1.4087409 1.59109,1.9903954 0.62722,0.5816545 1.28716,1.144393 1.76597,1.8532426 0.51192,0.757874 0.79212,1.6477797 0.99797,2.5388806 0.20585,0.8911008 0.34453,1.79879041 0.60719,2.67482837 0.0931,0.3103316 0.2015,0.61604527 0.32481,0.91564 0.83771,-0.13942617 1.68628,-0.21351766 2.53548,-0.22138 0.92151,-0.008532 1.84374,0.0609538 2.75357,0.20747 -0.83081,0.40873943 -1.56474,1.01280813 -2.12562,1.74951003 -0.68638,0.9015429 -1.10965,2.0009911 -1.20503,3.13006 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <ellipse
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ ry="2.6315403"
+ rx="2.5586886"
+ cy="9.4805746"
+ cx="-376.54404"
+ id="ellipse5183"
+ style="fill:#000000;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ transform="scale(-1,1)" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="csc"
+ inkscape:original-d="m 378.83832,6.0680824 c -0.77869,-0.31413 -1.37767,-0.81293 -2.16284,-1.12668 -0.78518,-0.31375 -1.50033,0.61846 -2.15345,0.83557"
+ inkscape:path-effect="#path-effect627"
+ inkscape:connector-curvature="0"
+ id="path5185"
+ d="m 378.83832,6.0680824 c -0.51006,-0.6666855 -1.32419,-1.0907856 -2.16284,-1.12668 -0.79156,-0.033879 -1.5919,0.2766676 -2.15345,0.83557"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 379.09306,2.1433024 c -0.34291,-0.020986 -0.67986,-0.1327106 -0.96738,-0.3207592 -0.28752,-0.1880486 -0.52493,-0.4519755 -0.68159,-0.7577308 -0.22615,-0.44136532 -0.27968,-0.95782174 -0.21342,-1.44930386 0.0663,-0.49148211 0.24657,-0.96131134 0.46711,-1.40550614 0.52564,-1.0587252 1.29001,-1.9980759 2.21979,-2.727949"
+ id="path5187"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect4229"
+ inkscape:original-d="m 379.09306,2.1433024 c -1.12021,-0.88842 -0.5494,-0.14827 -1.64897,-1.07849 -1.09958,-0.93020003 0.19053,-1.75544003 0.25369,-2.85481 0.0632,-1.09939 1.77608,-2.516739 2.21979,-2.727949"
+ sodipodi:nodetypes="cccc" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="ccccc"
+ inkscape:original-d="m 368.92187,13.836522 0.0935,1.83339 c -0.0114,0.94605 -1.57444,3.19538 -2.23006,3.5441 -0.65561,0.34873 -1.64744,1.19432 -2.23963,2.09058 -0.5922,0.89625 -2.53928,0.86565 -3.63886,-0.70198"
+ inkscape:path-effect="#path-effect623"
+ inkscape:connector-curvature="0"
+ id="path5189"
+ d="m 368.92187,13.836522 0.0935,1.83339 c 0.019,0.373351 0.03,0.750935 -0.0438,1.117412 -0.0738,0.366477 -0.23099,0.715653 -0.4563,1.013964 -0.22531,0.298311 -0.51338,0.543732 -0.81663,0.762351 -0.30325,0.218619 -0.62323,0.414571 -0.91332,0.650373 -0.3976,0.323198 -0.73099,0.716638 -1.07802,1.093618 -0.34704,0.376981 -0.71589,0.744223 -1.16161,0.996962 -0.58472,0.331557 -1.28881,0.447081 -1.94883,0.319756 -0.66001,-0.127324 -1.2706,-0.496466 -1.69003,-1.021736"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="cszsccc"
+ inkscape:connector-curvature="0"
+ d="m 371.72155,12.864152 c -0.20725,-0.5465 -0.20725,-1.63952 -0.82897,-1.63952 -0.62171,0 -1.19648,-0.40988 -1.808,-0.40988 -0.61152,0 -1.18629,0.40988 -1.80801,0.40988 -0.62172,0 -0.62172,1.09302 -0.82896,1.63952 -0.20725,0.54651 2.63697,2.32265 2.63697,2.32265 0,0 2.84421,-1.77614 2.63697,-2.32265 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path5191" />
+ <ellipse
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:#000000;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="ellipse5193"
+ cx="361.71756"
+ cy="9.4805746"
+ rx="2.5586886"
+ ry="2.6315403" />
+ <ellipse
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ ry="1.3111838"
+ rx="1.2582382"
+ cy="8.7400274"
+ cx="360.86356"
+ id="ellipse5195"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 359.42322,6.0680824 c 0.51006,-0.6666855 1.32419,-1.0907856 2.16284,-1.12668 0.79156,-0.033879 1.5919,0.2766676 2.15345,0.83557"
+ id="path5197"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect617"
+ inkscape:original-d="m 359.42322,6.0680824 c 0.77869,-0.31413 1.37767,-0.81293 2.16284,-1.12668 0.78518,-0.31375 1.50033,0.61846 2.15345,0.83557"
+ sodipodi:nodetypes="csc" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="cccc"
+ inkscape:original-d="m 358.63936,2.1433024 c 1.12021,-0.88842 0.5494,-0.14827 1.64897,-1.07849 1.09958,-0.93020003 -0.19053,-1.75544003 -0.25369,-2.85481 -0.0632,-1.09939 -1.77608,-2.516739 -2.21979,-2.727949"
+ inkscape:path-effect="#path-effect4217"
+ inkscape:connector-curvature="0"
+ id="path5199"
+ d="m 358.63936,2.1433024 c 0.34291,-0.020986 0.67986,-0.1327106 0.96738,-0.3207592 0.28752,-0.1880486 0.52493,-0.4519755 0.68159,-0.7577308 0.22615,-0.44136532 0.27968,-0.95782174 0.21342,-1.44930386 -0.0663,-0.49148211 -0.24657,-0.96131134 -0.46711,-1.40550614 -0.52564,-1.0587252 -1.29001,-1.9980759 -2.21979,-2.727949"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path5201"
+ d="m 369.19426,25.002342 v 3.63911"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="cccc"
+ style="fill:#ffffff;fill-opacity:1;stroke:#ffcc00;stroke-width:0.71251804;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 367.75159,-2.0301976 -1.94157,-8.1178874 c -1.60631,0.112464 -2.9533,0.6616554 -4.3916,1.2343824 l 3.82163,7.589445"
+ id="path5205"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="cscccc"
+ inkscape:connector-curvature="0"
+ id="path5207"
+ d="m 362.2927,-2.9946476 c 0,0 -2.51266,1.8709 -3.38243,2.15152997 -0.86976,0.28063 -4.44548,0.74835 -4.44548,0.74835 l 0.0966,2.80633003 c 0,0 1.54625,0.28064 3.28579,-0.0935 1.73954,-0.37418 14.20621,-0.46772 14.20621,-0.46772"
+ style="fill:#ffffff;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="czc"
+ inkscape:connector-curvature="0"
+ d="m 373.60245,21.411262 c -2.21493,1.35318 -4.81675,1.26558 -4.4536,1.26558 0.36315,0 -2.23867,0.0876 -4.4536,-1.26558"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path5211" />
+ <ellipse
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="ellipse5221"
+ cx="377.32333"
+ cy="8.7400274"
+ rx="1.2582382"
+ ry="1.3111838" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ inkscape:original-d="m 368.33435,12.594172 c -0.13203,0.41866 -0.35251,0.83758 -0.52917,1.25677"
+ inkscape:path-effect="#path-effect605"
+ inkscape:connector-curvature="0"
+ id="path5223"
+ d="m 368.33435,12.594172 -0.52917,1.25677"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 369.92183,12.594172 0.52917,1.25677"
+ id="path5225"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect602"
+ inkscape:original-d="m 369.92183,12.594172 c 0.13203,0.41866 0.35251,0.83758 0.52917,1.25677" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path378"
+ d="m 370.35946,-2.0302276 1.94157,-8.1178574 c 1.60631,0.112464 2.9533,0.6616554 4.3916,1.2343824 l -3.82163,7.589415"
+ style="fill:#ffffff;fill-opacity:1;stroke:#ffcc00;stroke-width:0.71251804;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ sodipodi:nodetypes="cccc"
+ inkscape:export-xdpi="89.962868"
+ inkscape:export-ydpi="89.962868" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ style="fill:#ffffff;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 375.85796,-2.9946476 c 0,0 2.51266,1.8709 3.38243,2.15152997 0.86976,0.28063 4.44548,0.74835 4.44548,0.74835 l -0.0966,2.80633003 c 0,0 -1.54625,0.28064 -3.28579,-0.0935 -1.73954,-0.37418 -14.20621,-0.46772 -14.20621,-0.46772"
+ id="path5213"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cscccc" />
+ <path
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ sodipodi:nodetypes="cccccccc"
+ inkscape:connector-curvature="0"
+ id="path5215"
+ d="m 375.79843,3.8052724 5.96227,-0.7956 -9.18023,-1.59078 -8.88926,0.0497 -6.9128,1.24282 3.69017,1.09388 7.66492,0.19883 z"
+ style="fill:#ffffff;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path1471"
+ d="m 365.97886,41.973692 c 0,0 -1.46543,-1.12693 -2.39798,-2.4208 -0.93255,-1.29388 -1.73188,-3.506 -1.73188,-3.506"
+ style="fill:none;stroke:#000000;stroke-width:0.63273007;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1473"
+ d="m 359.84947,67.455972 v -3.175"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 363.02446,67.455972 v -3.175"
+ id="path1475"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1477"
+ d="m 361.43695,67.455972 v -3.175"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ sodipodi:nodetypes="ccc"
+ style="fill:none;stroke:#000000;stroke-width:0.63273007;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 372.33769,41.973692 c 0,0 1.46543,-1.12693 2.39798,-2.4208 0.93255,-1.29388 1.73188,-3.506 1.73188,-3.506"
+ id="path1481"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 378.46708,67.455972 v -3.175"
+ id="path1483"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1485"
+ d="m 375.29209,67.455972 v -3.175"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 376.8796,67.455972 v -3.175"
+ id="path1487"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1495"
+ d="m 388.61772,35.197632 2.05797,-6.64164"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1497"
+ d="m 409.15876,59.518482 v -3.175"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 405.98377,59.518482 v -3.175"
+ id="path1499"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1501"
+ d="m 407.57128,59.518482 v -3.175"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1503"
+ d="m 390.6757,33.794472 c 0,0 1.77735,5.23848 1.40317,6.26747 -0.37418,1.02899 -4.11595,11.88013 -4.11595,11.88013 l -2.5257,2.61924 -4.39658,-1.02899 -0.18709,-1.68379 1.49671,-2.89988 c 0,0 4.58367,-2.05797 4.49012,-2.61924 -0.0935,-0.56127 1.12254,-5.33203 1.02899,-5.70621 -0.0935,-0.37417 -1.12253,-2.61924 -1.12253,-2.61924"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1505"
+ d="m 384.78241,54.000052 1.12253,-3.3676"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 383.44345,53.751502 1.12253,-3.3676"
+ id="path1507"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1509"
+ d="m 382.10449,53.564422 1.12253,-3.3676"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1511"
+ d="m 393.94975,28.181812 c 0.93533,-2.45959 -0.15294,-5.15831 -1.53626,-7.21894 -2.38319,-3.57049 -4.98951,-6.98573 -7.63108,-10.3674 -0.0912,-0.20258 1.52407,-0.7810696 2.09269,-1.0749896 2.1941,-0.88433 4.50994,-1.85515 6.92341,-1.66132 3.42757,0.63548 6.79301,1.56299 10.16049,2.4556796 1.71498,2.46333 3.42996,4.92667 5.14494,7.39 0.0624,2.21388 0.12473,4.42777 0.18709,6.64165 -1.68106,-0.92273 -3.37009,-1.97587 -5.27833,-2.3319 -1.6155,0.0381 -3.29216,0.4486 -4.64012,1.36302 -0.7878,0.98011 -1.04746,2.28904 -1.55107,3.43066 -0.2896,0.73051 -0.60073,1.45863 -0.53695,2.25923 -0.0202,0.54667 -0.0405,1.09333 -0.0607,1.64 -1.09137,-0.8419 -2.18273,-1.68379 -3.2741,-2.52569 z"
+ style="fill:#ffffff;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1513"
+ d="m 397.22381,29.491432 c 0,0 1.68379,-7.29646 1.30962,-8.13836 -0.37418,-0.8419 -2.24507,-4.39658 -2.80633,-5.23848 -0.56127,-0.8419 -5.61266,-7.6706396 -5.61266,-7.6706396"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path1515"
+ d="m 408.72976,17.704842 c 0,0 -7.39,-2.15152 -8.13836,-1.49671 -0.74835,0.65481 -3.36759,-7.6706296 -3.36759,-7.6706296"
+ style="fill:none;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ sodipodi:nodetypes="cscccc"
+ inkscape:connector-curvature="0"
+ id="path1520"
+ d="m 403.14276,10.221302 c 0,0 6.5481,2.99342 4.30304,-2.9934196 -2.24506,-5.98684 -6.17393,-9.35444 -6.17393,-9.35444 4.77805,2.88445997 13.97147,10.52375 9.54152,16.6508896 -1.30961,1.49672 -1.87088,2.24507 -1.87088,2.24507 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <rect
+ inkscape:export-ydpi="45.436005"
+ inkscape:export-xdpi="45.436005"
+ style="opacity:1;fill:none;fill-opacity:1;stroke:none;stroke-width:0.79374993;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect5235"
+ width="266.43546"
+ height="119.89413"
+ x="620.00122"
+ y="-41.048244"
+ ry="0.2606394"
+ transform="scale(1,-1)" />
+ <g
+ transform="translate(-1.0583333,-129.49087)"
+ id="g163776" />
+ <text
+ inkscape:export-ydpi="89.962868"
+ inkscape:export-xdpi="89.962868"
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:37.59195328px;line-height:22.55517006px;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3c6eb4;fill-opacity:1;stroke:none;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="711.80151"
+ y="-129.14578"
+ id="text163986"><tspan
+ sodipodi:role="line"
+ id="tspan163984"
+ x="711.80151"
+ y="-129.14578"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Montserrat;-inkscape-font-specification:Montserrat;fill:#3c6eb4;fill-opacity:1;stroke-width:0.26458332px"
+ dx="0 0 0 0"><tspan
+ id="tspan163980"
+ style="fill:#000000;stroke-width:0.26458332px">build</tspan><tspan
+ style="fill:#ffcc00"
+ id="tspan163982">ah</tspan></tspan></text>
+ <g
+ transform="translate(-1.0583333,-118.90753)"
+ id="g164165">
+ <g
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ transform="translate(-118.1493,-10.074071)"
+ id="g164101"
+ style="fill:none;stroke:#000000"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694">
+ <path
+ inkscape:connector-curvature="0"
+ style="opacity:1;fill:none;fill-opacity:1;stroke:#000000;stroke-width:1.0583334;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 796.57913,145.63255 -19.29817,-9.23285 -4.82036,-20.8616 13.2871,-16.780617 21.38926,-0.0641 13.38485,16.701147 -4.69887,20.8897 z"
+ id="path164095" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccccc"
+ transform="matrix(0.26458333,0,0,0.26458333,738.81198,118.53055)"
+ inkscape:connector-curvature="0"
+ id="path164097"
+ d="m 185.87625,59.397934 c -0.86973,0.217518 -2.57155,-0.134058 -2.52494,1.185647 0.29798,1.534184 0.51469,3.25265 -0.0756,4.733265 -2.59999,1.640261 -6.20252,6.520936 -9.25204,6.909269 l 43.94309,20.562507 42.71289,-19.994542 c 1.02895,-0.723043 2.47728,0.557072 1.54478,0.673163 -3.35476,-0.649977 -5.60495,-3.545794 -7.89139,-5.779104 -1.32714,-1.221465 -1.85144,-3.043527 -1.61413,-4.807152 -0.0512,-1.079531 1.14073,-2.378109 0.11981,-3.2134 -1.25435,-0.06792 -2.90233,-0.67841 -3.75435,0.61017 -3.09923,3.191828 -7.98829,4.311297 -12.1138,2.887779 -1.55682,-0.854291 -3.06748,0.550296 -4.47291,1.067862 -6.87259,3.170321 -14.6714,4.278483 -21.96511,2.268758 -3.27701,-0.820909 -6.47851,-1.975772 -9.37028,-3.683317 -1.34887,-0.137072 -2.59577,0.774552 -3.96402,0.837618 -3.77142,0.600908 -7.75015,-0.913634 -10.36088,-3.74839 -0.24699,-0.267363 -0.5888,-0.503792 -0.96111,-0.510133 z m 32.50205,15.39649 c 1.43219,-0.301225 0.54304,1.655686 0.79688,2.500732 0.0233,4.281784 0.0465,8.563566 0.0697,12.845351 -1.3554,0.293932 -1.91251,-0.210418 -1.5934,-1.590991 v -13.77926 c 0.24226,0.0081 0.48452,0.01611 0.72678,0.02417 z"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952" />
+ <path
+ id="path164099"
+ d="m 799.53748,117.44571 c -0.14618,0.005 -0.2976,0.0274 -0.42892,0.009 -2.03273,0.0428 -4.06359,0.0892 -6.0942,0.031 -0.28926,0.11428 -0.24836,0.50228 -0.35967,0.75138 -0.22736,0.98319 -0.0226,1.99923 0.0233,2.99155 0.07,0.84001 0.14093,1.67976 0.21136,2.51974 -1.27629,1.32719 -2.44789,2.78357 -3.30058,4.42195 -0.45703,0.87705 -0.62335,1.88496 -0.49712,2.86391 0.0203,0.83713 0.14089,1.79437 -0.44494,2.48357 -0.2045,0.17656 -0.15243,0.47737 0.0667,0.6134 0.61884,0.5474 1.48745,0.80041 2.29805,0.60306 0.99978,-0.16365 1.63821,-1.03284 2.31768,-1.6955 0.15864,-0.2272 0.63711,-0.45408 0.38551,-0.76998 -0.7675,-1.08459 -1.32972,-2.44507 -1.0697,-3.79098 0.12353,-0.6101 0.64173,-1.02068 1.16892,-1.28675 0.28556,-0.28377 -0.2066,-0.53663 -0.23823,-0.83509 0.0848,-0.56057 0.18974,-1.14181 0.44597,-1.65209 0.25811,-0.42031 0.80937,-0.32417 1.21078,-0.48731 0.63192,-0.22035 1.33566,-0.35672 1.98024,-0.10284 0.44879,0.16718 0.94179,0.19308 1.38079,0.37569 0.40792,0.36381 0.44788,0.95927 0.5917,1.45728 0.0844,0.26471 0.0795,0.56128 -0.12093,0.77463 -0.15859,0.17909 -0.12732,0.47628 0.12661,0.54311 0.63811,0.32204 1.10695,0.97435 1.09296,1.70481 0.0246,1.20969 -0.45512,2.37707 -1.16789,3.33726 -0.10802,0.32476 0.32009,0.49853 0.46974,0.73794 0.65863,0.70094 1.34012,1.50616 2.33267,1.70584 0.88352,0.181 1.86515,-0.13458 2.46135,-0.80718 0.0949,-0.27388 -0.28759,-0.40886 -0.36122,-0.65371 -0.42704,-0.7126 -0.29638,-1.55962 -0.2806,-2.34869 0.15195,-1.12447 -0.16703,-2.25995 -0.74207,-3.22358 -0.84528,-1.44611 -1.89414,-2.76858 -3.0608,-3.96823 0.10988,-1.60331 0.32998,-3.20245 0.33383,-4.81108 -0.019,-0.47827 -0.13757,-0.95613 -0.33435,-1.39061 -0.10996,-0.0918 -0.25069,-0.10579 -0.39687,-0.10129 z m -0.81339,15.38149 c -0.0464,-0.003 -0.0974,0.0351 -0.14366,0.0372 -0.54357,0.44869 -1.198,0.81023 -1.92236,0.80047 -0.71698,0.0539 -1.45907,-0.0997 -2.0345,-0.54881 -0.19522,-0.13527 -0.4874,-0.43793 -0.68678,-0.14831 -0.47322,0.53276 -0.98364,1.03938 -1.4764,1.54771 
-0.15575,0.31923 0.32053,0.38287 0.51935,0.50023 1.12198,0.45684 2.32628,0.77692 3.54397,0.77773 1.30395,-0.0584 2.61944,-0.35107 3.78271,-0.9524 0.27683,-0.2308 -0.0777,-0.51977 -0.23306,-0.70125 -0.43157,-0.4316 -0.83203,-0.91444 -1.30431,-1.29242 -0.0141,-0.0134 -0.0295,-0.019 -0.045,-0.0202 z"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79374993;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
+ inkscape:connector-curvature="0" />
+ </g>
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:connector-curvature="0"
+ id="path164103"
+ d="m 669.39724,96.367584 1.02899,-1.957235 15.43482,-0.460526 1.59025,2.993416 -2.61924,6.332231 -5.42557,0.80592 -3.92887,0.34539 -4.20949,-2.76315 z"
+ style="fill:#fde385;fill-opacity:1;stroke:none;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cccccc"
+ inkscape:connector-curvature="0"
+ id="path164105"
+ d="m 668.72873,96.705238 c 1.29471,-1.620631 2.29404,-2.396806 3.34528,-2.88108 l 0.51322,0.669933 m 14.83217,1.608651 c -1.2947,-1.620635 -2.15245,-2.246185 -3.06212,-2.579834 l -0.52526,0.706599"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:connector-curvature="0"
+ id="path164107"
+ d="m 677.98853,116.49938 c -0.34914,-0.002 -0.69863,0.044 -1.03586,0.14075 -2.10393,0.68594 -2.93218,1.30639 -2.55731,3.44971 0.43873,2.20092 2.18697,4.04843 3.96734,3.90808 1.78019,0.13991 3.5281,-1.70739 3.96677,-3.90808 0.37487,-2.14332 -0.45337,-2.76377 -2.5573,-3.44971 -0.33722,-0.0967 -0.68672,-0.14308 -1.03586,-0.14075 -0.12482,0.001 -0.24946,0.0116 -0.37361,0.0253 -0.12433,-0.014 -0.24916,-0.0242 -0.37417,-0.0253 z"
+ style="fill:#cccccc;fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="csscscccscccccccscscc"
+ inkscape:original-d="m 682.08117,113.78901 c 0.99336,-0.47132 1.43537,2.00637 2.49266,3.07724 0.52864,0.53542 0.96775,1.92235 1.34344,3.36817 0.37569,1.44583 0.68796,2.95052 0.96292,3.72149 0.54992,1.54193 1.74318,-1.25801 2.18134,-1.34044 0.9632,-0.18121 0.58141,-6.38011 0.81425,-6.70104 0.2328,-0.32093 0.13498,0.0561 0.17755,-2.14911 0.0426,-2.20518 1.12284,-2.20798 1.98987,-4.52023 0.86703,-2.31224 -0.17616,-2.45687 -0.39481,-3.39283 -0.21864,-0.93597 0.34029,-0.69019 0.51086,-1.03483 0.17058,-0.34465 -1.3155,-7.194794 -1.36377,-10.791754 -0.0483,-3.596939 -0.29221,-0.197401 -0.43788,-0.295659 -0.14566,-0.09827 -0.43817,0.4432 -0.65682,0.665241 -0.21864,0.222049 -2.23834,2.562126 -3.35706,3.843638 -1.11874,1.28151 -0.59065,3.543314 -1.60516,5.213704 -1.0145,1.6704 -0.26164,0.57279 -0.32481,0.91564 -1.47046,-1.47997 -2.08084,-0.28081 -2.53548,-0.22138 -0.45464,0.0595 -1.5473,-1.02086 -2.75357,0.20747 0.25395,0.23578 1.36429,0.99982 2.12562,1.74951 0.76132,0.74967 1.58531,2.48745 1.20503,3.13006 -0.20459,1.02558 -1.79371,4.55524 -0.37418,4.55511 z"
+ inkscape:path-effect="#path-effect163593"
+ inkscape:connector-curvature="0"
+ id="path164109"
+ d="m 682.08117,113.78901 c 0.91986,0.9492 1.75512,1.98035 2.49266,3.07724 0.68351,1.01654 1.29935,2.144 1.34344,3.36817 0.0249,0.69116 -0.135,1.38133 -0.0846,2.07111 0.0252,0.34488 0.10516,0.69066 0.27705,0.99072 0.17189,0.30006 0.44194,0.55171 0.77047,0.65966 0.22103,0.0726 0.46179,0.0784 0.68921,0.0293 0.22741,-0.0491 0.44164,-0.15193 0.6304,-0.28796 0.37751,-0.27205 0.64758,-0.66865 0.86173,-1.08176 0.52388,-1.01062 0.75221,-2.15202 0.83092,-3.28763 0.0787,-1.13561 0.0138,-2.27549 -0.0167,-3.41341 -0.0193,-0.72156 -0.0232,-1.45577 0.17755,-2.14911 0.23128,-0.79874 0.71878,-1.49341 1.15186,-2.20328 0.43309,-0.70988 0.82802,-1.48546 0.83801,-2.31695 0.007,-0.57479 -0.1711,-1.13308 -0.31075,-1.69069 -0.13965,-0.55761 -0.24144,-1.14927 -0.0841,-1.70214 0.10561,-0.371 0.32101,-0.69904 0.51086,-1.03483 0.95093,-1.68188 1.29523,-3.68638 1.02955,-5.600118 -0.26569,-1.91374 -1.12956,-3.730164 -2.39332,-5.191636 -0.0591,-0.06838 -0.11962,-0.136493 -0.19144,-0.191396 -0.0718,-0.0549 -0.15638,-0.09637 -0.24644,-0.104263 -0.0833,-0.0073 -0.16782,0.01471 -0.24093,0.05531 -0.0731,0.0406 -0.13525,0.09915 -0.18648,0.165253 -0.10245,0.132201 -0.16121,0.291958 -0.22941,0.444673 -0.34883,0.781052 -0.96388,1.408741 -1.59109,1.990395 -0.62722,0.581655 -1.28716,1.144393 -1.76597,1.853243 -0.51192,0.757873 -0.79212,1.647778 -0.99797,2.538874 -0.20585,0.8911 -0.34453,1.79879 -0.60719,2.67483 -0.0931,0.31033 -0.2015,0.61605 -0.32481,0.91564 -0.83771,-0.13943 -1.68628,-0.21352 -2.53548,-0.22138 -0.92151,-0.009 -1.84374,0.0609 -2.75357,0.20747 0.83081,0.40874 1.56474,1.01281 2.12562,1.74951 0.68638,0.90154 1.10965,2.00099 1.20503,3.13006 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:connector-curvature="0"
+ id="path164111"
+ d="m 665.98766,127.54111 c 1.45053,0.14024 2.01604,-0.91633 2.5656,-1.36858 1.09913,-0.90452 0.1113,-1.91988 0.23409,-2.39785"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="csc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="cc"
+ style="fill:none;fill-opacity:1;stroke:#fde385;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 656.79097,107.36883 6.8301,1.80907"
+ id="path164113"
+ inkscape:connector-curvature="0"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 674.60667,113.78901 c -0.91986,0.9492 -1.75512,1.98035 -2.49266,3.07724 -0.68351,1.01654 -1.29935,2.14401 -1.34344,3.36818 -0.0249,0.69116 0.135,1.38133 0.0846,2.0711 -0.0252,0.34489 -0.10516,0.69066 -0.27704,0.99072 -0.17189,0.30007 -0.44195,0.55171 -0.77047,0.65966 -0.22103,0.0726 -0.46179,0.0784 -0.68921,0.0293 -0.22741,-0.0491 -0.44164,-0.15193 -0.6304,-0.28796 -0.37751,-0.27205 -0.64758,-0.66865 -0.86173,-1.08176 -0.52388,-1.01062 -0.75221,-2.15202 -0.83092,-3.28763 -0.0787,-1.13561 -0.0138,-2.27549 0.0167,-3.41341 0.0193,-0.72156 0.0232,-1.45577 -0.17755,-2.14911 -0.23128,-0.79874 -0.71878,-1.49341 -1.15186,-2.20328 -0.43309,-0.70988 -0.82802,-1.48546 -0.83801,-2.31695 -0.007,-0.57479 0.1711,-1.13308 0.31075,-1.69069 0.13965,-0.55761 0.24144,-1.14927 0.0841,-1.70214 -0.10561,-0.371 -0.32101,-0.69904 -0.51086,-1.03483 -0.95093,-1.68188 -1.29523,-3.68638 -1.02955,-5.600118 0.26569,-1.91374 1.12956,-3.730164 2.39332,-5.191636 0.0591,-0.06838 0.11962,-0.136493 0.19144,-0.191396 0.0718,-0.0549 0.15638,-0.09637 0.24644,-0.104263 0.0833,-0.0073 0.16782,0.01471 0.24093,0.05531 0.0731,0.0406 0.13525,0.09915 0.18648,0.165253 0.10245,0.132201 0.16121,0.291958 0.22941,0.444673 0.34883,0.781052 0.96388,1.408741 1.59109,1.990395 0.62722,0.581655 1.28716,1.144393 1.76597,1.853243 0.51192,0.757873 0.79212,1.647778 0.99797,2.538874 0.20585,0.8911 0.34453,1.79879 0.60719,2.67483 0.093,0.31033 0.2015,0.61605 0.32481,0.91564 0.83771,-0.13943 1.68628,-0.21352 2.53548,-0.22138 0.92151,-0.009 1.84374,0.0609 2.75357,0.20747 -0.83081,0.40874 -1.56474,1.01281 -2.12562,1.74951 -0.68638,0.90154 -1.10965,2.00099 -1.20503,3.13006 z"
+ id="path164115"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163605"
+ inkscape:original-d="m 674.60667,113.78901 c -0.99336,-0.47132 -1.43537,2.00637 -2.49266,3.07724 -0.52864,0.53543 -0.96775,1.92236 -1.34344,3.36818 -0.37569,1.44582 -0.68796,2.95051 -0.96292,3.72148 -0.54992,1.54193 -1.74318,-1.25801 -2.18134,-1.34044 -0.9632,-0.18121 -0.58141,-6.38011 -0.81425,-6.70104 -0.2328,-0.32093 -0.13498,0.0561 -0.17755,-2.14911 -0.0426,-2.20518 -1.12284,-2.20798 -1.98987,-4.52023 -0.86703,-2.31224 0.17616,-2.45687 0.39481,-3.39283 0.21864,-0.93597 -0.34029,-0.69019 -0.51086,-1.03483 -0.17058,-0.34465 1.3155,-7.194794 1.36377,-10.791754 0.0483,-3.596939 0.29221,-0.197401 0.43788,-0.295659 0.14566,-0.09827 0.43817,0.4432 0.65682,0.665241 0.21864,0.222049 2.23834,2.562126 3.35706,3.843638 1.11874,1.28151 0.59065,3.543314 1.60516,5.213704 1.0145,1.6704 0.26164,0.57279 0.32481,0.91564 1.47046,-1.47997 2.08084,-0.28081 2.53548,-0.22138 0.45464,0.0595 1.5473,-1.02086 2.75357,0.20747 -0.25395,0.23578 -1.36429,0.99982 -2.12562,1.74951 -0.76132,0.74967 -1.58531,2.48745 -1.20503,3.13006 0.20459,1.02558 1.79371,4.55524 0.37418,4.55511 z"
+ sodipodi:nodetypes="csscscccscccccccscscc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <ellipse
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ transform="scale(-1,1)"
+ style="fill:#000000;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="ellipse164117"
+ cx="-685.8017"
+ cy="112.94144"
+ rx="2.5586886"
+ ry="2.6315403"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 688.09564,109.52895 c -0.51006,-0.66669 -1.32419,-1.09079 -2.16284,-1.12668 -0.79156,-0.0339 -1.5919,0.27667 -2.15345,0.83557"
+ id="path164119"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163611"
+ inkscape:original-d="m 688.09564,109.52895 c -0.77869,-0.31413 -1.37767,-0.81293 -2.16284,-1.12668 -0.78518,-0.31375 -1.50033,0.61846 -2.15345,0.83557"
+ sodipodi:nodetypes="csc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="cccc"
+ inkscape:original-d="m 688.35038,105.60417 c -1.12021,-0.88842 -0.5494,-0.14827 -1.64897,-1.07849 -1.09958,-0.9302 0.19053,-1.75544 0.25369,-2.85481 0.0632,-1.09938 1.77608,-2.516734 2.21979,-2.727944"
+ inkscape:path-effect="#path-effect163615"
+ inkscape:connector-curvature="0"
+ id="path164121"
+ d="m 688.35038,105.60417 c -0.34291,-0.021 -0.67986,-0.13271 -0.96738,-0.32076 -0.28752,-0.18805 -0.52493,-0.45197 -0.68159,-0.75773 -0.22615,-0.44137 -0.27968,-0.95782 -0.21342,-1.4493 0.0663,-0.49149 0.24657,-0.96132 0.46711,-1.40551 0.52564,-1.05872 1.29001,-1.998073 2.21979,-2.727944"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 678.17919,117.2974 0.0935,1.83339 c 0.019,0.37335 0.03,0.75093 -0.0438,1.11741 -0.0738,0.36648 -0.23099,0.71566 -0.4563,1.01397 -0.22531,0.29831 -0.51338,0.54373 -0.81663,0.76235 -0.30325,0.21862 -0.62323,0.41457 -0.91332,0.65037 -0.3976,0.3232 -0.73099,0.71664 -1.07802,1.09362 -0.34704,0.37698 -0.71589,0.74422 -1.16161,0.99696 -0.58472,0.33156 -1.28881,0.44708 -1.94883,0.31976 -0.66001,-0.12733 -1.2706,-0.49647 -1.69003,-1.02174"
+ id="path164123"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163619"
+ inkscape:original-d="m 678.17919,117.2974 0.0935,1.83339 c -0.0114,0.94605 -1.57444,3.19538 -2.23006,3.5441 -0.65561,0.34873 -1.64744,1.19432 -2.23963,2.09058 -0.5922,0.89625 -2.53928,0.86565 -3.63886,-0.70198"
+ sodipodi:nodetypes="ccccc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ id="path164125"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 680.97887,116.32503 c -0.20725,-0.54651 -0.20725,-1.63953 -0.82897,-1.63953 -0.62171,0 -1.19648,-0.40988 -1.808,-0.40988 -0.61152,0 -1.18629,0.40988 -1.80801,0.40988 -0.62172,0 -0.62172,1.09302 -0.82896,1.63953 -0.20725,0.54651 2.63697,2.32265 2.63697,2.32265 0,0 2.84421,-1.77614 2.63697,-2.32265 z"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cszsccc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <ellipse
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ ry="2.6315403"
+ rx="2.5586886"
+ cy="112.94144"
+ cx="670.97522"
+ id="ellipse164127"
+ style="fill:#000000;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <ellipse
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="ellipse164129"
+ cx="670.12122"
+ cy="112.2009"
+ rx="1.2582382"
+ ry="1.3111838"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="csc"
+ inkscape:original-d="m 668.68054,109.52895 c 0.77869,-0.31413 1.37767,-0.81293 2.16284,-1.12668 0.78518,-0.31375 1.50033,0.61846 2.15345,0.83557"
+ inkscape:path-effect="#path-effect163629"
+ inkscape:connector-curvature="0"
+ id="path164131"
+ d="m 668.68054,109.52895 c 0.51006,-0.66669 1.32419,-1.09079 2.16284,-1.12668 0.79156,-0.0339 1.5919,0.27667 2.15345,0.83557"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 667.89668,105.60417 c 0.34291,-0.021 0.67986,-0.13271 0.96738,-0.32076 0.28752,-0.18805 0.52493,-0.45197 0.68159,-0.75773 0.22615,-0.44137 0.27968,-0.95782 0.21342,-1.4493 -0.0663,-0.49149 -0.24657,-0.96132 -0.46711,-1.40551 -0.52564,-1.05872 -1.29001,-1.998073 -2.21979,-2.727944"
+ id="path164133"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163633"
+ inkscape:original-d="m 667.89668,105.60417 c 1.12021,-0.88842 0.5494,-0.14827 1.64897,-1.07849 1.09958,-0.9302 -0.19053,-1.75544 -0.25369,-2.85481 -0.0632,-1.09938 -1.77608,-2.516734 -2.21979,-2.727944"
+ sodipodi:nodetypes="cccc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 678.45158,127.93405 v 3.63912"
+ id="path164135"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 671.55002,100.46623 c 0,0 -2.51266,1.87089 -3.38243,2.15152 -0.86976,0.28063 -4.44548,0.74835 -4.44548,0.74835 l 0.0966,2.80633 c 0,0 1.54625,0.28064 3.28579,-0.0935 1.73954,-0.37418 14.20621,-0.46772 14.20621,-0.46772"
+ id="path164137"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cscccc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:connector-curvature="0"
+ id="path164139"
+ d="m 700.13359,107.36883 -6.8301,1.80907"
+ style="fill:none;fill-opacity:1;stroke:#fde385;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="cc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ id="path164141"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 682.85977,124.87214 c -2.21493,1.35318 -4.81675,1.26558 -4.4536,1.26558 0.36315,0 -2.23867,0.0876 -4.4536,-1.26558"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="czc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="csc"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 690.79253,127.7282 c -1.45053,0.14024 -2.01604,-0.91633 -2.5656,-1.36858 -1.09913,-0.90452 -0.1113,-1.91988 -0.23409,-2.39785"
+ id="path164143"
+ inkscape:connector-curvature="0"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="cc"
+ style="fill:none;fill-opacity:1;stroke:#fde385;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 660.77576,124.86583 4.67858,-3.21224"
+ id="path164145"
+ inkscape:connector-curvature="0"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <ellipse
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ ry="1.3111838"
+ rx="1.2582382"
+ cy="112.2009"
+ cx="686.58099"
+ id="ellipse164147"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 677.59167,116.05505 -0.52917,1.25677"
+ id="path164149"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163651"
+ inkscape:original-d="m 677.59167,116.05505 c -0.13203,0.41866 -0.35251,0.83758 -0.52917,1.25677"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:original-d="m 679.17915,116.05505 c 0.13203,0.41866 0.35251,0.83758 0.52917,1.25677"
+ inkscape:path-effect="#path-effect163655"
+ inkscape:connector-curvature="0"
+ id="path164151"
+ d="m 679.17915,116.05505 0.52917,1.25677"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:connector-curvature="0"
+ id="path164153"
+ d="m 695.61685,124.86583 -4.67858,-3.21224"
+ style="fill:none;fill-opacity:1;stroke:#fde385;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="cc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="cscccc"
+ inkscape:connector-curvature="0"
+ id="path164155"
+ d="m 685.11528,100.46623 c 0,0 2.51266,1.87089 3.38243,2.15152 0.86976,0.28063 4.44548,0.74835 4.44548,0.74835 l -0.0966,2.80633 c 0,0 -1.54625,0.28064 -3.28579,-0.0935 -1.73954,-0.37418 -14.20621,-0.46772 -14.20621,-0.46772"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 685.05575,107.26614 5.96227,-0.7956 -9.18023,-1.59078 -8.88926,0.0497 -6.9128,1.24282 3.69017,1.09388 7.66492,0.19883 z"
+ id="path164157"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccccccc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="cccccccccc"
+ style="fill:#fcda5d;fill-opacity:1;fill-rule:evenodd;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 676.81873,99.653466 -2.18282,-0.0661 -1.91823,-2.38125 0.0661,-2.910414 3.16158,-0.842507 c 1.69501,-0.584128 3.19616,-0.406767 4.77192,0 2.72967,0.795783 3.24708,0.842507 3.24708,0.842507 l 0.0661,2.910414 -1.91823,2.38125 -2.18282,0.0661"
+ id="path164159"
+ inkscape:connector-curvature="0" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="ccccccc"
+ inkscape:connector-curvature="0"
+ id="path164161"
+ d="m 681.51475,97.860416 0.46611,-5.424686 -3.46306,-0.577161 -3.46269,0.577161 0.46575,5.424686 2.99695,3.704164 z"
+ style="fill:#fde385;fill-opacity:1;fill-rule:evenodd;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+ <path
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ sodipodi:nodetypes="ccccc"
+ inkscape:original-d="m 678.57794,117.2974 -0.0935,1.83339 c 0.0114,0.94605 1.57444,3.19538 2.23006,3.5441 0.65561,0.34873 1.64744,1.19432 2.23963,2.09058 0.5922,0.89625 2.53928,0.86565 3.63886,-0.70198"
+ inkscape:path-effect="#path-effect163597"
+ inkscape:connector-curvature="0"
+ id="path164163"
+ d="m 678.57794,117.2974 -0.0935,1.83339 c -0.019,0.37335 -0.03,0.75093 0.0438,1.11741 0.0738,0.36648 0.23099,0.71566 0.4563,1.01397 0.22531,0.29831 0.51338,0.54373 0.81663,0.76235 0.30325,0.21862 0.62323,0.41457 0.91332,0.65037 0.3976,0.3232 0.73099,0.71664 1.07802,1.09362 0.34704,0.37698 0.71589,0.74422 1.16161,0.99696 0.58472,0.33156 1.28881,0.44708 1.94883,0.31976 0.66001,-0.12733 1.2706,-0.49647 1.69003,-1.02174"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ </g>
+ <text
+ id="text164173"
+ y="3.1458828"
+ x="711.80151"
+ style="font-style:normal;font-weight:normal;font-size:37.59195328px;line-height:22.55517006px;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3c6eb4;fill-opacity:1;stroke:none;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"
+ inkscape:export-xdpi="89.962868"
+ inkscape:export-ydpi="89.962868"><tspan
+ dx="0 0 0 0"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Montserrat;-inkscape-font-specification:Montserrat;fill:#3c6eb4;fill-opacity:1;stroke-width:0.26458332px"
+ y="3.1458828"
+ x="711.80151"
+ id="tspan164171"
+ sodipodi:role="line"><tspan
+ style="fill:#000000;stroke-width:0.26458332px"
+ id="tspan164167">build</tspan><tspan
+ id="tspan164169"
+ style="fill:#ffcc00">ah</tspan></tspan></text>
+ <g
+ id="g164245"
+ transform="translate(-1.0583333,2.8008191)">
+ <g
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;stroke:#000000"
+ id="g164181"
+ transform="translate(-118.1493,-10.074071)"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png">
+ <path
+ id="path164175"
+ d="m 796.57913,145.63255 -19.29817,-9.23285 -4.82036,-20.8616 13.2871,-16.780617 21.38926,-0.0641 13.38485,16.701147 -4.69887,20.8897 z"
+ style="opacity:1;fill:#c9c8c6;fill-opacity:1;stroke:#ffffff;stroke-width:1.0583334;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
+ d="m 185.87625,59.397934 c -0.86973,0.217518 -2.57155,-0.134058 -2.52494,1.185647 0.29798,1.534184 0.51469,3.25265 -0.0756,4.733265 -2.59999,1.640261 -6.20252,6.520936 -9.25204,6.909269 l 43.94309,20.562507 42.71289,-19.994542 c 1.02895,-0.723043 2.47728,0.557072 1.54478,0.673163 -3.35476,-0.649977 -5.60495,-3.545794 -7.89139,-5.779104 -1.32714,-1.221465 -1.85144,-3.043527 -1.61413,-4.807152 -0.0512,-1.079531 1.14073,-2.378109 0.11981,-3.2134 -1.25435,-0.06792 -2.90233,-0.67841 -3.75435,0.61017 -3.09923,3.191828 -7.98829,4.311297 -12.1138,2.887779 -1.55682,-0.854291 -3.06748,0.550296 -4.47291,1.067862 -6.87259,3.170321 -14.6714,4.278483 -21.96511,2.268758 -3.27701,-0.820909 -6.47851,-1.975772 -9.37028,-3.683317 -1.34887,-0.137072 -2.59577,0.774552 -3.96402,0.837618 -3.77142,0.600908 -7.75015,-0.913634 -10.36088,-3.74839 -0.24699,-0.267363 -0.5888,-0.503792 -0.96111,-0.510133 z m 32.50205,15.39649 c 1.43219,-0.301225 0.54304,1.655686 0.79688,2.500732 0.0233,4.281784 0.0465,8.563566 0.0697,12.845351 -1.3554,0.293932 -1.91251,-0.210418 -1.5934,-1.590991 v -13.77926 c 0.24226,0.0081 0.48452,0.01611 0.72678,0.02417 z"
+ id="path164177"
+ inkscape:connector-curvature="0"
+ transform="matrix(0.26458333,0,0,0.26458333,738.81198,118.53055)"
+ sodipodi:nodetypes="cccccccccccccccccccccccccc" />
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79374993;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:5.99999952"
+ d="m 799.53748,117.44571 c -0.14618,0.005 -0.2976,0.0274 -0.42892,0.009 -2.03273,0.0428 -4.06359,0.0892 -6.0942,0.031 -0.28926,0.11428 -0.24836,0.50228 -0.35967,0.75138 -0.22736,0.98319 -0.0226,1.99923 0.0233,2.99155 0.07,0.84001 0.14093,1.67976 0.21136,2.51974 -1.27629,1.32719 -2.44789,2.78357 -3.30058,4.42195 -0.45703,0.87705 -0.62335,1.88496 -0.49712,2.86391 0.0203,0.83713 0.14089,1.79437 -0.44494,2.48357 -0.2045,0.17656 -0.15243,0.47737 0.0667,0.6134 0.61884,0.5474 1.48745,0.80041 2.29805,0.60306 0.99978,-0.16365 1.63821,-1.03284 2.31768,-1.6955 0.15864,-0.2272 0.63711,-0.45408 0.38551,-0.76998 -0.7675,-1.08459 -1.32972,-2.44507 -1.0697,-3.79098 0.12353,-0.6101 0.64173,-1.02068 1.16892,-1.28675 0.28556,-0.28377 -0.2066,-0.53663 -0.23823,-0.83509 0.0848,-0.56057 0.18974,-1.14181 0.44597,-1.65209 0.25811,-0.42031 0.80937,-0.32417 1.21078,-0.48731 0.63192,-0.22035 1.33566,-0.35672 1.98024,-0.10284 0.44879,0.16718 0.94179,0.19308 1.38079,0.37569 0.40792,0.36381 0.44788,0.95927 0.5917,1.45728 0.0844,0.26471 0.0795,0.56128 -0.12093,0.77463 -0.15859,0.17909 -0.12732,0.47628 0.12661,0.54311 0.63811,0.32204 1.10695,0.97435 1.09296,1.70481 0.0246,1.20969 -0.45512,2.37707 -1.16789,3.33726 -0.10802,0.32476 0.32009,0.49853 0.46974,0.73794 0.65863,0.70094 1.34012,1.50616 2.33267,1.70584 0.88352,0.181 1.86515,-0.13458 2.46135,-0.80718 0.0949,-0.27388 -0.28759,-0.40886 -0.36122,-0.65371 -0.42704,-0.7126 -0.29638,-1.55962 -0.2806,-2.34869 0.15195,-1.12447 -0.16703,-2.25995 -0.74207,-3.22358 -0.84528,-1.44611 -1.89414,-2.76858 -3.0608,-3.96823 0.10988,-1.60331 0.32998,-3.20245 0.33383,-4.81108 -0.019,-0.47827 -0.13757,-0.95613 -0.33435,-1.39061 -0.10996,-0.0918 -0.25069,-0.10579 -0.39687,-0.10129 z m -0.81339,15.38149 c -0.0464,-0.003 -0.0974,0.0351 -0.14366,0.0372 -0.54357,0.44869 -1.198,0.81023 -1.92236,0.80047 -0.71698,0.0539 -1.45907,-0.0997 -2.0345,-0.54881 -0.19522,-0.13527 -0.4874,-0.43793 -0.68678,-0.14831 -0.47322,0.53276 -0.98364,1.03938 -1.4764,1.54771 
-0.15575,0.31923 0.32053,0.38287 0.51935,0.50023 1.12198,0.45684 2.32628,0.77692 3.54397,0.77773 1.30395,-0.0584 2.61944,-0.35107 3.78271,-0.9524 0.27683,-0.2308 -0.0777,-0.51977 -0.23306,-0.70125 -0.43157,-0.4316 -0.83203,-0.91444 -1.30431,-1.29242 -0.0141,-0.0134 -0.0295,-0.019 -0.045,-0.0202 z"
+ id="path164179" />
+ </g>
+ <path
+ style="fill:#fde385;fill-opacity:1;stroke:none;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 669.39724,96.367584 1.02899,-1.957235 15.43482,-0.460526 1.59025,2.993416 -2.61924,6.332231 -5.42557,0.80592 -3.92887,0.34539 -4.20949,-2.76315 z"
+ id="path164183"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 668.72873,96.705238 c 1.29471,-1.620631 2.29404,-2.396806 3.34528,-2.88108 l 0.51322,0.669933 m 14.83217,1.608651 c -1.2947,-1.620635 -2.15245,-2.246185 -3.06212,-2.579834 l -0.52526,0.706599"
+ id="path164185"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccccc"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#cccccc;fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 677.98853,116.49938 c -0.34914,-0.002 -0.69863,0.044 -1.03586,0.14075 -2.10393,0.68594 -2.93218,1.30639 -2.55731,3.44971 0.43873,2.20092 2.18697,4.04843 3.96734,3.90808 1.78019,0.13991 3.5281,-1.70739 3.96677,-3.90808 0.37487,-2.14332 -0.45337,-2.76377 -2.5573,-3.44971 -0.33722,-0.0967 -0.68672,-0.14308 -1.03586,-0.14075 -0.12482,0.001 -0.24946,0.0116 -0.37361,0.0253 -0.12433,-0.014 -0.24916,-0.0242 -0.37417,-0.0253 z"
+ id="path164187"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 682.08117,113.78901 c 0.91986,0.9492 1.75512,1.98035 2.49266,3.07724 0.68351,1.01654 1.29935,2.144 1.34344,3.36817 0.0249,0.69116 -0.135,1.38133 -0.0846,2.07111 0.0252,0.34488 0.10516,0.69066 0.27705,0.99072 0.17189,0.30006 0.44194,0.55171 0.77047,0.65966 0.22103,0.0726 0.46179,0.0784 0.68921,0.0293 0.22741,-0.0491 0.44164,-0.15193 0.6304,-0.28796 0.37751,-0.27205 0.64758,-0.66865 0.86173,-1.08176 0.52388,-1.01062 0.75221,-2.15202 0.83092,-3.28763 0.0787,-1.13561 0.0138,-2.27549 -0.0167,-3.41341 -0.0193,-0.72156 -0.0232,-1.45577 0.17755,-2.14911 0.23128,-0.79874 0.71878,-1.49341 1.15186,-2.20328 0.43309,-0.70988 0.82802,-1.48546 0.83801,-2.31695 0.007,-0.57479 -0.1711,-1.13308 -0.31075,-1.69069 -0.13965,-0.55761 -0.24144,-1.14927 -0.0841,-1.70214 0.10561,-0.371 0.32101,-0.69904 0.51086,-1.03483 0.95093,-1.68188 1.29523,-3.68638 1.02955,-5.600118 -0.26569,-1.91374 -1.12956,-3.730164 -2.39332,-5.191636 -0.0591,-0.06838 -0.11962,-0.136493 -0.19144,-0.191396 -0.0718,-0.0549 -0.15638,-0.09637 -0.24644,-0.104263 -0.0833,-0.0073 -0.16782,0.01471 -0.24093,0.05531 -0.0731,0.0406 -0.13525,0.09915 -0.18648,0.165253 -0.10245,0.132201 -0.16121,0.291958 -0.22941,0.444673 -0.34883,0.781052 -0.96388,1.408741 -1.59109,1.990395 -0.62722,0.581655 -1.28716,1.144393 -1.76597,1.853243 -0.51192,0.757873 -0.79212,1.647778 -0.99797,2.538874 -0.20585,0.8911 -0.34453,1.79879 -0.60719,2.67483 -0.0931,0.31033 -0.2015,0.61605 -0.32481,0.91564 -0.83771,-0.13943 -1.68628,-0.21352 -2.53548,-0.22138 -0.92151,-0.009 -1.84374,0.0609 -2.75357,0.20747 0.83081,0.40874 1.56474,1.01281 2.12562,1.74951 0.68638,0.90154 1.10965,2.00099 1.20503,3.13006 z"
+ id="path164189"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163593"
+ inkscape:original-d="m 682.08117,113.78901 c 0.99336,-0.47132 1.43537,2.00637 2.49266,3.07724 0.52864,0.53542 0.96775,1.92235 1.34344,3.36817 0.37569,1.44583 0.68796,2.95052 0.96292,3.72149 0.54992,1.54193 1.74318,-1.25801 2.18134,-1.34044 0.9632,-0.18121 0.58141,-6.38011 0.81425,-6.70104 0.2328,-0.32093 0.13498,0.0561 0.17755,-2.14911 0.0426,-2.20518 1.12284,-2.20798 1.98987,-4.52023 0.86703,-2.31224 -0.17616,-2.45687 -0.39481,-3.39283 -0.21864,-0.93597 0.34029,-0.69019 0.51086,-1.03483 0.17058,-0.34465 -1.3155,-7.194794 -1.36377,-10.791754 -0.0483,-3.596939 -0.29221,-0.197401 -0.43788,-0.295659 -0.14566,-0.09827 -0.43817,0.4432 -0.65682,0.665241 -0.21864,0.222049 -2.23834,2.562126 -3.35706,3.843638 -1.11874,1.28151 -0.59065,3.543314 -1.60516,5.213704 -1.0145,1.6704 -0.26164,0.57279 -0.32481,0.91564 -1.47046,-1.47997 -2.08084,-0.28081 -2.53548,-0.22138 -0.45464,0.0595 -1.5473,-1.02086 -2.75357,0.20747 0.25395,0.23578 1.36429,0.99982 2.12562,1.74951 0.76132,0.74967 1.58531,2.48745 1.20503,3.13006 -0.20459,1.02558 -1.79371,4.55524 -0.37418,4.55511 z"
+ sodipodi:nodetypes="csscscccscccccccscscc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="csc"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 665.98766,127.54111 c 1.45053,0.14024 2.01604,-0.91633 2.5656,-1.36858 1.09913,-0.90452 0.1113,-1.91988 0.23409,-2.39785"
+ id="path164191"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:connector-curvature="0"
+ id="path164193"
+ d="m 656.79097,107.36883 6.8301,1.80907"
+ style="fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="cc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="csscscccscccccccscscc"
+ inkscape:original-d="m 674.60667,113.78901 c -0.99336,-0.47132 -1.43537,2.00637 -2.49266,3.07724 -0.52864,0.53543 -0.96775,1.92236 -1.34344,3.36818 -0.37569,1.44582 -0.68796,2.95051 -0.96292,3.72148 -0.54992,1.54193 -1.74318,-1.25801 -2.18134,-1.34044 -0.9632,-0.18121 -0.58141,-6.38011 -0.81425,-6.70104 -0.2328,-0.32093 -0.13498,0.0561 -0.17755,-2.14911 -0.0426,-2.20518 -1.12284,-2.20798 -1.98987,-4.52023 -0.86703,-2.31224 0.17616,-2.45687 0.39481,-3.39283 0.21864,-0.93597 -0.34029,-0.69019 -0.51086,-1.03483 -0.17058,-0.34465 1.3155,-7.194794 1.36377,-10.791754 0.0483,-3.596939 0.29221,-0.197401 0.43788,-0.295659 0.14566,-0.09827 0.43817,0.4432 0.65682,0.665241 0.21864,0.222049 2.23834,2.562126 3.35706,3.843638 1.11874,1.28151 0.59065,3.543314 1.60516,5.213704 1.0145,1.6704 0.26164,0.57279 0.32481,0.91564 1.47046,-1.47997 2.08084,-0.28081 2.53548,-0.22138 0.45464,0.0595 1.5473,-1.02086 2.75357,0.20747 -0.25395,0.23578 -1.36429,0.99982 -2.12562,1.74951 -0.76132,0.74967 -1.58531,2.48745 -1.20503,3.13006 0.20459,1.02558 1.79371,4.55524 0.37418,4.55511 z"
+ inkscape:path-effect="#path-effect163605"
+ inkscape:connector-curvature="0"
+ id="path164195"
+ d="m 674.60667,113.78901 c -0.91986,0.9492 -1.75512,1.98035 -2.49266,3.07724 -0.68351,1.01654 -1.29935,2.14401 -1.34344,3.36818 -0.0249,0.69116 0.135,1.38133 0.0846,2.0711 -0.0252,0.34489 -0.10516,0.69066 -0.27704,0.99072 -0.17189,0.30007 -0.44195,0.55171 -0.77047,0.65966 -0.22103,0.0726 -0.46179,0.0784 -0.68921,0.0293 -0.22741,-0.0491 -0.44164,-0.15193 -0.6304,-0.28796 -0.37751,-0.27205 -0.64758,-0.66865 -0.86173,-1.08176 -0.52388,-1.01062 -0.75221,-2.15202 -0.83092,-3.28763 -0.0787,-1.13561 -0.0138,-2.27549 0.0167,-3.41341 0.0193,-0.72156 0.0232,-1.45577 -0.17755,-2.14911 -0.23128,-0.79874 -0.71878,-1.49341 -1.15186,-2.20328 -0.43309,-0.70988 -0.82802,-1.48546 -0.83801,-2.31695 -0.007,-0.57479 0.1711,-1.13308 0.31075,-1.69069 0.13965,-0.55761 0.24144,-1.14927 0.0841,-1.70214 -0.10561,-0.371 -0.32101,-0.69904 -0.51086,-1.03483 -0.95093,-1.68188 -1.29523,-3.68638 -1.02955,-5.600118 0.26569,-1.91374 1.12956,-3.730164 2.39332,-5.191636 0.0591,-0.06838 0.11962,-0.136493 0.19144,-0.191396 0.0718,-0.0549 0.15638,-0.09637 0.24644,-0.104263 0.0833,-0.0073 0.16782,0.01471 0.24093,0.05531 0.0731,0.0406 0.13525,0.09915 0.18648,0.165253 0.10245,0.132201 0.16121,0.291958 0.22941,0.444673 0.34883,0.781052 0.96388,1.408741 1.59109,1.990395 0.62722,0.581655 1.28716,1.144393 1.76597,1.853243 0.51192,0.757873 0.79212,1.647778 0.99797,2.538874 0.20585,0.8911 0.34453,1.79879 0.60719,2.67483 0.093,0.31033 0.2015,0.61605 0.32481,0.91564 0.83771,-0.13943 1.68628,-0.21352 2.53548,-0.22138 0.92151,-0.009 1.84374,0.0609 2.75357,0.20747 -0.83081,0.40874 -1.56474,1.01281 -2.12562,1.74951 -0.68638,0.90154 -1.10965,2.00099 -1.20503,3.13006 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <ellipse
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ ry="2.6315403"
+ rx="2.5586886"
+ cy="112.94144"
+ cx="-685.8017"
+ id="ellipse164197"
+ style="fill:#000000;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ transform="scale(-1,1)"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="csc"
+ inkscape:original-d="m 688.09564,109.52895 c -0.77869,-0.31413 -1.37767,-0.81293 -2.16284,-1.12668 -0.78518,-0.31375 -1.50033,0.61846 -2.15345,0.83557"
+ inkscape:path-effect="#path-effect163611"
+ inkscape:connector-curvature="0"
+ id="path164199"
+ d="m 688.09564,109.52895 c -0.51006,-0.66669 -1.32419,-1.09079 -2.16284,-1.12668 -0.79156,-0.0339 -1.5919,0.27667 -2.15345,0.83557"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 688.35038,105.60417 c -0.34291,-0.021 -0.67986,-0.13271 -0.96738,-0.32076 -0.28752,-0.18805 -0.52493,-0.45197 -0.68159,-0.75773 -0.22615,-0.44137 -0.27968,-0.95782 -0.21342,-1.4493 0.0663,-0.49149 0.24657,-0.96132 0.46711,-1.40551 0.52564,-1.05872 1.29001,-1.998073 2.21979,-2.727944"
+ id="path164201"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163615"
+ inkscape:original-d="m 688.35038,105.60417 c -1.12021,-0.88842 -0.5494,-0.14827 -1.64897,-1.07849 -1.09958,-0.9302 0.19053,-1.75544 0.25369,-2.85481 0.0632,-1.09938 1.77608,-2.516734 2.21979,-2.727944"
+ sodipodi:nodetypes="cccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="ccccc"
+ inkscape:original-d="m 678.17919,117.2974 0.0935,1.83339 c -0.0114,0.94605 -1.57444,3.19538 -2.23006,3.5441 -0.65561,0.34873 -1.64744,1.19432 -2.23963,2.09058 -0.5922,0.89625 -2.53928,0.86565 -3.63886,-0.70198"
+ inkscape:path-effect="#path-effect163619"
+ inkscape:connector-curvature="0"
+ id="path164203"
+ d="m 678.17919,117.2974 0.0935,1.83339 c 0.019,0.37335 0.03,0.75093 -0.0438,1.11741 -0.0738,0.36648 -0.23099,0.71566 -0.4563,1.01397 -0.22531,0.29831 -0.51338,0.54373 -0.81663,0.76235 -0.30325,0.21862 -0.62323,0.41457 -0.91332,0.65037 -0.3976,0.3232 -0.73099,0.71664 -1.07802,1.09362 -0.34704,0.37698 -0.71589,0.74422 -1.16161,0.99696 -0.58472,0.33156 -1.28881,0.44708 -1.94883,0.31976 -0.66001,-0.12733 -1.2706,-0.49647 -1.69003,-1.02174"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cszsccc"
+ inkscape:connector-curvature="0"
+ d="m 680.97887,116.32503 c -0.20725,-0.54651 -0.20725,-1.63953 -0.82897,-1.63953 -0.62171,0 -1.19648,-0.40988 -1.808,-0.40988 -0.61152,0 -1.18629,0.40988 -1.80801,0.40988 -0.62172,0 -0.62172,1.09302 -0.82896,1.63953 -0.20725,0.54651 2.63697,2.32265 2.63697,2.32265 0,0 2.84421,-1.77614 2.63697,-2.32265 z"
+ style="fill:#808080;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path164205"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <ellipse
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#000000;fill-opacity:1;stroke:#ffcc00;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="ellipse164207"
+ cx="670.97522"
+ cy="112.94144"
+ rx="2.5586886"
+ ry="2.6315403"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <ellipse
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ ry="1.3111838"
+ rx="1.2582382"
+ cy="112.2009"
+ cx="670.12122"
+ id="ellipse164209"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 668.68054,109.52895 c 0.51006,-0.66669 1.32419,-1.09079 2.16284,-1.12668 0.79156,-0.0339 1.5919,0.27667 2.15345,0.83557"
+ id="path164211"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163629"
+ inkscape:original-d="m 668.68054,109.52895 c 0.77869,-0.31413 1.37767,-0.81293 2.16284,-1.12668 0.78518,-0.31375 1.50033,0.61846 2.15345,0.83557"
+ sodipodi:nodetypes="csc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cccc"
+ inkscape:original-d="m 667.89668,105.60417 c 1.12021,-0.88842 0.5494,-0.14827 1.64897,-1.07849 1.09958,-0.9302 -0.19053,-1.75544 -0.25369,-2.85481 -0.0632,-1.09938 -1.77608,-2.516734 -2.21979,-2.727944"
+ inkscape:path-effect="#path-effect163633"
+ inkscape:connector-curvature="0"
+ id="path164213"
+ d="m 667.89668,105.60417 c 0.34291,-0.021 0.67986,-0.13271 0.96738,-0.32076 0.28752,-0.18805 0.52493,-0.45197 0.68159,-0.75773 0.22615,-0.44137 0.27968,-0.95782 0.21342,-1.4493 -0.0663,-0.49149 -0.24657,-0.96132 -0.46711,-1.40551 -0.52564,-1.05872 -1.29001,-1.998073 -2.21979,-2.727944"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path164215"
+ d="m 678.45158,127.93405 v 3.63912"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cscccc"
+ inkscape:connector-curvature="0"
+ id="path164217"
+ d="m 671.55002,100.46623 c 0,0 -2.51266,1.87089 -3.38243,2.15152 -0.86976,0.28063 -4.44548,0.74835 -4.44548,0.74835 l 0.0966,2.80633 c 0,0 1.54625,0.28064 3.28579,-0.0935 1.73954,-0.37418 14.20621,-0.46772 14.20621,-0.46772"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cc"
+ style="fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 700.13359,107.36883 -6.8301,1.80907"
+ id="path164219"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="czc"
+ inkscape:connector-curvature="0"
+ d="m 682.85977,124.87214 c -2.21493,1.35318 -4.81675,1.26558 -4.4536,1.26558 0.36315,0 -2.23867,0.0876 -4.4536,-1.26558"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path164221"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:connector-curvature="0"
+ id="path164223"
+ d="m 690.79253,127.7282 c -1.45053,0.14024 -2.01604,-0.91633 -2.5656,-1.36858 -1.09913,-0.90452 -0.1113,-1.91988 -0.23409,-2.39785"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="csc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:connector-curvature="0"
+ id="path164225"
+ d="m 660.77576,124.86583 4.67858,-3.21224"
+ style="fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="cc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <ellipse
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79375005;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="ellipse164227"
+ cx="686.58099"
+ cy="112.2009"
+ rx="1.2582382"
+ ry="1.3111838"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ inkscape:original-d="m 677.59167,116.05505 c -0.13203,0.41866 -0.35251,0.83758 -0.52917,1.25677"
+ inkscape:path-effect="#path-effect163651"
+ inkscape:connector-curvature="0"
+ id="path164229"
+ d="m 677.59167,116.05505 -0.52917,1.25677"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 679.17915,116.05505 0.52917,1.25677"
+ id="path164231"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163655"
+ inkscape:original-d="m 679.17915,116.05505 c 0.13203,0.41866 0.35251,0.83758 0.52917,1.25677"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cc"
+ style="fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 695.61685,124.86583 -4.67858,-3.21224"
+ id="path164233"
+ inkscape:connector-curvature="0"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374993;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 685.11528,100.46623 c 0,0 2.51266,1.87089 3.38243,2.15152 0.86976,0.28063 4.44548,0.74835 4.44548,0.74835 l -0.0966,2.80633 c 0,0 -1.54625,0.28064 -3.28579,-0.0935 -1.73954,-0.37418 -14.20621,-0.46772 -14.20621,-0.46772"
+ id="path164235"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cscccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ sodipodi:nodetypes="cccccccc"
+ inkscape:connector-curvature="0"
+ id="path164237"
+ d="m 685.05575,107.26614 5.96227,-0.7956 -9.18023,-1.59078 -8.88926,0.0497 -6.9128,1.24282 3.69017,1.09388 7.66492,0.19883 z"
+ style="fill:#fde385;fill-opacity:1;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path164239"
+ d="m 676.81873,99.653466 -2.18282,-0.0661 -1.91823,-2.38125 0.0661,-2.910414 3.16158,-0.842507 c 1.69501,-0.584128 3.19616,-0.406767 4.77192,0 2.72967,0.795783 3.24708,0.842507 3.24708,0.842507 l 0.0661,2.910414 -1.91823,2.38125 -2.18282,0.0661"
+ style="fill:#fcda5d;fill-opacity:1;fill-rule:evenodd;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ sodipodi:nodetypes="cccccccccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ style="fill:#fde385;fill-opacity:1;fill-rule:evenodd;stroke:#fcc917;stroke-width:0.79374999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 681.51475,97.860416 0.46611,-5.424686 -3.46306,-0.577161 -3.46269,0.577161 0.46575,5.424686 2.99695,3.704164 z"
+ id="path164241"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png"
+ inkscape:export-xdpi="96.181694"
+ inkscape:export-ydpi="96.181694" />
+ <path
+ inkscape:export-ydpi="96.181694"
+ inkscape:export-xdpi="96.181694"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 678.57794,117.2974 -0.0935,1.83339 c -0.019,0.37335 -0.03,0.75093 0.0438,1.11741 0.0738,0.36648 0.23099,0.71566 0.4563,1.01397 0.22531,0.29831 0.51338,0.54373 0.81663,0.76235 0.30325,0.21862 0.62323,0.41457 0.91332,0.65037 0.3976,0.3232 0.73099,0.71664 1.07802,1.09362 0.34704,0.37698 0.71589,0.74422 1.16161,0.99696 0.58472,0.33156 1.28881,0.44708 1.94883,0.31976 0.66001,-0.12733 1.2706,-0.49647 1.69003,-1.02174"
+ id="path164243"
+ inkscape:connector-curvature="0"
+ inkscape:path-effect="#path-effect163597"
+ inkscape:original-d="m 678.57794,117.2974 -0.0935,1.83339 c 0.0114,0.94605 1.57444,3.19538 2.23006,3.5441 0.65561,0.34873 1.64744,1.19432 2.23963,2.09058 0.5922,0.89625 2.53928,0.86565 3.63886,-0.70198"
+ sodipodi:nodetypes="ccccc"
+ inkscape:export-filename="/home/duffy/Documents/Projects/Favors/Buildah logo/final/color-not-color.png" />
+ </g>
+ <rect
+ inkscape:export-ydpi="45.436005"
+ inkscape:export-xdpi="45.436005"
+ style="opacity:1;fill:none;fill-opacity:1;stroke:#dedede;stroke-width:0.79374993;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect164274"
+ width="266.43546"
+ height="119.89413"
+ x="620.00122"
+ y="79.601753"
+ ry="0.2606394"
+ transform="scale(1,-1)" />
+ </g>
+</svg>
diff --git a/logos/buildah-logo_large.png b/logos/buildah-logo_large.png
new file mode 100644
index 0000000..1d69ff3
--- /dev/null
+++ b/logos/buildah-logo_large.png
Binary files differ
diff --git a/logos/buildah-logo_large_transparent-bg.png b/logos/buildah-logo_large_transparent-bg.png
new file mode 100644
index 0000000..aa11c35
--- /dev/null
+++ b/logos/buildah-logo_large_transparent-bg.png
Binary files differ
diff --git a/logos/buildah-logo_medium.png b/logos/buildah-logo_medium.png
new file mode 100644
index 0000000..ca43b3d
--- /dev/null
+++ b/logos/buildah-logo_medium.png
Binary files differ
diff --git a/logos/buildah-logo_medium_transparent-bg.png b/logos/buildah-logo_medium_transparent-bg.png
new file mode 100644
index 0000000..6f166dd
--- /dev/null
+++ b/logos/buildah-logo_medium_transparent-bg.png
Binary files differ
diff --git a/logos/buildah-logo_reverse_large.png b/logos/buildah-logo_reverse_large.png
new file mode 100644
index 0000000..0489a0c
--- /dev/null
+++ b/logos/buildah-logo_reverse_large.png
Binary files differ
diff --git a/logos/buildah-logo_reverse_medium.png b/logos/buildah-logo_reverse_medium.png
new file mode 100644
index 0000000..c8fb2f5
--- /dev/null
+++ b/logos/buildah-logo_reverse_medium.png
Binary files differ
diff --git a/logos/buildah-logo_reverse_small.png b/logos/buildah-logo_reverse_small.png
new file mode 100644
index 0000000..7ad828e
--- /dev/null
+++ b/logos/buildah-logo_reverse_small.png
Binary files differ
diff --git a/logos/buildah-logo_small.png b/logos/buildah-logo_small.png
new file mode 100644
index 0000000..3d53074
--- /dev/null
+++ b/logos/buildah-logo_small.png
Binary files differ
diff --git a/logos/buildah-logo_small_transparent-bg.png b/logos/buildah-logo_small_transparent-bg.png
new file mode 100644
index 0000000..90f3a6c
--- /dev/null
+++ b/logos/buildah-logo_small_transparent-bg.png
Binary files differ
diff --git a/logos/buildah-logomark_large.png b/logos/buildah-logomark_large.png
new file mode 100644
index 0000000..6a0fd16
--- /dev/null
+++ b/logos/buildah-logomark_large.png
Binary files differ
diff --git a/logos/buildah-logomark_large_transparent-bg.png b/logos/buildah-logomark_large_transparent-bg.png
new file mode 100644
index 0000000..71eb074
--- /dev/null
+++ b/logos/buildah-logomark_large_transparent-bg.png
Binary files differ
diff --git a/logos/buildah-logomark_medium.png b/logos/buildah-logomark_medium.png
new file mode 100644
index 0000000..7f57445
--- /dev/null
+++ b/logos/buildah-logomark_medium.png
Binary files differ
diff --git a/logos/buildah-logomark_medium_transparent-bg.png b/logos/buildah-logomark_medium_transparent-bg.png
new file mode 100644
index 0000000..aa45c2e
--- /dev/null
+++ b/logos/buildah-logomark_medium_transparent-bg.png
Binary files differ
diff --git a/logos/buildah-logomark_small.png b/logos/buildah-logomark_small.png
new file mode 100644
index 0000000..53a8040
--- /dev/null
+++ b/logos/buildah-logomark_small.png
Binary files differ
diff --git a/logos/buildah-logomark_small_transparent-bg.png b/logos/buildah-logomark_small_transparent-bg.png
new file mode 100644
index 0000000..85b1aed
--- /dev/null
+++ b/logos/buildah-logomark_small_transparent-bg.png
Binary files differ
diff --git a/manifests/compat.go b/manifests/compat.go
new file mode 100644
index 0000000..4adacbb
--- /dev/null
+++ b/manifests/compat.go
@@ -0,0 +1,33 @@
+// This package is deprecated. Its functionality has been moved to
+// github.com/containers/common/libimage/manifests, which provides the same
+// API. The stubs here are present for compatibility with older code. New
+// implementations should use github.com/containers/common/libimage/manifests
+// directly.
+package manifests
+
+import (
+ "github.com/containers/common/libimage/manifests"
+ "github.com/containers/storage"
+)
+
+type (
+	// List is an alias for github.com/containers/common/libimage/manifests.List.
+	List = manifests.List
+	// PushOptions is an alias for github.com/containers/common/libimage/manifests.PushOptions.
+	PushOptions = manifests.PushOptions
+)
+
+var (
+	// ErrListImageUnknown is an alias for github.com/containers/common/libimage/manifests.ErrListImageUnknown
+	ErrListImageUnknown = manifests.ErrListImageUnknown
+)
+
+// Create wraps github.com/containers/common/libimage/manifests.Create().
+//
+// Deprecated: use github.com/containers/common/libimage/manifests.Create directly.
+func Create() List {
+	return manifests.Create()
+}
+
+// LoadFromImage wraps github.com/containers/common/libimage/manifests.LoadFromImage().
+//
+// Deprecated: use github.com/containers/common/libimage/manifests.LoadFromImage directly.
+func LoadFromImage(store storage.Store, image string) (string, List, error) {
+	return manifests.LoadFromImage(store, image)
+}
diff --git a/mount.go b/mount.go
new file mode 100644
index 0000000..932c1bb
--- /dev/null
+++ b/mount.go
@@ -0,0 +1,51 @@
+package buildah
+
+import "fmt"
+
+// Mount mounts a container's root filesystem in a location which can be
+// accessed from the host, and returns the location.
+//
+// The label argument is passed through to the storage layer as the mount
+// label.  On success the Builder's MountPoint field is updated and the
+// builder state is persisted via Save(), so the new mount point is visible
+// to other users of the stored state.
+func (b *Builder) Mount(label string) (string, error) {
+	mountpoint, err := b.store.Mount(b.ContainerID, label)
+	if err != nil {
+		return "", fmt.Errorf("mounting build container %q: %w", b.ContainerID, err)
+	}
+	b.MountPoint = mountpoint
+
+	// Record the new mount point in the persisted builder state.
+	err = b.Save()
+	if err != nil {
+		return "", fmt.Errorf("saving updated state for build container %q: %w", b.ContainerID, err)
+	}
+	return mountpoint, nil
+}
+
+// setMountPoint records mountPoint as the container's current mount point
+// (an empty string means "not mounted") and persists the updated builder
+// state via Save().
+func (b *Builder) setMountPoint(mountPoint string) error {
+	b.MountPoint = mountPoint
+	if err := b.Save(); err != nil {
+		return fmt.Errorf("saving updated state for build container %q: %w", b.ContainerID, err)
+	}
+	return nil
+}
+
+// Mounted returns whether the container is mounted or not
+//
+// As a side effect it reconciles the builder's recorded MountPoint with the
+// storage layer's view: if the container is mounted but MountPoint is empty,
+// the layer's mount point is looked up and saved; if it is not mounted but a
+// stale MountPoint is recorded, the record is cleared.
+func (b *Builder) Mounted() (bool, error) {
+	// b.store.Mounted returns a mount reference count; > 0 means mounted.
+	mountCnt, err := b.store.Mounted(b.ContainerID)
+	if err != nil {
+		return false, fmt.Errorf("determining if mounting build container %q is mounted: %w", b.ContainerID, err)
+	}
+	mounted := mountCnt > 0
+	if mounted && b.MountPoint == "" {
+		// Mounted, but we don't know where: recover the mount point from
+		// the container's layer record and persist it.
+		ctr, err := b.store.Container(b.ContainerID)
+		if err != nil {
+			return mountCnt > 0, fmt.Errorf("determining if mounting build container %q is mounted: %w", b.ContainerID, err)
+		}
+		layer, err := b.store.Layer(ctr.LayerID)
+		if err != nil {
+			return mountCnt > 0, fmt.Errorf("determining if mounting build container %q is mounted: %w", b.ContainerID, err)
+		}
+		return mounted, b.setMountPoint(layer.MountPoint)
+	}
+	if !mounted && b.MountPoint != "" {
+		// Not mounted, but a stale mount point is recorded: clear it.
+		return mounted, b.setMountPoint("")
+	}
+	return mounted, nil
+}
diff --git a/new.go b/new.go
new file mode 100644
index 0000000..45269c8
--- /dev/null
+++ b/new.go
@@ -0,0 +1,355 @@
+package buildah
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/rand"
+ "strings"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/common/libimage"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/shortnames"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/stringid"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/openshift/imagebuilder"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // BaseImageFakeName is the "name" of a source image which we interpret
+ // as "no image".
+ BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
+)
+
+// getImageName returns the name to record for the image a container is based
+// on: the first entry of img.Names, preferring an entry that contains the
+// caller-supplied name; if img has no names at all, name itself is returned.
+func getImageName(name string, img *storage.Image) string {
+	imageName := name
+	if len(img.Names) > 0 {
+		imageName = img.Names[0]
+		// When the image used by the container is a tagged image
+		// the container name might be set to the original image instead of
+		// the image given in the "from" command line.
+		// This loop is supposed to fix this.
+		for _, n := range img.Names {
+			if strings.Contains(n, name) {
+				imageName = n
+				break
+			}
+		}
+	}
+	return imageName
+}
+
+// imageNamePrefix reduces an image name (or digest) to a short string that is
+// usable as the leading component of a generated container name: a parseable
+// digest is replaced by its encoded value truncated to 12 characters, a valid
+// storage ID is truncated, and then any tag (after ":"), leading repository
+// path (before the last "/"), and digest suffix (after "@") are stripped.
+func imageNamePrefix(imageName string) string {
+	prefix := imageName
+	if d, err := digest.Parse(imageName); err == nil {
+		prefix = d.Encoded()
+		if len(prefix) > 12 {
+			prefix = prefix[:12]
+		}
+	}
+	if stringid.ValidateID(prefix) == nil {
+		prefix = stringid.TruncateID(prefix)
+	}
+	// Drop a ":tag" suffix, keep only the last path component, and drop a
+	// "@digest" suffix, in that order.
+	s := strings.Split(prefix, ":")
+	if len(s) > 0 {
+		prefix = s[0]
+	}
+	s = strings.Split(prefix, "/")
+	if len(s) > 0 {
+		prefix = s[len(s)-1]
+	}
+	s = strings.Split(prefix, "@")
+	if len(s) > 0 {
+		prefix = s[0]
+	}
+	return prefix
+}
+
+// newContainerIDMappingOptions converts buildah's ID-mapping options into the
+// equivalent storage.IDMappingOptions for container creation.  A nil
+// idmapOptions yields the zero value.  Explicit UID/GID maps are only used
+// when both are non-empty; otherwise host mappings are forced.
+func newContainerIDMappingOptions(idmapOptions *define.IDMappingOptions) storage.IDMappingOptions {
+	var options storage.IDMappingOptions
+	if idmapOptions != nil {
+		if idmapOptions.AutoUserNs {
+			options.AutoUserNs = true
+			options.AutoUserNsOpts = idmapOptions.AutoUserNsOpts
+		} else {
+			options.HostUIDMapping = idmapOptions.HostUIDMapping
+			options.HostGIDMapping = idmapOptions.HostGIDMapping
+			uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap)
+			if len(uidmap) > 0 && len(gidmap) > 0 {
+				options.UIDMap = uidmap
+				options.GIDMap = gidmap
+			} else {
+				// Incomplete mappings: fall back to host mappings.
+				options.HostUIDMapping = true
+				options.HostGIDMapping = true
+			}
+		}
+	}
+	return options
+}
+
+// containerNameExist reports whether any container in containers already has
+// the given name among its names.
+func containerNameExist(name string, containers []storage.Container) bool {
+	for _, container := range containers {
+		for _, cname := range container.Names {
+			if cname == name {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// findUnusedContainer returns name, or name with an increasing numeric "-N"
+// suffix appended, such that the result does not collide with any name in
+// containers.
+func findUnusedContainer(name string, containers []storage.Container) string {
+	suffix := 1
+	tmpName := name
+	for containerNameExist(tmpName, containers) {
+		tmpName = fmt.Sprintf("%s-%d", name, suffix)
+		suffix++
+	}
+	return tmpName
+}
+
+// newBuilder creates a new working container based on options.FromImage
+// (which may be empty, meaning no base image), initializes its configuration
+// from the base image if there is one, and returns a Builder wrapping it.
+// On most failure paths the partially-created container is deleted again via
+// the deferred cleanup below.
+func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
+	var (
+		ref types.ImageReference
+		img *storage.Image
+		err error
+	)
+
+	// Treat the "scratch" pseudo-image as "no base image".
+	if options.FromImage == BaseImageFakeName {
+		options.FromImage = ""
+	}
+
+	if options.NetworkInterface == nil {
+		// create the network interface
+		// Note: It is important to do this before we pull any images/create containers.
+		// The default backend detection logic needs an empty store to correctly detect
+		// that we can use netavark, if the store was not empty it will use CNI to not break existing installs.
+		options.NetworkInterface, err = getNetworkInterface(store, options.CNIConfigDir, options.CNIPluginPath)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
+
+	// Pull the base image, if one was specified, honoring the caller's pull
+	// policy and retry settings.
+	if options.FromImage != "" && options.FromImage != BaseImageFakeName {
+		imageRuntime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+		if err != nil {
+			return nil, err
+		}
+
+		pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String())
+		if err != nil {
+			return nil, err
+		}
+
+		// Note: options.Format does *not* relate to the image we're
+		// about to pull (see tests/digests.bats). So we're not
+		// forcing a MIMEType in the pullOptions below.
+		pullOptions := libimage.PullOptions{}
+		pullOptions.RetryDelay = &options.PullRetryDelay
+		pullOptions.OciDecryptConfig = options.OciDecryptConfig
+		pullOptions.SignaturePolicyPath = options.SignaturePolicyPath
+		pullOptions.Writer = options.ReportWriter
+		pullOptions.DestinationLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal)
+
+		maxRetries := uint(options.MaxPullRetries)
+		pullOptions.MaxRetries = &maxRetries
+
+		pulledImages, err := imageRuntime.Pull(ctx, options.FromImage, pullPolicy, &pullOptions)
+		if err != nil {
+			return nil, err
+		}
+		if len(pulledImages) > 0 {
+			img = pulledImages[0].StorageImage()
+			ref, err = pulledImages[0].StorageReference()
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	// Record identifying information about the base image, if we have one.
+	imageSpec := options.FromImage
+	imageID := ""
+	imageDigest := ""
+	topLayer := ""
+	if img != nil {
+		imageSpec = getImageName(imageNamePrefix(imageSpec), img)
+		imageID = img.ID
+		topLayer = img.TopLayer
+	}
+	// Open the pulled image so initConfig can read its configuration,
+	// resolving a manifest list down to the instance chosen for this system.
+	var src types.Image
+	if ref != nil {
+		srcSrc, err := ref.NewImageSource(ctx, systemContext)
+		if err != nil {
+			return nil, fmt.Errorf("instantiating image for %q: %w", transports.ImageName(ref), err)
+		}
+		defer srcSrc.Close()
+		manifestBytes, manifestType, err := srcSrc.GetManifest(ctx, nil)
+		if err != nil {
+			return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
+		}
+		// Digest failures are deliberately ignored; imageDigest stays "".
+		if manifestDigest, err := manifest.Digest(manifestBytes); err == nil {
+			imageDigest = manifestDigest.String()
+		}
+		var instanceDigest *digest.Digest
+		if manifest.MIMETypeIsMultiImage(manifestType) {
+			list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+			if err != nil {
+				return nil, fmt.Errorf("parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
+			}
+			instance, err := list.ChooseInstance(systemContext)
+			if err != nil {
+				return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
+			}
+			instanceDigest = &instance
+		}
+		src, err = image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(srcSrc, instanceDigest))
+		if err != nil {
+			return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
+		}
+	}
+
+	// Pick a name for the working container: the caller's choice, or one
+	// derived from the image name plus a "-working-container" style suffix.
+	name := "working-container"
+	if options.ContainerSuffix != "" {
+		name = options.ContainerSuffix
+	}
+	if options.Container != "" {
+		name = options.Container
+	} else {
+		if imageSpec != "" {
+			name = imageNamePrefix(imageSpec) + "-" + name
+		}
+	}
+	var container *storage.Container
+	tmpName := name
+	if options.Container == "" {
+		containers, err := store.Containers()
+		if err != nil {
+			return nil, fmt.Errorf("unable to check for container names: %w", err)
+		}
+		tmpName = findUnusedContainer(tmpName, containers)
+	}
+
+	// Create the container, retrying with random numeric suffixes on name
+	// collisions (a race is still possible after findUnusedContainer).  The
+	// suffix space widens tenfold on each collision, capped at 10^9.
+	suffixDigitsModulo := 100
+	for {
+
+		var flags map[string]interface{}
+		// check if we have predefined ProcessLabel and MountLabel
+		// this could be true if this is another stage in a build
+		if options.ProcessLabel != "" && options.MountLabel != "" {
+			flags = map[string]interface{}{
+				"ProcessLabel": options.ProcessLabel,
+				"MountLabel": options.MountLabel,
+			}
+		}
+		coptions := storage.ContainerOptions{
+			LabelOpts: options.CommonBuildOpts.LabelOpts,
+			IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions),
+			Flags: flags,
+			Volatile: true,
+		}
+		container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions)
+		if err == nil {
+			name = tmpName
+			break
+		}
+		// Only retry on duplicate-name errors, and only when the caller did
+		// not insist on a specific container name.
+		if !errors.Is(err, storage.ErrDuplicateName) || options.Container != "" {
+			return nil, fmt.Errorf("creating container: %w", err)
+		}
+		tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%suffixDigitsModulo)
+		if suffixDigitsModulo < 1_000_000_000 {
+			suffixDigitsModulo *= 10
+		}
+	}
+	// From here on, delete the freshly-created container if we fail.  This
+	// inspects the outer err variable at function exit.
+	defer func() {
+		if err != nil {
+			if err2 := store.DeleteContainer(container.ID); err2 != nil {
+				logrus.Errorf("error deleting container %q: %v", container.ID, err2)
+			}
+		}
+	}()
+
+	uidmap, gidmap := convertStorageIDMaps(container.UIDMap, container.GIDMap)
+
+	defaultNamespaceOptions, err := DefaultNamespaceOptions()
+	if err != nil {
+		return nil, err
+	}
+
+	namespaceOptions := defaultNamespaceOptions
+	namespaceOptions.AddOrReplace(options.NamespaceOptions...)
+
+	// Assemble the Builder record that will be persisted as the container's
+	// state by Save() below.
+	builder := &Builder{
+		store: store,
+		Type: containerType,
+		FromImage: imageSpec,
+		FromImageID: imageID,
+		FromImageDigest: imageDigest,
+		GroupAdd: options.GroupAdd,
+		Container: name,
+		ContainerID: container.ID,
+		ImageAnnotations: map[string]string{},
+		ImageCreatedBy: "",
+		ProcessLabel: container.ProcessLabel(),
+		MountLabel: container.MountLabel(),
+		DefaultMountsFilePath: options.DefaultMountsFilePath,
+		Isolation: options.Isolation,
+		NamespaceOptions: namespaceOptions,
+		ConfigureNetwork: options.ConfigureNetwork,
+		CNIPluginPath: options.CNIPluginPath,
+		CNIConfigDir: options.CNIConfigDir,
+		IDMappingOptions: define.IDMappingOptions{
+			// NOTE(review): HostGIDMapping is derived from len(uidmap),
+			// not len(gidmap) — presumably fine because the maps are set
+			// as a pair above, but verify.
+			HostUIDMapping: len(uidmap) == 0,
+			HostGIDMapping: len(uidmap) == 0,
+			UIDMap: uidmap,
+			GIDMap: gidmap,
+		},
+		Capabilities: copyStringSlice(options.Capabilities),
+		CommonBuildOpts: options.CommonBuildOpts,
+		TopLayer: topLayer,
+		Args: copyStringStringMap(options.Args),
+		Format: options.Format,
+		TempVolumes: map[string]bool{},
+		Devices: options.Devices,
+		Logger: options.Logger,
+		NetworkInterface: options.NetworkInterface,
+	}
+
+	if options.Mount {
+		_, err = builder.Mount(container.MountLabel())
+		if err != nil {
+			return nil, fmt.Errorf("mounting build container %q: %w", builder.ContainerID, err)
+		}
+	}
+
+	// NOTE(review): this err is a new, shadowed variable, so the deferred
+	// cleanup above does not see it — the container is not deleted when
+	// initConfig fails.  Confirm whether that is intended.
+	if err := builder.initConfig(ctx, src, systemContext); err != nil {
+		return nil, fmt.Errorf("preparing image configuration: %w", err)
+	}
+
+	if !options.PreserveBaseImageAnns {
+		builder.SetAnnotation(v1.AnnotationBaseImageDigest, imageDigest)
+		if !shortnames.IsShortName(imageSpec) {
+			// If the base image was specified as a fully-qualified
+			// image name, let's set it.
+			builder.SetAnnotation(v1.AnnotationBaseImageName, imageSpec)
+		} else {
+			builder.UnsetAnnotation(v1.AnnotationBaseImageName)
+		}
+	}
+
+	err = builder.Save()
+	if err != nil {
+		return nil, fmt.Errorf("saving builder state for container %q: %w", builder.ContainerID, err)
+	}
+
+	return builder, nil
+}
diff --git a/new_test.go b/new_test.go
new file mode 100644
index 0000000..ae78a26
--- /dev/null
+++ b/new_test.go
@@ -0,0 +1,35 @@
+package buildah
+
+import (
+ "testing"
+
+ "github.com/containers/storage"
+ "github.com/openshift/imagebuilder"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestGetImageName checks how getImageName resolves a requested name against
+// an image's recorded names: it prefers a recorded name containing the
+// request, falls back to the first recorded name, and returns the request
+// itself when the image has no names.
+func TestGetImageName(t *testing.T) {
+	tt := []struct {
+		caseName string
+		name     string
+		names    []string
+		expected string
+	}{
+		{"tagged image", "busybox1", []string{"docker.io/library/busybox:latest", "docker.io/library/busybox1:latest"}, "docker.io/library/busybox1:latest"},
+		{"image name not in the resolved image names", "image1", []string{"docker.io/library/busybox:latest", "docker.io/library/busybox1:latest"}, "docker.io/library/busybox:latest"},
+		{"resolved image with empty name list", "image1", []string{}, "image1"},
+	}
+
+	for _, tc := range tt {
+		img := &storage.Image{Names: tc.names}
+		res := getImageName(tc.name, img)
+		if res != tc.expected {
+			t.Errorf("test case '%s' failed: expected %#v but got %#v", tc.caseName, tc.expected, res)
+		}
+	}
+}
+
+// TestNoBaseImageSpecifierIsScratch guards against the upstream imagebuilder
+// constant (and our alias of it) ever changing away from "scratch", which the
+// rest of the code relies on.
+func TestNoBaseImageSpecifierIsScratch(t *testing.T) {
+	assert.Equal(t, "scratch", imagebuilder.NoBaseImageSpecifier) // juuuuust in case
+	assert.Equal(t, "scratch", BaseImageFakeName)
+}
diff --git a/nix/default-arm64.nix b/nix/default-arm64.nix
new file mode 100644
index 0000000..77eaa51
--- /dev/null
+++ b/nix/default-arm64.nix
@@ -0,0 +1,85 @@
+# Static cross-build of buildah for aarch64-linux.  Mirrors nix/default.nix
+# but pins crossSystem to aarch64-unknown-linux-gnu; several C dependencies
+# are overridden to build as static libraries.
+let
+  pkgs = (import ./nixpkgs.nix {
+    crossSystem = {
+      config = "aarch64-unknown-linux-gnu";
+    };
+    config = {
+      # Rebuild the C dependencies buildah links against as static libraries.
+      packageOverrides = pkg: {
+        gpgme = (static pkg.gpgme);
+        libassuan = (static pkg.libassuan);
+        libgpgerror = (static pkg.libgpgerror);
+        libseccomp = (static pkg.libseccomp);
+        glib = (static pkg.glib).overrideAttrs (x: {
+          outputs = [ "bin" "out" "dev" ];
+          mesonFlags = [
+            "-Ddefault_library=static"
+            "-Ddevbindir=${placeholder ''dev''}/bin"
+            "-Dgtk_doc=false"
+            "-Dnls=disabled"
+          ];
+          # Keep glib's dev tooling usable from the split "dev" output.
+          postInstall = ''
+            moveToOutput "share/glib-2.0" "$dev"
+            substituteInPlace "$dev/bin/gdbus-codegen" --replace "$out" "$dev"
+            sed -i "$dev/bin/glib-gettextize" -e "s|^gettext_dir=.*|gettext_dir=$dev/share/glib-2.0/gettext|"
+            sed '1i#line 1 "${x.pname}-${x.version}/include/glib-2.0/gobject/gobjectnotifyqueue.c"' \
+              -i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
+          '';
+        });
+        pcsclite = (static pkg.pcsclite).overrideAttrs (x: {
+          configureFlags = [
+            "--enable-confdir=/etc"
+            "--enable-usbdropdir=/var/lib/pcsc/drivers"
+            "--disable-libsystemd"
+            "--disable-libudev"
+            "--disable-libusb"
+          ];
+          buildInputs = [ pkgs.python3 pkgs.dbus ];
+        });
+        systemd = (static pkg.systemd).overrideAttrs (x: {
+          outputs = [ "out" "dev" ];
+          mesonFlags = x.mesonFlags ++ [
+            "-Dglib=false"
+            "-Dstatic-libsystemd=true"
+          ];
+        });
+      };
+    };
+  });
+
+  # static: force a package to build static (non-shared) artifacts and skip
+  # its test suite.
+  static = pkg: pkg.overrideAttrs (x: {
+    doCheck = false;
+    configureFlags = (x.configureFlags or [ ]) ++ [
+      "--without-shared"
+      "--disable-shared"
+    ];
+    dontDisableStatic = true;
+    enableSharedExecutables = false;
+    enableStatic = true;
+  });
+
+  # The buildah derivation: builds bin/buildah with the project Makefile,
+  # statically linked via the CFLAGS/LDFLAGS exported in prePatch.
+  self = with pkgs; buildGoModule rec {
+    name = "buildah";
+    src = ./..;
+    vendorSha256 = null;
+    doCheck = false;
+    enableParallelBuilding = true;
+    outputs = [ "out" ];
+    nativeBuildInputs = [ bash gitMinimal go-md2man pkg-config which ];
+    buildInputs = [ glibc glibc.static glib gpgme libassuan libgpgerror libseccomp libapparmor libselinux ];
+    prePatch = ''
+      export CFLAGS='-static -pthread'
+      export LDFLAGS='-s -w -static-libgcc -static'
+      export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
+      export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper seccomp apparmor selinux'
+      export CGO_ENABLED=1
+    '';
+    buildPhase = ''
+      patchShebangs .
+      make bin/buildah
+    '';
+    installPhase = ''
+      install -Dm755 bin/buildah $out/bin/buildah
+    '';
+  };
+in
+self
diff --git a/nix/default.nix b/nix/default.nix
new file mode 100644
index 0000000..9b5c39f
--- /dev/null
+++ b/nix/default.nix
@@ -0,0 +1,83 @@
+{ system ? builtins.currentSystem }:
+let
+ pkgs = (import ./nixpkgs.nix {
+ config = {
+ packageOverrides = pkg: {
+ gpgme = (static pkg.gpgme);
+ libassuan = (static pkg.libassuan);
+ libgpgerror = (static pkg.libgpgerror);
+ libseccomp = (static pkg.libseccomp);
+ glib = (static pkg.glib).overrideAttrs (x: {
+ outputs = [ "bin" "out" "dev" ];
+ mesonFlags = [
+ "-Ddefault_library=static"
+ "-Ddevbindir=${placeholder ''dev''}/bin"
+ "-Dgtk_doc=false"
+ "-Dnls=disabled"
+ ];
+ postInstall = ''
+ moveToOutput "share/glib-2.0" "$dev"
+ substituteInPlace "$dev/bin/gdbus-codegen" --replace "$out" "$dev"
+ sed -i "$dev/bin/glib-gettextize" -e "s|^gettext_dir=.*|gettext_dir=$dev/share/glib-2.0/gettext|"
+ sed '1i#line 1 "${x.pname}-${x.version}/include/glib-2.0/gobject/gobjectnotifyqueue.c"' \
+ -i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
+ '';
+ });
+ pcsclite = (static pkg.pcsclite).overrideAttrs (x: {
+ configureFlags = [
+ "--enable-confdir=/etc"
+ "--enable-usbdropdir=/var/lib/pcsc/drivers"
+ "--disable-libsystemd"
+ "--disable-libudev"
+ "--disable-libusb"
+ ];
+ buildInputs = [ pkgs.python3 pkgs.dbus ];
+ });
+ systemd = (static pkg.systemd).overrideAttrs (x: {
+ outputs = [ "out" "dev" ];
+ mesonFlags = x.mesonFlags ++ [
+ "-Dglib=false"
+ "-Dstatic-libsystemd=true"
+ ];
+ });
+ };
+ };
+ });
+
+ static = pkg: pkg.overrideAttrs (x: {
+ doCheck = false;
+ configureFlags = (x.configureFlags or [ ]) ++ [
+ "--without-shared"
+ "--disable-shared"
+ ];
+ dontDisableStatic = true;
+ enableSharedExecutables = false;
+ enableStatic = true;
+ });
+
+ self = with pkgs; buildGoModule rec {
+ name = "buildah";
+ src = ./..;
+ vendorSha256 = null;
+ doCheck = false;
+ enableParallelBuilding = true;
+ outputs = [ "out" ];
+ nativeBuildInputs = [ bash gitMinimal go-md2man pkg-config which ];
+ buildInputs = [ glibc glibc.static glib gpgme libassuan libgpgerror libseccomp libapparmor libselinux ];
+ prePatch = ''
+ export CFLAGS='-static -pthread'
+ export LDFLAGS='-s -w -static-libgcc -static'
+ export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
+ export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper seccomp apparmor selinux'
+ export CGO_ENABLED=1
+ '';
+ buildPhase = ''
+ patchShebangs .
+ make bin/buildah
+ '';
+ installPhase = ''
+ install -Dm755 bin/buildah $out/bin/buildah
+ '';
+ };
+in
+self
diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json
new file mode 100644
index 0000000..efcfe20
--- /dev/null
+++ b/nix/nixpkgs.json
@@ -0,0 +1,10 @@
+{
+ "url": "https://github.com/nixos/nixpkgs",
+ "rev": "2a96414d7e350160a33ed0978449c9ff5b5a6eb3",
+ "date": "2021-07-13T18:21:47+02:00",
+ "path": "/nix/store/2ai9q8ac6vxb2rrngdz82y8jxnk15cvm-nixpkgs",
+ "sha256": "1dzrfqdjq3yq5jjskiqflzy58l2xx6059gay9p1k07zrlm1wigy5",
+ "fetchSubmodules": false,
+ "deepClone": false,
+ "leaveDotGit": false
+}
diff --git a/nix/nixpkgs.nix b/nix/nixpkgs.nix
new file mode 100644
index 0000000..11c2038
--- /dev/null
+++ b/nix/nixpkgs.nix
@@ -0,0 +1,9 @@
+let
+ json = builtins.fromJSON (builtins.readFile ./nixpkgs.json);
+ nixpkgs = import (builtins.fetchTarball {
+ name = "nixos-unstable";
+ url = "${json.url}/archive/${json.rev}.tar.gz";
+ inherit (json) sha256;
+ });
+in
+nixpkgs
diff --git a/pkg/blobcache/blobcache.go b/pkg/blobcache/blobcache.go
new file mode 100644
index 0000000..fa60619
--- /dev/null
+++ b/pkg/blobcache/blobcache.go
@@ -0,0 +1,31 @@
+package blobcache
+
+import (
+ imageBlobCache "github.com/containers/image/v5/pkg/blobcache"
+ "github.com/containers/image/v5/types"
+)
+
+// BlobCache is an object which saves copies of blobs that are written to it while passing them
+// through to some real destination, and which can be queried directly in order to read them
+// back.
+type BlobCache interface {
+ types.ImageReference
+ // HasBlob checks if a blob that matches the passed-in digest (and
+ // size, if not -1), is present in the cache.
+ HasBlob(types.BlobInfo) (bool, int64, error)
+	// Directory returns the path of the cache directory.
+ Directory() string
+ // ClearCache() clears the contents of the cache directories. Note
+ // that this also clears content which was not placed there by this
+ // cache implementation.
+ ClearCache() error
+}
+
+// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
+// written to the destination image created from the resulting reference will also be stored
+// as-is to the specified directory or a temporary directory.
+// The compress argument controls whether or not the cache will try to substitute a compressed
+// or different version of a blob when preparing the list of layers when reading an image.
+func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) {
+ return imageBlobCache.NewBlobCache(ref, directory, compress)
+}
diff --git a/pkg/chrootuser/user.go b/pkg/chrootuser/user.go
new file mode 100644
index 0000000..4614ecf
--- /dev/null
+++ b/pkg/chrootuser/user.go
@@ -0,0 +1,116 @@
+package chrootuser
+
+import (
+ "errors"
+ "fmt"
+ "os/user"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ErrNoSuchUser indicates that the user provided by the caller does not
+	// exist in /etc/passwd
+ ErrNoSuchUser = errors.New("user does not exist in /etc/passwd")
+)
+
+// GetUser returns the uid, gid, and home directory of the user specified
+// in the userspec. It consults the /etc/passwd and /etc/group files inside
+// of the rootdir to resolve this information.
+// userspec format [user | user:group | uid | uid:gid | user:gid | uid:group ]
+func GetUser(rootdir, userspec string) (uint32, uint32, string, error) {
+ var gid64 uint64
+ var gerr error = user.UnknownGroupError("error looking up group")
+
+ spec := strings.SplitN(userspec, ":", 2)
+ userspec = spec[0]
+ groupspec := ""
+
+ if userspec == "" {
+ userspec = "0"
+ }
+
+ if len(spec) > 1 {
+ groupspec = spec[1]
+ }
+
+ uid64, uerr := strconv.ParseUint(userspec, 10, 32)
+ if uerr == nil && groupspec == "" {
+ // We parsed the user name as a number, and there's no group
+ // component, so try to look up the primary GID of the user who
+ // has this UID.
+ var name string
+ name, gid64, gerr = lookupGroupForUIDInContainer(rootdir, uid64)
+ if gerr == nil {
+ userspec = name
+ } else {
+ // Leave userspec alone, but swallow the error and just
+ // use GID 0.
+ gid64 = 0
+ gerr = nil
+ }
+ }
+ if uerr != nil {
+ // The user ID couldn't be parsed as a number, so try to look
+ // up the user's UID and primary GID.
+ uid64, gid64, uerr = lookupUserInContainer(rootdir, userspec)
+ gerr = uerr
+ }
+
+ if groupspec != "" {
+ // We have a group name or number, so parse it.
+ gid64, gerr = strconv.ParseUint(groupspec, 10, 32)
+ if gerr != nil {
+ // The group couldn't be parsed as a number, so look up
+ // the group's GID.
+ gid64, gerr = lookupGroupInContainer(rootdir, groupspec)
+ }
+ }
+
+ homedir, err := lookupHomedirInContainer(rootdir, uid64)
+ if err != nil {
+ homedir = "/"
+ }
+
+ if uerr == nil && gerr == nil {
+ return uint32(uid64), uint32(gid64), homedir, nil
+ }
+
+ err = fmt.Errorf("determining run uid: %w", uerr)
+ if uerr == nil {
+ err = fmt.Errorf("determining run gid: %w", gerr)
+ }
+
+ return 0, 0, homedir, err
+}
+
+// GetGroup returns the gid by looking it up in the /etc/group file
+// groupspec format [ group | gid ]
+func GetGroup(rootdir, groupspec string) (uint32, error) {
+ gid64, gerr := strconv.ParseUint(groupspec, 10, 32)
+ if gerr != nil {
+ // The group couldn't be parsed as a number, so look up
+ // the group's GID.
+ gid64, gerr = lookupGroupInContainer(rootdir, groupspec)
+ }
+ if gerr != nil {
+ return 0, fmt.Errorf("looking up group for gid %q: %w", groupspec, gerr)
+ }
+ return uint32(gid64), nil
+}
+
+// GetAdditionalGroupsForUser returns a list of gids that userid is associated with
+func GetAdditionalGroupsForUser(rootdir string, userid uint64) ([]uint32, error) {
+ gids, err := lookupAdditionalGroupsForUIDInContainer(rootdir, userid)
+ if err != nil {
+ return nil, fmt.Errorf("looking up supplemental groups for uid %d: %w", userid, err)
+ }
+ return gids, nil
+}
+
+// LookupUIDInContainer returns username and gid associated with a UID in a container
+// it will use the /etc/passwd files inside of the rootdir
+// to return this information.
+func LookupUIDInContainer(rootdir string, uid uint64) (user string, gid uint64, err error) {
+ return lookupUIDInContainer(rootdir, uid)
+}
diff --git a/pkg/chrootuser/user_basic.go b/pkg/chrootuser/user_basic.go
new file mode 100644
index 0000000..5655a54
--- /dev/null
+++ b/pkg/chrootuser/user_basic.go
@@ -0,0 +1,32 @@
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
+
+package chrootuser
+
+import (
+ "errors"
+)
+
+func lookupUserInContainer(rootdir, username string) (uint64, uint64, error) {
+ return 0, 0, errors.New("user lookup not supported")
+}
+
+func lookupGroupInContainer(rootdir, groupname string) (uint64, error) {
+ return 0, errors.New("group lookup not supported")
+}
+
+func lookupGroupForUIDInContainer(rootdir string, userid uint64) (string, uint64, error) {
+ return "", 0, errors.New("primary group lookup by uid not supported")
+}
+
+func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid []uint32, err error) {
+ return nil, errors.New("supplemental groups list lookup by uid not supported")
+}
+
+func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) {
+ return "", 0, errors.New("UID lookup not supported")
+}
+
+func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) {
+ return "", errors.New("Home directory lookup not supported")
+}
diff --git a/pkg/chrootuser/user_test.go b/pkg/chrootuser/user_test.go
new file mode 100644
index 0000000..6b0dafa
--- /dev/null
+++ b/pkg/chrootuser/user_test.go
@@ -0,0 +1,40 @@
+package chrootuser
+
+import (
+ "bufio"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var testGroupData = `# comment
+ # indented comment
+wheel:*:0:root
+daemon:*:1:
+kmem:*:2:
+`
+
+func TestParseStripComments(t *testing.T) {
+ // Test reading group file, ignoring comment lines
+ rc := bufio.NewScanner(strings.NewReader(testGroupData))
+ line, ok := scanWithoutComments(rc)
+ assert.Equal(t, ok, true)
+ assert.Equal(t, line, "wheel:*:0:root")
+}
+
+func TestParseNextGroup(t *testing.T) {
+ // Test parsing group file
+ rc := bufio.NewScanner(strings.NewReader(testGroupData))
+ expected := []lookupGroupEntry{
+ lookupGroupEntry{"wheel", 0, "root"},
+ lookupGroupEntry{"daemon", 1, ""},
+ lookupGroupEntry{"kmem", 2, ""},
+ }
+ for _, exp := range expected {
+ grp := parseNextGroup(rc)
+ assert.NotNil(t, grp)
+ assert.Equal(t, *grp, exp)
+ }
+ assert.Nil(t, parseNextGroup(rc))
+}
diff --git a/pkg/chrootuser/user_unix.go b/pkg/chrootuser/user_unix.go
new file mode 100644
index 0000000..0ccaf8a
--- /dev/null
+++ b/pkg/chrootuser/user_unix.go
@@ -0,0 +1,314 @@
+//go:build linux || freebsd
+// +build linux freebsd
+
+package chrootuser
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "os/user"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/containers/storage/pkg/reexec"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ openChrootedCommand = "chrootuser-open"
+)
+
+func init() {
+ reexec.Register(openChrootedCommand, openChrootedFileMain)
+}
+
+func openChrootedFileMain() {
+ status := 0
+ flag.Parse()
+ if len(flag.Args()) < 1 {
+ os.Exit(1)
+ }
+ // Our first parameter is the directory to chroot into.
+ if err := unix.Chdir(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chdir(): %v", err)
+ os.Exit(1)
+ }
+ if err := unix.Chroot(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chroot(): %v", err)
+ os.Exit(1)
+ }
+ // Anything else is a file we want to dump out.
+ for _, filename := range flag.Args()[1:] {
+ f, err := os.Open(filename)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "open(%q): %v", filename, err)
+ status = 1
+ continue
+ }
+ _, err = io.Copy(os.Stdout, f)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "read(%q): %v", filename, err)
+ }
+ f.Close()
+ }
+ os.Exit(status)
+}
+
+func openChrootedFile(rootdir, filename string) (*exec.Cmd, io.ReadCloser, error) {
+ // The child process expects a chroot and one or more filenames that
+ // will be consulted relative to the chroot directory and concatenated
+ // to its stdout. Start it up.
+ cmd := reexec.Command(openChrootedCommand, rootdir, filename)
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, nil, err
+ }
+ err = cmd.Start()
+ if err != nil {
+ return nil, nil, err
+ }
+ // Hand back the child's stdout for reading, and the child to reap.
+ return cmd, stdout, nil
+}
+
+var (
+ lookupUser, lookupGroup sync.Mutex
+)
+
+type lookupPasswdEntry struct {
+ name string
+ uid uint64
+ gid uint64
+ home string
+}
+type lookupGroupEntry struct {
+ name string
+ gid uint64
+ user string
+}
+
+func scanWithoutComments(rc *bufio.Scanner) (string, bool) {
+ for {
+ if !rc.Scan() {
+ return "", false
+ }
+ line := rc.Text()
+ if strings.HasPrefix(strings.TrimSpace(line), "#") {
+ continue
+ }
+ return line, true
+ }
+}
+
+func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry {
+ if !rc.Scan() {
+ return nil
+ }
+ line := rc.Text()
+ fields := strings.Split(line, ":")
+ if len(fields) != 7 {
+ return nil
+ }
+ uid, err := strconv.ParseUint(fields[2], 10, 32)
+ if err != nil {
+ return nil
+ }
+ gid, err := strconv.ParseUint(fields[3], 10, 32)
+ if err != nil {
+ return nil
+ }
+ return &lookupPasswdEntry{
+ name: fields[0],
+ uid: uid,
+ gid: gid,
+ home: fields[5],
+ }
+}
+
+func parseNextGroup(rc *bufio.Scanner) *lookupGroupEntry {
+ // On FreeBSD, /etc/group may contain comments:
+ // https://man.freebsd.org/cgi/man.cgi?query=group&sektion=5&format=html
+ // We need to ignore those lines rather than trying to parse them.
+ line, ok := scanWithoutComments(rc)
+ if !ok {
+ return nil
+ }
+ fields := strings.Split(line, ":")
+ if len(fields) != 4 {
+ return nil
+ }
+ gid, err := strconv.ParseUint(fields[2], 10, 32)
+ if err != nil {
+ return nil
+ }
+ return &lookupGroupEntry{
+ name: fields[0],
+ gid: gid,
+ user: fields[3],
+ }
+}
+
+func lookupUserInContainer(rootdir, username string) (uid uint64, gid uint64, err error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return 0, 0, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.name != username {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.uid, pwd.gid, nil
+ }
+
+ return 0, 0, user.UnknownUserError(fmt.Sprintf("error looking up user %q", username))
+}
+
+func lookupGroupForUIDInContainer(rootdir string, userid uint64) (username string, gid uint64, err error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return "", 0, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.uid != userid {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.name, pwd.gid, nil
+ }
+
+ return "", 0, ErrNoSuchUser
+}
+
+func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid []uint32, err error) {
+ // Get the username associated with userid
+ username, _, err := lookupGroupForUIDInContainer(rootdir, userid)
+ if err != nil {
+ return nil, err
+ }
+
+ cmd, f, err := openChrootedFile(rootdir, "/etc/group")
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupGroup.Lock()
+ defer lookupGroup.Unlock()
+
+ grp := parseNextGroup(rc)
+ for grp != nil {
+ if strings.Contains(grp.user, username) {
+ gid = append(gid, uint32(grp.gid))
+ }
+ grp = parseNextGroup(rc)
+ }
+ return gid, nil
+}
+
+func lookupGroupInContainer(rootdir, groupname string) (gid uint64, err error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/group")
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupGroup.Lock()
+ defer lookupGroup.Unlock()
+
+ grp := parseNextGroup(rc)
+ for grp != nil {
+ if grp.name != groupname {
+ grp = parseNextGroup(rc)
+ continue
+ }
+ return grp.gid, nil
+ }
+
+ return 0, user.UnknownGroupError(fmt.Sprintf("error looking up group %q", groupname))
+}
+
+func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return "", 0, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.uid != uid {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.name, pwd.gid, nil
+ }
+
+ return "", 0, user.UnknownUserError(fmt.Sprintf("error looking up uid %q", uid))
+}
+
+func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.uid != uid {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.home, nil
+ }
+
+ return "", user.UnknownUserError(fmt.Sprintf("error looking up uid %q for homedir", uid))
+}
diff --git a/pkg/cli/build.go b/pkg/cli/build.go
new file mode 100644
index 0000000..e58e755
--- /dev/null
+++ b/pkg/cli/build.go
@@ -0,0 +1,477 @@
+package cli
+
+// the cli package contains spf13/cobra related structs that help make up
+// the command line for buildah commands. it resides here so other projects
+// that vendor in this code can use them too.
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/pkg/util"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+type BuildOptions struct {
+ *LayerResults
+ *BudResults
+ *UserNSResults
+ *FromAndBudResults
+ *NameSpaceResults
+ Logwriter *os.File
+}
+
+const (
+ MaxPullPushRetries = 3
+ PullPushRetryDelay = 2 * time.Second
+)
+
+// GenBuildOptions translates command line flags into a BuildOptions structure
+func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (define.BuildOptions, []string, []string, error) {
+ options := define.BuildOptions{}
+
+ var removeAll []string
+
+ output := ""
+ cleanTmpFile := false
+ tags := []string{}
+ if iopts.Network == "none" {
+ if c.Flag("dns").Changed {
+ return options, nil, nil, errors.New("the --dns option cannot be used with --network=none")
+ }
+ if c.Flag("dns-option").Changed {
+ return options, nil, nil, errors.New("the --dns-option option cannot be used with --network=none")
+ }
+ if c.Flag("dns-search").Changed {
+ return options, nil, nil, errors.New("the --dns-search option cannot be used with --network=none")
+ }
+
+ }
+ if c.Flag("tag").Changed {
+ tags = iopts.Tag
+ if len(tags) > 0 {
+ output = tags[0]
+ tags = tags[1:]
+ }
+ if c.Flag("manifest").Changed {
+ for _, tag := range tags {
+ if tag == iopts.Manifest {
+ return options, nil, nil, errors.New("the same name must not be specified for both '--tag' and '--manifest'")
+ }
+ }
+ }
+ }
+ if err := auth.CheckAuthFile(iopts.BudResults.Authfile); err != nil {
+ return options, nil, nil, err
+ }
+
+ if c.Flag("logsplit").Changed {
+ if !c.Flag("logfile").Changed {
+ return options, nil, nil, errors.New("cannot use --logsplit without --logfile")
+ }
+ }
+
+ iopts.BudResults.Authfile, cleanTmpFile = util.MirrorToTempFileIfPathIsDescriptor(iopts.BudResults.Authfile)
+ if cleanTmpFile {
+ removeAll = append(removeAll, iopts.BudResults.Authfile)
+ }
+
+ // Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always
+ // --pull-always and --pull-never. The --pull-never and --pull-always options
+ // will not be documented.
+ pullPolicy := define.PullIfMissing
+ if strings.EqualFold(strings.TrimSpace(iopts.Pull), "true") {
+ pullPolicy = define.PullIfNewer
+ }
+ if iopts.PullAlways || strings.EqualFold(strings.TrimSpace(iopts.Pull), "always") {
+ pullPolicy = define.PullAlways
+ }
+ if iopts.PullNever || strings.EqualFold(strings.TrimSpace(iopts.Pull), "never") {
+ pullPolicy = define.PullNever
+ }
+ logrus.Debugf("Pull Policy for pull [%v]", pullPolicy)
+
+ args := make(map[string]string)
+ if c.Flag("build-arg-file").Changed {
+ for _, argfile := range iopts.BuildArgFile {
+ if err := readBuildArgFile(argfile, args); err != nil {
+ return options, nil, nil, err
+ }
+ }
+ }
+ if c.Flag("build-arg").Changed {
+ for _, arg := range iopts.BuildArg {
+ readBuildArg(arg, args)
+ }
+ }
+
+ additionalBuildContext := make(map[string]*define.AdditionalBuildContext)
+ if c.Flag("build-context").Changed {
+ for _, contextString := range iopts.BuildContext {
+ av := strings.SplitN(contextString, "=", 2)
+ if len(av) > 1 {
+ parseAdditionalBuildContext, err := parse.GetAdditionalBuildContext(av[1])
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("while parsing additional build context: %w", err)
+ }
+ additionalBuildContext[av[0]] = &parseAdditionalBuildContext
+ } else {
+ return options, nil, nil, fmt.Errorf("while parsing additional build context: %q, accepts value in the form of key=value", av)
+ }
+ }
+ }
+
+ containerfiles := getContainerfiles(iopts.File)
+ format, err := GetFormat(iopts.Format)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ layers := UseLayers()
+ if c.Flag("layers").Changed {
+ layers = iopts.Layers
+ }
+ contextDir := ""
+ cliArgs := inputArgs
+
+ // Nothing provided, we assume the current working directory as build
+ // context
+ if len(cliArgs) == 0 {
+ contextDir, err = os.Getwd()
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to choose current working directory as build context: %w", err)
+ }
+ } else {
+ // The context directory could be a URL. Try to handle that.
+ tempDir, subDir, err := define.TempDirForURL("", "buildah", cliArgs[0])
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("prepping temporary context directory: %w", err)
+ }
+ if tempDir != "" {
+ // We had to download it to a temporary directory.
+ // Delete it later.
+ removeAll = append(removeAll, tempDir)
+ contextDir = filepath.Join(tempDir, subDir)
+ } else {
+ // Nope, it was local. Use it as is.
+ absDir, err := filepath.Abs(cliArgs[0])
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("determining path to directory: %w", err)
+ }
+ contextDir = absDir
+ }
+ }
+
+ if len(containerfiles) == 0 {
+ // Try to find the Containerfile/Dockerfile within the contextDir
+ containerfile, err := util.DiscoverContainerfile(contextDir)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ containerfiles = append(containerfiles, containerfile)
+ contextDir = filepath.Dir(containerfile)
+ }
+
+ contextDir, err = filepath.EvalSymlinks(contextDir)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("evaluating symlinks in build context path: %w", err)
+ }
+
+ var stdin io.Reader
+ if iopts.Stdin {
+ stdin = os.Stdin
+ }
+
+ var stdout, stderr, reporter *os.File
+ stdout = os.Stdout
+ stderr = os.Stderr
+ reporter = os.Stderr
+ if iopts.Logwriter != nil {
+ logrus.SetOutput(iopts.Logwriter)
+ stdout = iopts.Logwriter
+ stderr = iopts.Logwriter
+ reporter = iopts.Logwriter
+ }
+
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("building system context: %w", err)
+ }
+
+ isolation, err := parse.IsolationOption(iopts.Isolation)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ runtimeFlags := []string{}
+ for _, arg := range iopts.RuntimeFlags {
+ runtimeFlags = append(runtimeFlags, "--"+arg)
+ }
+
+ commonOpts, err := parse.CommonBuildOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ pullFlagsCount := 0
+ if c.Flag("pull").Changed {
+ pullFlagsCount++
+ }
+ if c.Flag("pull-always").Changed {
+ pullFlagsCount++
+ }
+ if c.Flag("pull-never").Changed {
+ pullFlagsCount++
+ }
+
+ if pullFlagsCount > 1 {
+ return options, nil, nil, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'")
+ }
+
+ if (c.Flag("rm").Changed || c.Flag("force-rm").Changed) && (!c.Flag("layers").Changed && !c.Flag("no-cache").Changed) {
+ return options, nil, nil, errors.New("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
+ }
+
+ if c.Flag("compress").Changed {
+ logrus.Debugf("--compress option specified but is ignored")
+ }
+
+ compression := define.Gzip
+ if iopts.DisableCompression {
+ compression = define.Uncompressed
+ }
+
+ if c.Flag("disable-content-trust").Changed {
+ logrus.Debugf("--disable-content-trust option specified but is ignored")
+ }
+
+ namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("parsing ID mapping options: %w", err)
+ }
+ namespaceOptions.AddOrReplace(usernsOption...)
+
+ platforms, err := parse.PlatformsFromOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ decryptConfig, err := DecryptConfig(iopts.DecryptionKeys)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to obtain decrypt config: %w", err)
+ }
+
+ var excludes []string
+ if iopts.IgnoreFile != "" {
+ if excludes, _, err = parse.ContainerIgnoreFile(contextDir, iopts.IgnoreFile, containerfiles); err != nil {
+ return options, nil, nil, err
+ }
+ }
+ var timestamp *time.Time
+ if c.Flag("timestamp").Changed {
+ t := time.Unix(iopts.Timestamp, 0).UTC()
+ timestamp = &t
+ }
+ if c.Flag("output").Changed {
+ buildOption, err := parse.GetBuildOutput(iopts.BuildOutput)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ if buildOption.IsStdout {
+ iopts.Quiet = true
+ }
+ }
+ var confidentialWorkloadOptions define.ConfidentialWorkloadOptions
+ if c.Flag("cw").Changed {
+ confidentialWorkloadOptions, err = parse.GetConfidentialWorkloadOptions(iopts.CWOptions)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ }
+ var cacheTo []reference.Named
+ var cacheFrom []reference.Named
+ cacheTo = nil
+ cacheFrom = nil
+ if c.Flag("cache-to").Changed {
+ cacheTo, err = parse.RepoNamesToNamedReferences(iopts.CacheTo)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", iopts.CacheTo, err)
+ }
+ }
+ if c.Flag("cache-from").Changed {
+ cacheFrom, err = parse.RepoNamesToNamedReferences(iopts.CacheFrom)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", iopts.CacheTo, err)
+ }
+ }
+ var cacheTTL time.Duration
+ if c.Flag("cache-ttl").Changed {
+ cacheTTL, err = time.ParseDuration(iopts.CacheTTL)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to parse value provided %q as --cache-ttl: %w", iopts.CacheTTL, err)
+ }
+ // If user explicitly specified `--cache-ttl=0s`
+ // it would effectively mean that user is asking
+ // to use no cache at all. In such use cases
+ // buildah can skip looking for cache entirely
+ // by setting `--no-cache=true` internally.
+ if int64(cacheTTL) == 0 {
+ logrus.Debug("Setting --no-cache=true since --cache-ttl was set to 0s which effectively means user wants to ignore cache")
+ if c.Flag("no-cache").Changed && !iopts.NoCache {
+ return options, nil, nil, fmt.Errorf("cannot use --cache-ttl with duration as 0 and --no-cache=false")
+ }
+ iopts.NoCache = true
+ }
+ }
+ var pullPushRetryDelay time.Duration
+ pullPushRetryDelay, err = time.ParseDuration(iopts.RetryDelay)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to parse value provided %q as --retry-delay: %w", iopts.RetryDelay, err)
+ }
+ // Following log line is used in integration test.
+ logrus.Debugf("Setting MaxPullPushRetries to %d and PullPushRetryDelay to %v", iopts.Retry, pullPushRetryDelay)
+
+ if c.Flag("network").Changed && c.Flag("isolation").Changed {
+ if isolation == define.IsolationChroot {
+ if ns := namespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
+ if !ns.Host {
+ return options, nil, nil, fmt.Errorf("cannot set --network other than host with --isolation %s", c.Flag("isolation").Value.String())
+ }
+ }
+ }
+ }
+
+ options = define.BuildOptions{
+ AddCapabilities: iopts.CapAdd,
+ AdditionalBuildContexts: additionalBuildContext,
+ AdditionalTags: tags,
+ AllPlatforms: iopts.AllPlatforms,
+ Annotations: iopts.Annotation,
+ Architecture: systemContext.ArchitectureChoice,
+ Args: args,
+ BlobDirectory: iopts.BlobCache,
+ BuildOutput: iopts.BuildOutput,
+ CacheFrom: cacheFrom,
+ CacheTo: cacheTo,
+ CacheTTL: cacheTTL,
+ CNIConfigDir: iopts.CNIConfigDir,
+ CNIPluginPath: iopts.CNIPlugInPath,
+ ConfidentialWorkload: confidentialWorkloadOptions,
+ CPPFlags: iopts.CPPFlags,
+ CommonBuildOpts: commonOpts,
+ Compression: compression,
+ ConfigureNetwork: networkPolicy,
+ ContextDirectory: contextDir,
+ Devices: iopts.Devices,
+ DropCapabilities: iopts.CapDrop,
+ Err: stderr,
+ Excludes: excludes,
+ ForceRmIntermediateCtrs: iopts.ForceRm,
+ From: iopts.From,
+ GroupAdd: iopts.GroupAdd,
+ IDMappingOptions: idmappingOptions,
+ IIDFile: iopts.Iidfile,
+ IgnoreFile: iopts.IgnoreFile,
+ In: stdin,
+ Isolation: isolation,
+ Jobs: &iopts.Jobs,
+ Labels: iopts.Label,
+ LayerLabels: iopts.LayerLabel,
+ Layers: layers,
+ LogFile: iopts.Logfile,
+ LogRusage: iopts.LogRusage,
+ LogSplitByPlatform: iopts.LogSplitByPlatform,
+ Manifest: iopts.Manifest,
+ MaxPullPushRetries: iopts.Retry,
+ NamespaceOptions: namespaceOptions,
+ NoCache: iopts.NoCache,
+ OS: systemContext.OSChoice,
+ OSFeatures: iopts.OSFeatures,
+ OSVersion: iopts.OSVersion,
+ OciDecryptConfig: decryptConfig,
+ Out: stdout,
+ Output: output,
+ OutputFormat: format,
+ Platforms: platforms,
+ PullPolicy: pullPolicy,
+ PullPushRetryDelay: pullPushRetryDelay,
+ Quiet: iopts.Quiet,
+ RemoveIntermediateCtrs: iopts.Rm,
+ ReportWriter: reporter,
+ Runtime: iopts.Runtime,
+ RuntimeArgs: runtimeFlags,
+ RusageLogFile: iopts.RusageLogFile,
+ SignBy: iopts.SignBy,
+ SignaturePolicyPath: iopts.SignaturePolicy,
+ SkipUnusedStages: types.NewOptionalBool(iopts.SkipUnusedStages),
+ Squash: iopts.Squash,
+ SystemContext: systemContext,
+ Target: iopts.Target,
+ Timestamp: timestamp,
+ TransientMounts: iopts.Volumes,
+ UnsetEnvs: iopts.UnsetEnvs,
+ UnsetLabels: iopts.UnsetLabels,
+ }
+ if iopts.Quiet {
+ options.ReportWriter = io.Discard
+ }
+
+ options.Envs = LookupEnvVarReferences(iopts.Envs, os.Environ())
+
+ return options, containerfiles, removeAll, nil
+}
+
+func readBuildArgFile(buildargfile string, args map[string]string) error {
+ argfile, err := os.ReadFile(buildargfile)
+ if err != nil {
+ return err
+ }
+ for _, arg := range strings.Split(string(argfile), "\n") {
+ if len(arg) == 0 || arg[0] == '#' {
+ continue
+ }
+ readBuildArg(arg, args)
+ }
+ return err
+}
+
+func readBuildArg(buildarg string, args map[string]string) {
+ av := strings.SplitN(buildarg, "=", 2)
+ if len(av) > 1 {
+ args[av[0]] = av[1]
+ } else {
+ // check if the env is set in the local environment and use that value if it is
+ if val, present := os.LookupEnv(av[0]); present {
+ args[av[0]] = val
+ } else {
+ delete(args, av[0])
+ }
+ }
+}
+
+func getContainerfiles(files []string) []string {
+ var containerfiles []string
+ for _, f := range files {
+ if f == "-" {
+ containerfiles = append(containerfiles, "/dev/stdin")
+ } else {
+ containerfiles = append(containerfiles, f)
+ }
+ }
+ return containerfiles
+}
diff --git a/pkg/cli/common.go b/pkg/cli/common.go
new file mode 100644
index 0000000..76e03ba
--- /dev/null
+++ b/pkg/cli/common.go
@@ -0,0 +1,584 @@
+package cli
+
+// the cli package contains urfave/cli related structs that help make up
+// the command line for buildah commands. it resides here so other projects
+// that vendor in this code can use them too.
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/completion"
+ "github.com/containers/buildah/pkg/parse"
+ commonComp "github.com/containers/common/pkg/completion"
+ "github.com/containers/common/pkg/config"
+ encconfig "github.com/containers/ocicrypt/config"
+ enchelpers "github.com/containers/ocicrypt/helpers"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/spf13/pflag"
+)
+
// LayerResults holds the values parsed from the layer-related command
// line flags registered by GetLayerFlags (--layers and --force-rm).
type LayerResults struct {
	ForceRm bool
	Layers  bool
}
+
// UserNSResults holds the values parsed from the user-namespace command
// line flags registered by GetUserNSFlags.
type UserNSResults struct {
	UserNS            string
	GroupAdd          []string
	UserNSUIDMap      []string
	UserNSGIDMap      []string
	UserNSUIDMapUser  string
	UserNSGIDMapGroup string
}
+
// NameSpaceResults holds the values parsed from the namespace command
// line flags registered by GetNameSpaceFlags. The CNI fields are
// registered as hidden flags there.
type NameSpaceResults struct {
	Cgroup        string
	IPC           string
	Network       string
	CNIConfigDir  string
	CNIPlugInPath string
	PID           string
	UTS           string
}
+
// BudResults holds the values parsed from the build ("bud") command
// line flags registered by GetBudFlags.
type BudResults struct {
	AllPlatforms        bool
	Annotation          []string
	Authfile            string
	BuildArg            []string
	BuildArgFile        []string
	BuildContext        []string
	CacheFrom           []string
	CacheTo             []string
	CacheTTL            string
	CertDir             string
	Compress            bool
	Creds               string
	CPPFlags            []string
	DisableCompression  bool
	DisableContentTrust bool
	IgnoreFile          string
	File                []string
	Format              string
	From                string
	Iidfile             string
	Label               []string
	LayerLabel          []string
	Logfile             string
	LogSplitByPlatform  bool
	Manifest            string
	NoHostname          bool
	NoHosts             bool
	NoCache             bool
	Timestamp           int64
	OmitHistory         bool
	OCIHooksDir         []string
	Pull                string
	PullAlways          bool
	PullNever           bool
	Quiet               bool
	IdentityLabel       bool
	Rm                  bool
	Runtime             string
	RuntimeFlags        []string
	Secrets             []string
	SSH                 []string
	SignaturePolicy     string
	SignBy              string
	Squash              bool
	SkipUnusedStages    bool
	Stdin               bool
	Tag                 []string
	BuildOutput         string
	Target              string
	TLSVerify           bool
	Jobs                int
	LogRusage           bool
	RusageLogFile       string
	UnsetEnvs           []string
	UnsetLabels         []string
	Envs                []string
	OSFeatures          []string
	OSVersion           string
	CWOptions           string
}
+
// FromAndBudResults holds the values parsed from the command line flags
// shared between the build and from commands, registered by
// GetFromAndBudFlags.
type FromAndBudResults struct {
	AddHost        []string
	BlobCache      string
	CapAdd         []string
	CapDrop        []string
	CgroupParent   string
	CPUPeriod      uint64
	CPUQuota       int64
	CPUSetCPUs     string
	CPUSetMems     string
	CPUShares      uint64
	DecryptionKeys []string
	Devices        []string
	DNSSearch      []string
	DNSServers     []string
	DNSOptions     []string
	HTTPProxy      bool
	Isolation      string
	Memory         string
	MemorySwap     string
	Retry          int
	RetryDelay     string
	SecurityOpt    []string
	ShmSize        string
	Ulimit         []string
	Volumes        []string
}
+
+// GetUserNSFlags returns the common flags for usernamespace
+func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
+ usernsFlags := pflag.FlagSet{}
+ usernsFlags.StringSliceVar(&flags.GroupAdd, "group-add", nil, "add additional groups to the primary container process. 'keep-groups' allows container processes to use supplementary groups.")
+ usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'")
+ usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerUID:hostUID:length` UID mapping to use in user namespace")
+ usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerGID:hostGID:length` GID mapping to use in user namespace")
+ usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping")
+ usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping")
+ return usernsFlags
+}
+
+// GetUserNSFlagsCompletions returns the FlagCompletions for the userns flags
+func GetUserNSFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["group-add"] = commonComp.AutocompleteNone
+ flagCompletion["userns"] = completion.AutocompleteNamespaceFlag
+ flagCompletion["userns-uid-map"] = commonComp.AutocompleteNone
+ flagCompletion["userns-gid-map"] = commonComp.AutocompleteNone
+ flagCompletion["userns-uid-map-user"] = commonComp.AutocompleteSubuidName
+ flagCompletion["userns-gid-map-group"] = commonComp.AutocompleteSubgidName
+ return flagCompletion
+}
+
// GetNameSpaceFlags returns the flag set for configuring the cgroup,
// IPC, network, PID, and UTS namespaces, binding each flag to the
// corresponding field of flags. The legacy CNI flags are kept only for
// backward compatibility and are hidden from help output.
func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
	fs := pflag.FlagSet{}
	fs.StringVar(&flags.Cgroup, "cgroupns", "", "'private', or 'host'")
	// The specs.*Namespace constants are used directly as flag names.
	fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'private', `path` of IPC namespace to join, or 'host'")
	fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'private', 'none', 'ns:path' of network namespace to join, or 'host'")
	fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", "", "`directory` of CNI configuration files")
	_ = fs.MarkHidden("cni-config-dir")
	fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", "", "`path` of CNI network plugins")
	_ = fs.MarkHidden("cni-plugin-path")
	fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "private, `path` of PID namespace to join, or 'host'")
	fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "private, :`path` of UTS namespace to join, or 'host'")
	return fs
}
+
+// GetNameSpaceFlagsCompletions returns the FlagCompletions for the namespace flags
+func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["cgroupns"] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.IPCNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.NetworkNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.PIDNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.UTSNamespace)] = completion.AutocompleteNamespaceFlag
+ return flagCompletion
+}
+
+// GetLayerFlags returns the common flags for layers
+func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.BoolVar(&flags.ForceRm, "force-rm", false, "always remove intermediate containers after a build, even if the build is unsuccessful.")
+ fs.BoolVar(&flags.Layers, "layers", UseLayers(), "use intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.")
+ return fs
+}
+
+// Note: GetLayerFlagsCompletion is not needed since GetLayerFlags only contains bool flags
+
// GetBudFlags returns the flag set for the flags shared by build-style
// commands, binding most flags to the corresponding fields of flags.
// A few flags ("arch", "os", "variant", "loglevel") are registered
// without a bound variable and are read back by the caller or ignored.
// Deprecated or internal flags are hidden from help output; a failure
// to hide one indicates a programming error, hence the panics.
func GetBudFlags(flags *BudResults) pflag.FlagSet {
	fs := pflag.FlagSet{}
	fs.BoolVar(&flags.AllPlatforms, "all-platforms", false, "attempt to build for all base image platforms")
	fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
	fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "set metadata for an image (default [])")
	fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.")
	fs.StringArrayVar(&flags.OCIHooksDir, "hooks-dir", []string{}, "set the OCI hooks directory path (may be set multiple times)")
	fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
	fs.StringArrayVar(&flags.BuildArgFile, "build-arg-file", []string{}, "`argfile.conf` containing lines of argument=value to supply to the builder")
	fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder")
	fs.StringArrayVar(&flags.CacheFrom, "cache-from", []string{}, "remote repository list to utilise as potential cache source.")
	fs.StringArrayVar(&flags.CacheTo, "cache-to", []string{}, "remote repository list to utilise as potential cache destination.")
	fs.StringVar(&flags.CacheTTL, "cache-ttl", "", "only consider cache images under specified duration.")
	fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
	fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image")
	fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)")
	fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
	fs.StringVarP(&flags.CWOptions, "cw", "", "", "confidential workload `options`")
	fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default")
	fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "this is a Docker specific option and is a NOOP")
	fs.StringArrayVar(&flags.Envs, "env", []string{}, "set environment variable for the image")
	fs.StringVar(&flags.From, "from", "", "image name used to replace the value in the first FROM instruction in the Containerfile")
	fs.StringVar(&flags.IgnoreFile, "ignorefile", "", "path to an alternate .dockerignore file")
	fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
	fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
	fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
	fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
	fs.StringArrayVar(&flags.Label, "label", []string{}, "set metadata for an image (default [])")
	fs.StringArrayVar(&flags.LayerLabel, "layer-label", []string{}, "set metadata for an intermediate image (default [])")
	fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
	fs.BoolVar(&flags.LogSplitByPlatform, "logsplit", false, "split logfile to different files for each platform")
	// "loglevel" is kept only so old command lines do not break.
	fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden")
	if err := fs.MarkHidden("loglevel"); err != nil {
		panic(fmt.Sprintf("error marking the loglevel flag as hidden: %v", err))
	}
	fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
	if err := fs.MarkHidden("log-rusage"); err != nil {
		panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
	}
	fs.StringVar(&flags.RusageLogFile, "rusage-logfile", "", "destination file to which rusage should be logged to instead of stdout (= the default).")
	if err := fs.MarkHidden("rusage-logfile"); err != nil {
		panic(fmt.Sprintf("error marking the rusage-logfile flag as hidden: %v", err))
	}
	fs.StringVar(&flags.Manifest, "manifest", "", "add the image to the specified manifest list. Creates manifest list if it does not exist")
	fs.BoolVar(&flags.NoCache, "no-cache", false, "do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
	fs.BoolVar(&flags.NoHostname, "no-hostname", false, "do not create new /etc/hostname file for RUN instructions, use the one from the base image.")
	fs.BoolVar(&flags.NoHosts, "no-hosts", false, "do not create new /etc/hosts file for RUN instructions, use the one from the base image.")
	fs.String("os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
	fs.StringArrayVar(&flags.OSFeatures, "os-feature", []string{}, "set required OS `feature` for the target image in addition to values from the base image")
	fs.StringVar(&flags.OSVersion, "os-version", "", "set required OS `version` for the target image instead of the value from the base image")
	fs.StringVar(&flags.Pull, "pull", "true", "pull the image from the registry if newer or not present in store, if false, only pull the image if not present, if always, pull the image even if the named image is present in store, if never, only use the image present in store if available")
	fs.Lookup("pull").NoOptDefVal = "true" // allow `--pull ` to be set to `true` as expected.
	fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store")
	if err := fs.MarkHidden("pull-always"); err != nil {
		panic(fmt.Sprintf("error marking the pull-always flag as hidden: %v", err))
	}
	fs.BoolVar(&flags.PullNever, "pull-never", false, "do not pull the image, use the image present in store if available")
	if err := fs.MarkHidden("pull-never"); err != nil {
		panic(fmt.Sprintf("error marking the pull-never flag as hidden: %v", err))
	}
	fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
	fs.BoolVar(&flags.OmitHistory, "omit-history", false, "omit build history information from built image")
	fs.BoolVar(&flags.IdentityLabel, "identity-label", true, "add default identity label")
	fs.BoolVar(&flags.Rm, "rm", true, "remove intermediate containers after a successful build")
	// "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go.
	fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
	fs.StringArrayVar(&flags.Secrets, "secret", []string{}, "secret file to expose to the build")
	fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
	fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
	if err := fs.MarkHidden("signature-policy"); err != nil {
		panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err))
	}
	fs.BoolVar(&flags.SkipUnusedStages, "skip-unused-stages", true, "skips stages in multi-stage builds which do not affect the final target")
	fs.BoolVar(&flags.Squash, "squash", false, "squash all image layers into a single layer")
	fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. (format: default|<id>[=<socket>|<key>[,<key>]])")
	fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers")
	fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
	fs.StringVarP(&flags.BuildOutput, "output", "o", "", "output destination (format: type=local,dest=path)")
	fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
	fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
	fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
	fs.String("variant", "", "override the `variant` of the specified image")
	fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "unset environment variable from final image")
	fs.StringSliceVar(&flags.UnsetLabels, "unsetlabel", nil, "unset label when inheriting labels from base image")
	return fs
}
+
+// GetBudFlagsCompletions returns the FlagCompletions for the common build flags
+func GetBudFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["annotation"] = commonComp.AutocompleteNone
+ flagCompletion["arch"] = commonComp.AutocompleteNone
+ flagCompletion["authfile"] = commonComp.AutocompleteDefault
+ flagCompletion["build-arg"] = commonComp.AutocompleteNone
+ flagCompletion["build-arg-file"] = commonComp.AutocompleteDefault
+ flagCompletion["build-context"] = commonComp.AutocompleteNone
+ flagCompletion["cache-from"] = commonComp.AutocompleteNone
+ flagCompletion["cache-to"] = commonComp.AutocompleteNone
+ flagCompletion["cache-ttl"] = commonComp.AutocompleteNone
+ flagCompletion["cert-dir"] = commonComp.AutocompleteDefault
+ flagCompletion["cpp-flag"] = commonComp.AutocompleteNone
+ flagCompletion["creds"] = commonComp.AutocompleteNone
+ flagCompletion["cw"] = commonComp.AutocompleteNone
+ flagCompletion["env"] = commonComp.AutocompleteNone
+ flagCompletion["file"] = commonComp.AutocompleteDefault
+ flagCompletion["format"] = commonComp.AutocompleteNone
+ flagCompletion["from"] = commonComp.AutocompleteDefault
+ flagCompletion["hooks-dir"] = commonComp.AutocompleteNone
+ flagCompletion["ignorefile"] = commonComp.AutocompleteDefault
+ flagCompletion["iidfile"] = commonComp.AutocompleteDefault
+ flagCompletion["jobs"] = commonComp.AutocompleteNone
+ flagCompletion["label"] = commonComp.AutocompleteNone
+ flagCompletion["layer-label"] = commonComp.AutocompleteNone
+ flagCompletion["logfile"] = commonComp.AutocompleteDefault
+ flagCompletion["manifest"] = commonComp.AutocompleteDefault
+ flagCompletion["os"] = commonComp.AutocompleteNone
+ flagCompletion["os-feature"] = commonComp.AutocompleteNone
+ flagCompletion["os-version"] = commonComp.AutocompleteNone
+ flagCompletion["output"] = commonComp.AutocompleteNone
+ flagCompletion["pull"] = commonComp.AutocompleteDefault
+ flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
+ flagCompletion["secret"] = commonComp.AutocompleteNone
+ flagCompletion["sign-by"] = commonComp.AutocompleteNone
+ flagCompletion["signature-policy"] = commonComp.AutocompleteNone
+ flagCompletion["ssh"] = commonComp.AutocompleteNone
+ flagCompletion["tag"] = commonComp.AutocompleteNone
+ flagCompletion["target"] = commonComp.AutocompleteNone
+ flagCompletion["timestamp"] = commonComp.AutocompleteNone
+ flagCompletion["unsetenv"] = commonComp.AutocompleteNone
+ flagCompletion["unsetlabel"] = commonComp.AutocompleteNone
+ flagCompletion["variant"] = commonComp.AutocompleteNone
+ return flagCompletion
+}
+
+// GetFromAndBudFlags returns from and build flags
+func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) (pflag.FlagSet, error) {
+ fs := pflag.FlagSet{}
+ defaultContainerConfig, err := config.Default()
+ if err != nil {
+ return fs, fmt.Errorf("failed to get container config: %w", err)
+ }
+
+ fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
+ fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
+ if err := fs.MarkHidden("blob-cache"); err != nil {
+ panic(fmt.Sprintf("error marking net flag as hidden: %v", err))
+ }
+ fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])")
+ fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])")
+ fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container")
+ fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period")
+ fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota")
+ fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
+ fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
+ fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
+ fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image")
+ fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices.Get(), "additional devices to be used within containers (default [])")
+ fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches.Get(), "set custom DNS search domains")
+ fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers.Get(), "set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
+ fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions.Get(), "set custom DNS options")
+ fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables")
+ fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
+ fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
+ fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
+ fs.IntVar(&flags.Retry, "retry", MaxPullPushRetries, "number of times to retry in case of failure when performing push/pull")
+ fs.StringVar(&flags.RetryDelay, "retry-delay", PullPushRetryDelay.String(), "delay between retries in case of push/pull failures")
+ fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
+ fs.String("os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images")
+ fs.StringSlice("platform", []string{parse.DefaultPlatform()}, "set the `OS/ARCH[/VARIANT]` of the image to the provided value instead of the current operating system and architecture of the host (for example \"linux/arm\")")
+ fs.String("variant", "", "override the `variant` of the specified image")
+ fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
+ fs.StringVar(&flags.ShmSize, "shm-size", defaultContainerConfig.Containers.ShmSize, "size of '/dev/shm'. The format is `<number><unit>`.")
+ fs.StringSliceVar(&flags.Ulimit, "ulimit", defaultContainerConfig.Containers.DefaultUlimits.Get(), "ulimit options")
+ fs.StringArrayVarP(&flags.Volumes, "volume", "v", defaultContainerConfig.Volumes(), "bind mount a volume into the container")
+
+ // Add in the usernamespace and namespaceflags
+ usernsFlags := GetUserNSFlags(usernsResults)
+ namespaceFlags := GetNameSpaceFlags(namespaceResults)
+ fs.AddFlagSet(&usernsFlags)
+ fs.AddFlagSet(&namespaceFlags)
+
+ return fs, nil
+}
+
+// GetFromAndBudFlagsCompletions returns the FlagCompletions for the from and build flags
+func GetFromAndBudFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["arch"] = commonComp.AutocompleteNone
+ flagCompletion["add-host"] = commonComp.AutocompleteNone
+ flagCompletion["blob-cache"] = commonComp.AutocompleteNone
+ flagCompletion["cap-add"] = commonComp.AutocompleteCapabilities
+ flagCompletion["cap-drop"] = commonComp.AutocompleteCapabilities
+ flagCompletion["cgroup-parent"] = commonComp.AutocompleteDefault // FIXME: This would be a path right?!
+ flagCompletion["cpu-period"] = commonComp.AutocompleteNone
+ flagCompletion["cpu-quota"] = commonComp.AutocompleteNone
+ flagCompletion["cpu-shares"] = commonComp.AutocompleteNone
+ flagCompletion["cpuset-cpus"] = commonComp.AutocompleteNone
+ flagCompletion["cpuset-mems"] = commonComp.AutocompleteNone
+ flagCompletion["decryption-key"] = commonComp.AutocompleteNone
+ flagCompletion["device"] = commonComp.AutocompleteDefault
+ flagCompletion["dns-search"] = commonComp.AutocompleteNone
+ flagCompletion["dns"] = commonComp.AutocompleteNone
+ flagCompletion["dns-option"] = commonComp.AutocompleteNone
+ flagCompletion["isolation"] = commonComp.AutocompleteNone
+ flagCompletion["memory"] = commonComp.AutocompleteNone
+ flagCompletion["memory-swap"] = commonComp.AutocompleteNone
+ flagCompletion["os"] = commonComp.AutocompleteNone
+ flagCompletion["platform"] = commonComp.AutocompleteNone
+ flagCompletion["retry"] = commonComp.AutocompleteNone
+ flagCompletion["retry-delay"] = commonComp.AutocompleteNone
+ flagCompletion["security-opt"] = commonComp.AutocompleteNone
+ flagCompletion["shm-size"] = commonComp.AutocompleteNone
+ flagCompletion["ulimit"] = commonComp.AutocompleteNone
+ flagCompletion["volume"] = commonComp.AutocompleteDefault
+ flagCompletion["variant"] = commonComp.AutocompleteNone
+
+ // Add in the usernamespace and namespace flag completions
+ userNsComp := GetUserNSFlagsCompletions()
+ for name, comp := range userNsComp {
+ flagCompletion[name] = comp
+ }
+ namespaceComp := GetNameSpaceFlagsCompletions()
+ for name, comp := range namespaceComp {
+ flagCompletion[name] = comp
+ }
+
+ return flagCompletion
+}
+
// UseLayers reports whether the BUILDAH_LAYERS environment variable is
// set to "true" (in any letter case) or "1"; otherwise it returns false.
func UseLayers() bool {
	switch strings.ToLower(os.Getenv("BUILDAH_LAYERS")) {
	case "true", "1":
		return true
	}
	return false
}
+
+// DefaultFormat returns the default image format
+func DefaultFormat() string {
+ format := os.Getenv("BUILDAH_FORMAT")
+ if format != "" {
+ return format
+ }
+ return define.OCI
+}
+
// DefaultIsolation returns the default process isolation mechanism to
// use, honoring the BUILDAH_ISOLATION environment variable, falling
// back to "rootless" when running unprivileged, and to the OCI runtime
// otherwise. (The original comment wrongly said "image format".)
func DefaultIsolation() string {
	isolation := os.Getenv("BUILDAH_ISOLATION")
	if isolation != "" {
		return isolation
	}
	// Unprivileged users get the rootless isolation variant.
	if unshare.IsRootless() {
		return "rootless"
	}
	return define.OCI
}
+
// DefaultHistory reports whether the BUILDAH_HISTORY environment
// variable requests history entries by default: it returns true for
// "true" (in any letter case) or "1", and false otherwise.
func DefaultHistory() bool {
	history := strings.ToLower(os.Getenv("BUILDAH_HISTORY"))
	return history == "true" || history == "1"
}
+
// VerifyFlagsArgsOrder returns an error if any member of args looks
// like an option (starts with "-"); all options must be given before
// the image or container name on the command line.
func VerifyFlagsArgsOrder(args []string) error {
	for i := range args {
		if !strings.HasPrefix(args[i], "-") {
			continue
		}
		return fmt.Errorf("no options (%s) can be specified after the image or container name", args[i])
	}
	return nil
}
+
+// AliasFlags is a function to handle backwards compatibility with old flags
+func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "net":
+ name = "network"
+ case "override-arch":
+ name = "arch"
+ case "override-os":
+ name = "os"
+ case "purge":
+ name = "rm"
+ case "tty":
+ name = "terminal"
+ }
+ return pflag.NormalizedName(name)
+}
+
// LookupEnvVarReferences returns a copy of specs with keys and values resolved
// from environ. Strings are in "key=value" form, the same as [os.Environ].
//
//   - When a string in specs lacks "=", it is treated as a key and the value
//     is retrieved from environ. When the key is missing from environ, neither
//     the key nor value are returned.
//
//   - When a string in specs lacks "=" and ends with "*", it is treated as
//     a key prefix and any keys with the same prefix in environ are returned.
//
//   - When a string in specs is exactly "*", all keys and values in environ
//     are returned.
func LookupEnvVarReferences(specs, environ []string) []string {
	result := make([]string, 0, len(specs))

	for _, spec := range specs {
		key, _, hasValue := strings.Cut(spec, "=")
		switch {
		case hasValue:
			// Fully-specified "key=value" entries pass through unchanged.
			result = append(result, spec)
		case key == "*":
			// A bare "*" pulls in the entire environment.
			result = append(result, environ...)
		default:
			// Either an exact key or a "prefix*" glob; collect every
			// matching entry from environ.
			prefix := key + "="
			if strings.HasSuffix(key, "*") {
				prefix = strings.TrimSuffix(key, "*")
			}
			for _, candidate := range environ {
				if strings.HasPrefix(candidate, prefix) {
					result = append(result, candidate)
				}
			}
		}
	}

	return result
}
+
// DecryptConfig translates decryptionKeys into a DecryptConfig
// structure suitable for passing to image copy operations. A nil
// result (with nil error) means no decryption was requested.
func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) {
	var decryptConfig *encconfig.DecryptConfig
	if len(decryptionKeys) > 0 {
		// decryption
		dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
		if err != nil {
			return nil, fmt.Errorf("invalid decryption keys: %w", err)
		}
		cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
		decryptConfig = cc.DecryptConfig
	}

	return decryptConfig, nil
}
+
// EncryptConfig translates encryptionKeys into an EncryptConfig
// structure and the list of layers to encrypt. Nil results (with nil
// error) mean no encryption was requested.
func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) {
	var encLayers *[]int
	var encConfig *encconfig.EncryptConfig

	if len(encryptionKeys) > 0 {
		// encryption
		encLayers = &encryptLayers
		ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
		if err != nil {
			return nil, nil, fmt.Errorf("invalid encryption keys: %w", err)
		}
		cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
		encConfig = cc.EncryptConfig
	}
	return encConfig, encLayers, nil
}
+
// GetFormat translates a user-supplied format name ("oci" or "docker")
// into the corresponding manifest media-type constant, returning an
// error for any other value.
func GetFormat(format string) (string, error) {
	switch format {
	case define.OCI:
		return define.OCIv1ImageManifest, nil
	case define.DOCKER:
		return define.Dockerv2ImageManifest, nil
	default:
		return "", fmt.Errorf("unrecognized image type %q", format)
	}
}
diff --git a/pkg/cli/common_test.go b/pkg/cli/common_test.go
new file mode 100644
index 0000000..7449a6c
--- /dev/null
+++ b/pkg/cli/common_test.go
@@ -0,0 +1,142 @@
+package cli
+
+import (
+ "testing"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/common/pkg/completion"
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+)
+
// testFlagCompletion verifies that flags and flagCompletions agree:
// every visible non-bool flag must have a completion function, bool
// flags must not have one, and no completion may refer to a flag that
// does not exist.
func testFlagCompletion(t *testing.T, flags pflag.FlagSet, flagCompletions completion.FlagCompletions) {
	// lookup if for each flag a flag completion function exists
	flags.VisitAll(func(f *pflag.Flag) {
		// skip hidden and deprecated flags
		if f.Hidden || len(f.Deprecated) > 0 {
			return
		}
		if _, ok := flagCompletions[f.Name]; !ok && f.Value.Type() != "bool" {
			t.Errorf("Flag %q has no shell completion function set.", f.Name)
		} else if ok && f.Value.Type() == "bool" {
			// make sure bool flags don't have a completion function
			t.Errorf(`Flag %q is a bool flag but has a shell completion function set.
	You have to remove this shell completion function.`, f.Name)
			return

		}
	})

	// make sure no unnecessary flag completion functions are defined
	for name := range flagCompletions {
		if flag := flags.Lookup(name); flag == nil {
			t.Errorf("Flag %q does not exist but has a shell completion function set.", name)
		}
	}
}
+
// TestUserNsFlagsCompletion checks the user namespace flag completions.
func TestUserNsFlagsCompletion(t *testing.T) {
	flags := GetUserNSFlags(&UserNSResults{})
	flagCompletions := GetUserNSFlagsCompletions()
	testFlagCompletion(t, flags, flagCompletions)
}
+
// TestNameSpaceFlagsCompletion checks the namespace flag completions.
func TestNameSpaceFlagsCompletion(t *testing.T) {
	flags := GetNameSpaceFlags(&NameSpaceResults{})
	flagCompletions := GetNameSpaceFlagsCompletions()
	testFlagCompletion(t, flags, flagCompletions)
}
+
// TestBudFlagsCompletion checks the common build flag completions.
func TestBudFlagsCompletion(t *testing.T) {
	flags := GetBudFlags(&BudResults{})
	flagCompletions := GetBudFlagsCompletions()
	testFlagCompletion(t, flags, flagCompletions)
}
+
+func TestFromAndBudFlagsCompletions(t *testing.T) {
+ flags, err := GetFromAndBudFlags(&FromAndBudResults{}, &UserNSResults{}, &NameSpaceResults{})
+ if err != nil {
+ t.Error("Could load the from and build flags.")
+ }
+ flagCompletions := GetFromAndBudFlagsCompletions()
+ testFlagCompletion(t, flags, flagCompletions)
+}
+
// TestLookupEnvVarReferences exercises the pass-through, wildcard,
// exact-key, and prefix-glob behaviors of LookupEnvVarReferences.
func TestLookupEnvVarReferences(t *testing.T) {
	t.Run("EmptyInput", func(t *testing.T) {
		assert.Empty(t, LookupEnvVarReferences(nil, nil))
		assert.Empty(t, LookupEnvVarReferences([]string{}, nil))
	})

	t.Run("EmptyEnvironment", func(t *testing.T) {
		assert.Equal(t, []string{"a=b"}, LookupEnvVarReferences([]string{"a=b"}, nil))
		assert.Equal(t, []string{"a="}, LookupEnvVarReferences([]string{"a="}, nil))
		assert.Equal(t, []string{}, LookupEnvVarReferences([]string{"a"}, nil))
		assert.Equal(t, []string{}, LookupEnvVarReferences([]string{"*"}, nil))
	})

	t.Run("MissingEnvironment", func(t *testing.T) {
		assert.Equal(t,
			[]string{"a=b", "c="},
			LookupEnvVarReferences([]string{"a=b", "c="}, []string{"x=y"}))

		assert.Equal(t,
			[]string{"a=b"},
			LookupEnvVarReferences([]string{"a=b", "c"}, []string{"x=y"}))

		assert.Equal(t,
			[]string{"a=b"},
			LookupEnvVarReferences([]string{"a=b", "c*"}, []string{"x=y"}))
	})

	t.Run("MatchingEnvironment", func(t *testing.T) {
		assert.Equal(t,
			[]string{"a=b", "c="},
			LookupEnvVarReferences([]string{"a=b", "c="}, []string{"c=d", "x=y"}))

		assert.Equal(t,
			[]string{"a=b", "c=d"},
			LookupEnvVarReferences([]string{"a=b", "c"}, []string{"c=d", "x=y"}))

		assert.Equal(t,
			[]string{"a=b", "c=d"},
			LookupEnvVarReferences([]string{"a=b", "c*"}, []string{"c=d", "x=y"}))

		// Note: the "c*" prefix also matches "cg=i".
		assert.Equal(t,
			[]string{"a=b", "c=d", "cg=i"},
			LookupEnvVarReferences([]string{"a=b", "c*"}, []string{"c=d", "x=y", "cg=i"}))
	})

	t.Run("MultipleMatches", func(t *testing.T) {
		assert.Equal(t,
			[]string{"a=b", "c=d", "cg=i", "c=d", "x=y", "cg=i", "cg=i"},
			LookupEnvVarReferences([]string{"a=b", "c*", "*", "cg*"}, []string{"c=d", "x=y", "cg=i"}))
	})
}
+
// TestDecryptConfig verifies that no keys yields a nil config.
func TestDecryptConfig(t *testing.T) {
	// Just a smoke test for the default path.
	res, err := DecryptConfig(nil)
	assert.NoError(t, err)
	assert.Nil(t, res)
}
+
// TestEncryptConfig verifies that no keys yields nil config and layers.
func TestEncryptConfig(t *testing.T) {
	// Just a smoke test for the default path.
	cfg, layers, err := EncryptConfig(nil, nil)
	assert.NoError(t, err)
	assert.Nil(t, cfg)
	assert.Nil(t, layers)
}
+
// TestGetFormat checks the mapping from user-facing format names to
// manifest media types, and that unknown names are rejected.
func TestGetFormat(t *testing.T) {
	_, err := GetFormat("bogus")
	assert.NotNil(t, err)

	format, err := GetFormat("oci")
	assert.Nil(t, err)
	assert.Equalf(t, define.OCIv1ImageManifest, format, "expected oci format but got %v.", format)
	format, err = GetFormat("docker")
	assert.Nil(t, err)
	assert.Equalf(t, define.Dockerv2ImageManifest, format, "expected docker format but got %v.", format)
}
diff --git a/pkg/cli/exec_codes.go b/pkg/cli/exec_codes.go
new file mode 100644
index 0000000..7ba42e9
--- /dev/null
+++ b/pkg/cli/exec_codes.go
@@ -0,0 +1,13 @@
+package cli
+
const (
	// ExecErrorCodeGeneric is the default error code to return from an exec session if libpod failed
	// prior to calling the runtime. 125-127 follow the conventional shell/container exit-code meanings.
	ExecErrorCodeGeneric = 125
	// ExecErrorCodeCannotInvoke is the error code to return when the runtime fails to invoke a command.
	// An example of this can be found by trying to execute a directory:
	// `podman exec -l /etc`
	ExecErrorCodeCannotInvoke = 126
	// ExecErrorCodeNotFound is the error code to return when a command cannot be found.
	ExecErrorCodeNotFound = 127
)
diff --git a/pkg/completion/completion.go b/pkg/completion/completion.go
new file mode 100644
index 0000000..a7812d2
--- /dev/null
+++ b/pkg/completion/completion.go
@@ -0,0 +1,23 @@
+package completion
+
+import (
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+/* Autocomplete Functions for cobra ValidArgsFunction */
+
+// AutocompleteNamespaceFlag - Autocomplete the userns flag.
+// -> host, private, container, ns:[path], [path]
+func AutocompleteNamespaceFlag(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ var completions []string
+ // If we don't filter on "toComplete", zsh and fish will not do file completion
+ // even if the prefix typed by the user does not match the returned completions
+ for _, comp := range []string{"host", "private", "container", "ns:"} {
+ if strings.HasPrefix(comp, toComplete) {
+ completions = append(completions, comp)
+ }
+ }
+ return completions, cobra.ShellCompDirectiveDefault
+}
diff --git a/pkg/dummy/dummy_test.go b/pkg/dummy/dummy_test.go
new file mode 100644
index 0000000..4326a92
--- /dev/null
+++ b/pkg/dummy/dummy_test.go
@@ -0,0 +1,8 @@
+package dummy
+
+import (
+ "testing"
+)
+
// TestDummy is an intentionally empty placeholder; it gives this package a
// test binary so `go test ./...` exercises it without failing.
func TestDummy(t *testing.T) {
}
diff --git a/pkg/formats/formats.go b/pkg/formats/formats.go
new file mode 100644
index 0000000..676da30
--- /dev/null
+++ b/pkg/formats/formats.go
@@ -0,0 +1,166 @@
+package formats
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "text/tabwriter"
+ "text/template"
+
+ "golang.org/x/term"
+ "sigs.k8s.io/yaml"
+)
+
const (
	// JSONString const to save on duplicate variable names.
	JSONString = "json"
	// IDString const to save on duplicates for Go templates.
	IDString = "{{.ID}}"

	// parsingErrorStr prefixes template-parsing failures reported by the Out methods.
	parsingErrorStr = "Template parsing error"
)
+
// Writer interface for outputs: any value that can render itself to stdout.
type Writer interface {
	// Out writes the formatted representation, returning any encoding or
	// template error encountered.
	Out() error
}

// JSONStructArray for JSON output of a list of items.
type JSONStructArray struct {
	// Output holds the items to be encoded as one JSON array.
	Output []interface{}
}

// StdoutTemplateArray for Go template output of a list of items.
type StdoutTemplateArray struct {
	// Output holds the items, rendered one per line through Template.
	Output []interface{}
	// Template is Go template text; a leading "table" keyword adds a header row.
	Template string
	// Fields maps column names to header labels for the "table" header row.
	Fields map[string]string
}

// JSONStruct for JSON output of a single item.
type JSONStruct struct {
	// Output is the single value to encode as JSON.
	Output interface{}
}

// StdoutTemplate for Go template output of a single item.
type StdoutTemplate struct {
	// Output is the single value rendered through Template.
	Output interface{}
	// Template is the Go template text.
	Template string
	// Fields is not referenced by this type's Out; kept for symmetry with
	// StdoutTemplateArray.
	Fields map[string]string
}

// YAMLStruct for YAML output
type YAMLStruct struct {
	// Output is the single value to marshal as YAML.
	Output interface{}
}
+
+func setJSONFormatEncoder(isTerminal bool, w io.Writer) *json.Encoder {
+ enc := json.NewEncoder(w)
+ enc.SetIndent("", " ")
+ if isTerminal {
+ enc.SetEscapeHTML(false)
+ }
+ return enc
+}
+
+// Out method for JSON Arrays
+func (j JSONStructArray) Out() error {
+ buf := bytes.NewBuffer(nil)
+ enc := setJSONFormatEncoder(term.IsTerminal(int(os.Stdout.Fd())), buf)
+ if err := enc.Encode(j.Output); err != nil {
+ return err
+ }
+ data := buf.Bytes()
+
+ // JSON returns a byte array with a literal null [110 117 108 108] in it
+ // if it is passed empty data. We used bytes.Compare to see if that is
+ // the case.
+ if diff := bytes.Compare(data, []byte("null")); diff == 0 {
+ data = []byte("[]")
+ }
+
+ // If the we did get NULL back, we should spit out {} which is
+ // at least valid JSON for the consumer.
+ fmt.Printf("%s", data)
+ humanNewLine()
+ return nil
+}
+
+// Out method for Go templates
+func (t StdoutTemplateArray) Out() error {
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
+ if strings.HasPrefix(t.Template, "table") {
+ // replace any spaces with tabs in template so that tabwriter can align it
+ t.Template = strings.Replace(strings.TrimSpace(t.Template[5:]), " ", "\t", -1)
+ headerTmpl, err := template.New("header").Funcs(headerFunctions).Parse(t.Template)
+ if err != nil {
+ return fmt.Errorf("%v: %w", parsingErrorStr, err)
+ }
+ err = headerTmpl.Execute(w, t.Fields)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintln(w, "")
+ }
+ t.Template = strings.Replace(t.Template, " ", "\t", -1)
+ tmpl, err := template.New("image").Funcs(basicFunctions).Parse(t.Template)
+ if err != nil {
+ return fmt.Errorf("%v: %w", parsingErrorStr, err)
+ }
+ for _, raw := range t.Output {
+ basicTmpl := tmpl.Funcs(basicFunctions)
+ if err := basicTmpl.Execute(w, raw); err != nil {
+ return fmt.Errorf("%v: %w", parsingErrorStr, err)
+ }
+ fmt.Fprintln(w, "")
+ }
+ return w.Flush()
+}
+
+// Out method for JSON struct
+func (j JSONStruct) Out() error {
+ data, err := json.MarshalIndent(j.Output, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s", data)
+ humanNewLine()
+ return nil
+}
+
+//Out method for Go templates
+func (t StdoutTemplate) Out() error {
+ tmpl, err := template.New("image").Parse(t.Template)
+ if err != nil {
+ return fmt.Errorf("template parsing error: %w", err)
+ }
+ err = tmpl.Execute(os.Stdout, t.Output)
+ if err != nil {
+ return err
+ }
+ humanNewLine()
+ return nil
+}
+
+// Out method for YAML
+func (y YAMLStruct) Out() error {
+ var buf []byte
+ var err error
+ buf, err = yaml.Marshal(y.Output)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s", string(buf))
+ humanNewLine()
+ return nil
+}
+
// humanNewLine prints a new line at the end of the output only if stdout is
// the terminal, so piped/redirected output stays byte-exact for scripts.
func humanNewLine() {
	if term.IsTerminal(int(os.Stdout.Fd())) {
		fmt.Println()
	}
}
diff --git a/pkg/formats/formats_test.go b/pkg/formats/formats_test.go
new file mode 100644
index 0000000..628da01
--- /dev/null
+++ b/pkg/formats/formats_test.go
@@ -0,0 +1,44 @@
+package formats
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
// ImageData is a minimal fixture whose Author field carries HTML-sensitive
// characters to exercise the encoder's escaping behavior in the test below.
type ImageData struct {
	Author string `json:"Author"`
}
+
+func TestSetJSONFormatEncoder(t *testing.T) {
+ tt := []struct {
+ name string
+ imageData *ImageData
+ expected string
+ isTerminal bool
+ }{
+ {
+ name: "HTML tags are not escaped",
+ imageData: &ImageData{Author: "dave <dave@corp.io>"},
+ expected: `"Author": "dave <dave@corp.io>"`,
+ isTerminal: true,
+ },
+ {
+ name: "HTML tags are escaped",
+ imageData: &ImageData{Author: "dave <dave@corp.io>"},
+ expected: `"Author": "dave \u003cdave@corp.io\u003e"`,
+ isTerminal: false,
+ },
+ }
+
+ for _, tc := range tt {
+ buf := bytes.NewBuffer(nil)
+ enc := setJSONFormatEncoder(tc.isTerminal, buf)
+ if err := enc.Encode(tc.imageData); err != nil {
+ t.Errorf("test %#v failed encoding: %s", tc.name, err)
+ }
+ if !strings.Contains(buf.String(), tc.expected) {
+ t.Errorf("test %#v expected output to contain %#v. Output:\n%v\n", tc.name, tc.expected, buf.String())
+ }
+ }
+}
diff --git a/pkg/formats/templates.go b/pkg/formats/templates.go
new file mode 100644
index 0000000..66f3ba3
--- /dev/null
+++ b/pkg/formats/templates.go
@@ -0,0 +1,82 @@
+package formats
+
+import (
+ "bytes"
+ "encoding/json"
+ "strings"
+ "text/template"
+)
+
// basicFunctions are the set of initial
// functions provided to every template.
var basicFunctions = template.FuncMap{
	// json renders any value as compact single-line JSON without HTML escaping.
	"json": func(v interface{}) string {
		buf := &bytes.Buffer{}
		enc := json.NewEncoder(buf)
		enc.SetEscapeHTML(false)
		_ = enc.Encode(v)
		// Remove the trailing new line added by the encoder
		return strings.TrimSpace(buf.String())
	},
	"split": strings.Split,
	"join":  strings.Join,
	// strings.Title is deprecated since go 1.18
	// However for our use case it is still fine. The recommended replacement
	// is adding about 400kb binary size so lets keep using this for now.
	//nolint:staticcheck
	"title":    strings.Title,
	"lower":    strings.ToLower,
	"upper":    strings.ToUpper,
	"pad":      padWithSpace,
	"truncate": truncateWithLength,
}
+
// headerFunctions are used to create the headers of a table.
// This is a replacement of basicFunctions for header generation
// because we want the header to remain intact: every function here is an
// identity (truncate deliberately ignores its length argument).
// Some functions like `split` are irrelevant so not added.
var headerFunctions = template.FuncMap{
	"json": func(v string) string {
		return v
	},
	"title": func(v string) string {
		return v
	},
	"lower": func(v string) string {
		return v
	},
	"upper": func(v string) string {
		return v
	},
	"truncate": func(v string, l int) string {
		return v
	},
}
+
// Parse creates a new anonymous template with the basic functions
// and parses the given format.
func Parse(format string) (*template.Template, error) {
	return NewParse("", format)
}

// NewParse creates a new tagged template with the basic functions
// and parses the given format. The returned error is the template
// package's parse error, if any.
func NewParse(tag, format string) (*template.Template, error) {
	return template.New(tag).Funcs(basicFunctions).Parse(format)
}
+
// padWithSpace surrounds source with prefix leading spaces and suffix
// trailing spaces; an empty source is returned unchanged.
func padWithSpace(source string, prefix, suffix int) string {
	if len(source) == 0 {
		return source
	}
	return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix)
}
+
// truncateWithLength returns at most the first length bytes of source;
// a source shorter than length is returned unchanged.
func truncateWithLength(source string, length int) string {
	if len(source) >= length {
		return source[:length]
	}
	return source
}
diff --git a/pkg/jail/jail.go b/pkg/jail/jail.go
new file mode 100644
index 0000000..fdaca5a
--- /dev/null
+++ b/pkg/jail/jail.go
@@ -0,0 +1,180 @@
+//go:build freebsd
+// +build freebsd
+
+package jail
+
+import (
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
// NS selects how a jail shares a namespace-like resource (ip4, ip6, host,
// vnet) with its parent; presumably these mirror FreeBSD's
// JAIL_SYS_DISABLE/JAIL_SYS_NEW/JAIL_SYS_INHERIT values — TODO confirm.
type NS int32

const (
	DISABLED NS = 0
	NEW      NS = 1
	INHERIT  NS = 2

	// Flags passed to jail_set(2); they appear to mirror the JAIL_* flag
	// constants in FreeBSD's sys/jail.h — confirm against the header.
	JAIL_CREATE = 0x01
	JAIL_UPDATE = 0x02
	JAIL_ATTACH = 0x04
)
+
// config accumulates jail parameters keyed by name, later flattened into the
// iovec list consumed by jail_set(2)/jail_get(2).
type config struct {
	params map[string]interface{}
}

// NewConfig returns an empty parameter set ready for Set calls.
func NewConfig() *config {
	return &config{params: map[string]interface{}{}}
}
+
// handleBoolSetting converts a boolean jail parameter into jail's paired
// parameter-name convention: a false value is expressed by prefixing the last
// dot-separated component of the key with "no" (e.g. "allow.mount" becomes
// "allow.nomount"). The returned value is always nil because the parameter
// name alone carries the setting.
func handleBoolSetting(key string, val bool) (string, interface{}) {
	if !val { // idiomatic negation instead of `val == false`
		parts := strings.Split(key, ".")
		parts[len(parts)-1] = "no" + parts[len(parts)-1]
		key = strings.Join(parts, ".")
	}
	return key, nil
}
+
// Set validates and records one jail parameter. Plain int/uint32 values are
// normalised to int32 first; boolean parameters are rewritten through
// handleBoolSetting into the "name"/"noname" convention; namespace parameters
// must be a jail.NS. Any type mismatch terminates the process via
// logrus.Fatalf — callers get no error return.
func (c *config) Set(key string, value interface{}) {
	// Normalise integer types to int32
	switch v := value.(type) {
	case int:
		value = int32(v)
	case uint32:
		value = int32(v)
	}

	switch key {
	case "jid", "devfs_ruleset", "enforce_statfs", "children.max", "securelevel":
		// Integer-valued parameters.
		if _, ok := value.(int32); !ok {
			logrus.Fatalf("value for parameter %s must be an int32", key)
		}
	case "ip4", "ip6", "host", "vnet":
		// Namespace-style parameters take a jail.NS value.
		nsval, ok := value.(NS)
		if !ok {
			logrus.Fatalf("value for parameter %s must be a jail.NS", key)
		}
		// host and vnet only support NEW or INHERIT.
		if (key == "host" || key == "vnet") && nsval == DISABLED {
			logrus.Fatalf("value for parameter %s cannot be DISABLED", key)
		}
	case "persist", "sysvmsg", "sysvsem", "sysvshm":
		// Boolean parameters encoded via the paired-name convention.
		bval, ok := value.(bool)
		if !ok {
			logrus.Fatalf("value for parameter %s must be bool", key)
		}
		key, value = handleBoolSetting(key, bval)
	default:
		if strings.HasPrefix(key, "allow.") {
			// All allow.* parameters are booleans.
			bval, ok := value.(bool)
			if !ok {
				logrus.Fatalf("value for parameter %s must be bool", key)
			}
			key, value = handleBoolSetting(key, bval)
		} else {
			// Anything unrecognised must be a string parameter.
			if _, ok := value.(string); !ok {
				logrus.Fatalf("value for parameter %s must be a string", key)
			}
		}
	}
	c.params[key] = value
}
+
// getIovec flattens the parameter map into the alternating name/value iovec
// list that the jail_set(2)/jail_get(2) syscalls consume.
func (c *config) getIovec() ([]syscall.Iovec, error) {
	jiov := make([]syscall.Iovec, 0)
	for key, value := range c.params {
		// Parameter name first...
		iov, err := stringToIovec(key)
		if err != nil {
			return nil, err
		}
		jiov = append(jiov, iov)
		// ...then its value in the representation the kernel expects.
		switch v := value.(type) {
		case string:
			iov, err := stringToIovec(v)
			if err != nil {
				return nil, err
			}
			jiov = append(jiov, iov)
		case int32:
			// v is a fresh per-case copy, so its address stays valid and
			// distinct for each appended iovec.
			jiov = append(jiov, syscall.Iovec{
				Base: (*byte)(unsafe.Pointer(&v)),
				Len:  4,
			})
		case NS:
			jiov = append(jiov, syscall.Iovec{
				Base: (*byte)(unsafe.Pointer(&v)),
				Len:  4,
			})
		default:
			// Value-less parameters (e.g. the "no"-prefixed booleans, whose
			// value is nil) are expressed as an empty value iovec.
			jiov = append(jiov, syscall.Iovec{
				Base: nil,
				Len:  0,
			})
		}
	}
	return jiov, nil
}
+
// jail wraps the numeric jail identifier returned by the kernel.
type jail struct {
	jid int32
}

// jailSet invokes jail_set(2) with the flattened parameters and the given
// JAIL_CREATE/JAIL_UPDATE/JAIL_ATTACH flags, returning the resulting jail.
func jailSet(jconf *config, flags int) (*jail, error) {
	jiov, err := jconf.getIovec()
	if err != nil {
		return nil, err
	}

	jid, _, errno := syscall.Syscall(unix.SYS_JAIL_SET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags))
	if errno != 0 {
		return nil, errno
	}
	return &jail{
		jid: int32(jid),
	}, nil
}
+
// jailGet invokes jail_get(2) with the flattened parameters (typically a
// "name" or "jid" selector) and returns the matching jail.
func jailGet(jconf *config, flags int) (*jail, error) {
	jiov, err := jconf.getIovec()
	if err != nil {
		return nil, err
	}

	jid, _, errno := syscall.Syscall(unix.SYS_JAIL_GET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags))
	if errno != 0 {
		return nil, errno
	}
	return &jail{
		jid: int32(jid),
	}, nil
}
+
// Create creates a new jail from jconf without attaching the caller.
func Create(jconf *config) (*jail, error) {
	return jailSet(jconf, JAIL_CREATE)
}

// CreateAndAttach creates a new jail and attaches the calling process to it.
func CreateAndAttach(jconf *config) (*jail, error) {
	return jailSet(jconf, JAIL_CREATE|JAIL_ATTACH)
}

// FindByName looks up an existing jail by its "name" parameter.
func FindByName(name string) (*jail, error) {
	jconf := NewConfig()
	jconf.Set("name", name)
	return jailGet(jconf, 0)
}

// Set applies jconf to this existing jail via JAIL_UPDATE, keyed by its jid.
func (j *jail) Set(jconf *config) error {
	jconf.Set("jid", j.jid)
	_, err := jailSet(jconf, JAIL_UPDATE)
	return err
}
diff --git a/pkg/jail/jail_int32.go b/pkg/jail/jail_int32.go
new file mode 100644
index 0000000..3e56bb6
--- /dev/null
+++ b/pkg/jail/jail_int32.go
@@ -0,0 +1,20 @@
+//go:build (386 || arm) && freebsd
+// +build 386 arm
+// +build freebsd
+
+package jail
+
+import (
+ "syscall"
+)
+
// stringToIovec packages val as a NUL-terminated byte buffer wrapped in a
// syscall.Iovec. This is the 386/arm variant, where Iovec.Len is uint32.
func stringToIovec(val string) (syscall.Iovec, error) {
	bs, err := syscall.ByteSliceFromString(val)
	if err != nil {
		// val contained a NUL byte; it cannot be passed to the kernel.
		return syscall.Iovec{}, err
	}
	var res syscall.Iovec
	res.Base = &bs[0]
	res.Len = uint32(len(bs))
	return res, nil
}
diff --git a/pkg/jail/jail_int64.go b/pkg/jail/jail_int64.go
new file mode 100644
index 0000000..dace13f
--- /dev/null
+++ b/pkg/jail/jail_int64.go
@@ -0,0 +1,19 @@
+//go:build !(386 || arm) && freebsd
+// +build !386,!arm,freebsd
+
+package jail
+
+import (
+ "syscall"
+)
+
// stringToIovec packages val as a NUL-terminated byte buffer wrapped in a
// syscall.Iovec. This is the 64-bit variant, where Iovec.Len is uint64.
func stringToIovec(val string) (syscall.Iovec, error) {
	bs, err := syscall.ByteSliceFromString(val)
	if err != nil {
		// val contained a NUL byte; it cannot be passed to the kernel.
		return syscall.Iovec{}, err
	}
	var res syscall.Iovec
	res.Base = &bs[0]
	res.Len = uint64(len(bs))
	return res, nil
}
diff --git a/pkg/manifests/compat.go b/pkg/manifests/compat.go
new file mode 100644
index 0000000..dfb63b3
--- /dev/null
+++ b/pkg/manifests/compat.go
@@ -0,0 +1,28 @@
+// This package is deprecated. Its functionality has been moved to
+// github.com/containers/common/pkg/manifests, which provides the same API.
+// The stubs and aliases here are present for compatibility with older code.
+// New implementations should use github.com/containers/common/pkg/manifests
+// directly.
+package manifests
+
+import "github.com/containers/common/pkg/manifests"
+
// List is an alias for github.com/containers/common/pkg/manifests.List.
//
// Deprecated: use github.com/containers/common/pkg/manifests.List directly.
type List = manifests.List

var (
	// ErrDigestNotFound is an alias for github.com/containers/common/pkg/manifests.ErrDigestNotFound.
	ErrDigestNotFound = manifests.ErrDigestNotFound
	// ErrManifestTypeNotSupported is an alias for github.com/containers/common/pkg/manifests.ErrManifestTypeNotSupported.
	ErrManifestTypeNotSupported = manifests.ErrManifestTypeNotSupported
)

// Create wraps github.com/containers/common/pkg/manifests.Create().
//
// Deprecated: use github.com/containers/common/pkg/manifests.Create directly.
func Create() List {
	return manifests.Create()
}

// FromBlob wraps github.com/containers/common/pkg/manifests.FromBlob().
//
// Deprecated: use github.com/containers/common/pkg/manifests.FromBlob directly.
func FromBlob(manifestBytes []byte) (List, error) {
	return manifests.FromBlob(manifestBytes)
}
diff --git a/pkg/overlay/overlay.go b/pkg/overlay/overlay.go
new file mode 100644
index 0000000..e416ecd
--- /dev/null
+++ b/pkg/overlay/overlay.go
@@ -0,0 +1,242 @@
+package overlay
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "errors"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
// Options type holds various configuration options for overlay.
// MountWithOptions accepts the following type so it is easier to specify
// more verbose configuration for an overlay mount.
type Options struct {
	// UpperDir is normally the writable layer of an overlay mount.
	// Note: this API does not handle escaping or validate the values passed
	// in UpperDirOptionFragment; it passes them as-is to the `mount` command.
	// It is the user's responsibility to pre-validate these values; invalid
	// inputs may lead to undefined behaviour.
	// This is provided as-is, use it if it works for you, we can/will change/break that in the future.
	// See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
	// TODO: Should we address above comment and handle escaping of metacharacters like
	// `comma`, `backslash` ,`colon` and any other special characters
	UpperDirOptionFragment string
	// The Workdir is used to prepare files as they are switched between the layers.
	// Note: this API does not handle escaping or validate the values passed
	// in WorkDirOptionFragment; it passes them as-is to the `mount` command.
	// It is the user's responsibility to pre-validate these values; invalid
	// inputs may lead to undefined behaviour.
	// This is provided as-is, use it if it works for you, we can/will change/break that in the future.
	// See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
	// TODO: Should we address above comment and handle escaping of metacharacters like
	// `comma`, `backslash` ,`colon` and any other special characters
	WorkDirOptionFragment string
	// GraphOpts holds the graph options relayed from podman; they determine
	// whether an external mount program is used (see findMountProgram).
	GraphOpts []string
	// ReadOnly marks the overlay as read-only.
	ReadOnly bool
	// RootUID is not used yet but keeping it here for legacy reasons.
	RootUID int
	// RootGID is not used yet but keeping it here for legacy reasons.
	RootGID int
}
+
+// TempDir generates an overlay Temp directory in the container content
+func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
+ contentDir := filepath.Join(containerDir, "overlay")
+ if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
+ }
+
+ contentDir, err := os.MkdirTemp(contentDir, "")
+ if err != nil {
+ return "", fmt.Errorf("failed to create the overlay tmpdir in %s directory: %w", contentDir, err)
+ }
+
+ return generateOverlayStructure(contentDir, rootUID, rootGID)
+}
+
+// GenerateStructure generates an overlay directory structure for container content
+func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID int) (string, error) {
+ contentDir := filepath.Join(containerDir, "overlay-containers", containerID, name)
+ if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
+ }
+
+ return generateOverlayStructure(contentDir, rootUID, rootGID)
+}
+
+// generateOverlayStructure generates upper, work and merge directory structure for overlay directory
+func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string, error) {
+ upperDir := filepath.Join(containerDir, "upper")
+ workDir := filepath.Join(containerDir, "work")
+ if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", upperDir, err)
+ }
+ if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", workDir, err)
+ }
+ mergeDir := filepath.Join(containerDir, "merge")
+ if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", mergeDir, err)
+ }
+
+ return containerDir, nil
+}
+
// Mount creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// It is a read-write convenience wrapper around MountWithOptions.
func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
	overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: false, RootUID: rootUID, RootGID: rootGID}
	return MountWithOptions(contentDir, source, dest, &overlayOpts)
}

// MountReadOnly creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller. Note that no
// upper layer will be created rendering it a read-only mount.
func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
	overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: true, RootUID: rootUID, RootGID: rootGID}
	return MountWithOptions(contentDir, source, dest, &overlayOpts)
}
+
// findMountProgram returns the mount-program path configured in graphOptions
// (e.g. "overlay.mount_program=/usr/bin/fuse-overlayfs"), or "" when none of
// the recognized keys is present. The first matching option wins.
func findMountProgram(graphOptions []string) string {
	for _, option := range graphOptions {
		kv := strings.SplitN(option, "=", 2)
		if len(kv) != 2 {
			// Not a key=value option; ignore it.
			continue
		}
		// A switch over the three constant keys avoids allocating a lookup
		// map on every call, as the previous implementation did.
		switch kv[0] {
		case ".mount_program", "overlay.mount_program", "overlay2.mount_program":
			return kv[1]
		}
	}

	return ""
}
+
// mountWithMountProgram mounts an overlay at mergeDir by invoking the
// configured external mount program (e.g. fuse-overlayfs) as
// `<program> -o <overlayOptions> <mergeDir>`.
func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error {
	cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir)

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("exec %s: %w", mountProgram, err)
	}
	return nil
}
+
+// Convert ":" to "\:", the path which will be overlay mounted need to be escaped
+func escapeColon(source string) string {
+ return strings.ReplaceAll(source, ":", "\\:")
+}
+
// RemoveTemp unmounts the "merge" mountpoint beneath contentDir and then
// removes contentDir and all of its content.
func RemoveTemp(contentDir string) error {
	if err := Unmount(contentDir); err != nil {
		return err
	}

	return os.RemoveAll(contentDir)
}
+
// Unmount unmounts the "merge" mountpoint beneath contentDir. In rootless
// mode a FUSE unmount via fusermount3/fusermount is attempted first; if both
// fail or are absent, a plain unmount(2) is used. A merge dir that does not
// exist or is not a mount point is not treated as an error.
func Unmount(contentDir string) error {
	mergeDir := filepath.Join(contentDir, "merge")

	if unshare.IsRootless() {
		// Attempt to unmount the FUSE mount using either fusermount or fusermount3.
		// If they fail, fallback to unix.Unmount
		for _, v := range []string{"fusermount3", "fusermount"} {
			err := exec.Command(v, "-u", mergeDir).Run()
			if err != nil && !errors.Is(err, exec.ErrNotFound) {
				logrus.Debugf("Error unmounting %s with %s - %v", mergeDir, v, err)
			}
			if err == nil {
				return nil
			}
		}
		// If fusermount|fusermount3 failed to unmount the FUSE file system, attempt unmount
	}

	// Ignore EINVAL as the specified merge dir is not a mount point, and
	// ENOENT as it may already have been removed.
	if err := unix.Unmount(mergeDir, 0); err != nil && !errors.Is(err, os.ErrNotExist) && err != unix.EINVAL {
		return fmt.Errorf("unmount overlay %s: %w", mergeDir, err)
	}
	return nil
}
+
+func recreate(contentDir string) error {
+ st, err := system.Stat(contentDir)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
+ return fmt.Errorf("failed to stat overlay upper directory: %w", err)
+ }
+
+ if err := os.RemoveAll(contentDir); err != nil {
+ return err
+ }
+
+ if err := idtools.MkdirAllAs(contentDir, os.FileMode(st.Mode()), int(st.UID()), int(st.GID())); err != nil {
+ return fmt.Errorf("failed to create overlay directory: %w", err)
+ }
+ return nil
+}
+
+// CleanupMount removes all temporary mountpoint content
+func CleanupMount(contentDir string) (Err error) {
+ if err := recreate(filepath.Join(contentDir, "upper")); err != nil {
+ return err
+ }
+ if err := recreate(filepath.Join(contentDir, "work")); err != nil {
+ return err
+ }
+ return nil
+}
+
+// CleanupContent removes all temporary mountpoint and all content from
+// directory
+func CleanupContent(containerDir string) (Err error) {
+ contentDir := filepath.Join(containerDir, "overlay")
+
+ files, err := os.ReadDir(contentDir)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
+ return fmt.Errorf("read directory: %w", err)
+ }
+ for _, f := range files {
+ dir := filepath.Join(contentDir, f.Name())
+ if err := Unmount(dir); err != nil {
+ return err
+ }
+ }
+
+ if err := os.RemoveAll(contentDir); err != nil && !errors.Is(err, os.ErrNotExist) {
+ return fmt.Errorf("failed to cleanup overlay directory: %w", err)
+ }
+ return nil
+}
diff --git a/pkg/overlay/overlay_freebsd.go b/pkg/overlay/overlay_freebsd.go
new file mode 100644
index 0000000..e814a32
--- /dev/null
+++ b/pkg/overlay/overlay_freebsd.go
@@ -0,0 +1,31 @@
+package overlay
+
+import (
+ //"fmt"
+ //"os"
+ //"path/filepath"
+ //"strings"
+ //"syscall"
+ "errors"
+
+ //"github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// MountWithOptions creates a subdir of the contentDir based on the source directory
+// from the source system. It then mounts up the source directory on to the
+// generated mount point and returns the mount point to the caller.
+// But allows api to set custom workdir, upperdir and other overlay options
+// Following API is being used by podman at the moment
+func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
+ if opts.ReadOnly {
+ // Read-only overlay mounts can be simulated with nullfs
+ mount.Source = source
+ mount.Destination = dest
+ mount.Type = "nullfs"
+ mount.Options = []string{"ro"}
+ return mount, nil
+ } else {
+ return mount, errors.New("read/write overlay mounts not supported on freebsd")
+ }
+}
diff --git a/pkg/overlay/overlay_linux.go b/pkg/overlay/overlay_linux.go
new file mode 100644
index 0000000..9bd72bc
--- /dev/null
+++ b/pkg/overlay/overlay_linux.go
@@ -0,0 +1,80 @@
+package overlay
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// MountWithOptions creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// But allows api to set custom workdir, upperdir and other overlay options.
// Following API is being used by podman at the moment.
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
	mergeDir := filepath.Join(contentDir, "merge")

	// Create overlay mount options for rw/ro.
	var overlayOptions string
	if opts.ReadOnly {
		// Read-only overlay mounts require two lower layers.
		lowerTwo := filepath.Join(contentDir, "lower")
		if err := os.Mkdir(lowerTwo, 0755); err != nil {
			return mount, err
		}
		overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)
	} else {
		// Read-write overlay mounts want a lower, upper and a work layer.
		workDir := filepath.Join(contentDir, "work")
		upperDir := filepath.Join(contentDir, "upper")

		// Caller-supplied fragments override both dirs only when both are set.
		if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" {
			workDir = opts.WorkDirOptionFragment
			upperDir = opts.UpperDirOptionFragment
		}

		// Make the upper dir's mode and ownership match the source so files
		// created through the overlay behave like the original tree.
		st, err := os.Stat(source)
		if err != nil {
			return mount, err
		}
		if err := os.Chmod(upperDir, st.Mode()); err != nil {
			return mount, err
		}
		if stat, ok := st.Sys().(*syscall.Stat_t); ok {
			if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil {
				return mount, err
			}
		}
		overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir)
	}

	// Prefer an external mount program (e.g. fuse-overlayfs) when configured;
	// the result is then bind-mounted to the destination.
	mountProgram := findMountProgram(opts.GraphOpts)
	if mountProgram != "" {
		if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil {
			return mount, err
		}

		mount.Source = mergeDir
		mount.Destination = dest
		mount.Type = "bind"
		mount.Options = []string{"bind", "slave"}
		return mount, nil
	}

	if unshare.IsRootless() {
		/* If a mount_program is not specified, fallback to try mounting native overlay. */
		overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions)
	}

	mount.Source = mergeDir
	mount.Destination = dest
	mount.Type = "overlay"
	mount.Options = strings.Split(overlayOptions, ",")

	return mount, nil
}
diff --git a/pkg/parse/parse.go b/pkg/parse/parse.go
new file mode 100644
index 0000000..d865f50
--- /dev/null
+++ b/pkg/parse/parse.go
@@ -0,0 +1,1198 @@
+package parse
+
+// this package should contain functions that parse and validate
+// user input and is shared either amongst buildah subcommands or
+// would be useful to projects vendoring buildah
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/containers/buildah/define"
+ mkcwtypes "github.com/containers/buildah/internal/mkcw/types"
+ internalParse "github.com/containers/buildah/internal/parse"
+ "github.com/containers/buildah/internal/tmpdir"
+ "github.com/containers/buildah/pkg/sshagent"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/parse"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/unshare"
+ storageTypes "github.com/containers/storage/types"
+ securejoin "github.com/cyphar/filepath-securejoin"
+ units "github.com/docker/go-units"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/openshift/imagebuilder"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "golang.org/x/term"
+)
+
+const (
+ // SeccompDefaultPath defines the default seccomp path
+ SeccompDefaultPath = config.SeccompDefaultPath
+ // SeccompOverridePath if this exists it overrides the default seccomp path
+ SeccompOverridePath = config.SeccompOverridePath
+ // TypeBind is the type for mounting host dir
+ TypeBind = "bind"
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs = "tmpfs"
+ // TypeCache is the type for mounting a common persistent cache from host
+ TypeCache = "cache"
+ // BuildahCacheDir is the directory name used by mount=type=cache mounts.
+ // It must be a persistent directory on the host so it is available for all
+ // consecutive builds; its lifecycle is inherited from how the host machine
+ // treats its temporary directory.
+ BuildahCacheDir = "buildah-cache"
+)
+
+// RepoNamesToNamedReferences parses each raw repository string in destList
+// into a normalized reference.Named. Each entry must name only a repository:
+// entries that fail to parse, or that carry a tag or digest, produce an error.
+func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) {
+ var result []reference.Named
+ for _, dest := range destList {
+ named, err := reference.ParseNormalizedNamed(dest)
+ if err != nil {
+ return nil, fmt.Errorf("invalid repo %q: must contain registry and repository: %w", dest, err)
+ }
+ // Tags and digests are rejected; only the bare name is acceptable here.
+ if !reference.IsNameOnly(named) {
+ return nil, fmt.Errorf("repository must contain neither a tag nor digest: %v", named)
+ }
+ result = append(result, named)
+ }
+ return result, nil
+}
+
+// CommonBuildOptions parses the build options from the bud cli.
+// It is a thin convenience wrapper over CommonBuildOptionsFromFlagSet
+// using the command's own flag set and lookup function.
+func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
+ return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag)
+}
+
+// CommonBuildOptionsFromFlagSet parses the build options from the bud cli.
+// flags is the flag set to read values from and findFlagFunc looks flags up
+// by name (used for flags read via their Value stringer). Flag-read errors
+// from Get* accessors are deliberately ignored: an unregistered flag simply
+// leaves the corresponding option at its zero value.
+func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) {
+ var (
+ memoryLimit int64
+ memorySwap int64
+ noDNS bool
+ err error
+ )
+
+ // --memory accepts human-readable sizes ("512m", "2g").
+ memVal, _ := flags.GetString("memory")
+ if memVal != "" {
+ memoryLimit, err = units.RAMInBytes(memVal)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value for memory: %w", err)
+ }
+ }
+
+ // --memory-swap: "-1" means unlimited swap; anything else is a size.
+ memSwapValue, _ := flags.GetString("memory-swap")
+ if memSwapValue != "" {
+ if memSwapValue == "-1" {
+ memorySwap = -1
+ } else {
+ memorySwap, err = units.RAMInBytes(memSwapValue)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value for memory-swap: %w", err)
+ }
+ }
+ }
+
+ noHostname, _ := flags.GetBool("no-hostname")
+ noHosts, _ := flags.GetBool("no-hosts")
+
+ // --add-host entries conflict with --no-hosts and must each be a valid
+ // name:IP pair.
+ addHost, _ := flags.GetStringSlice("add-host")
+ if len(addHost) > 0 {
+ if noHosts {
+ return nil, errors.New("--no-hosts and --add-host conflict, can not be used together")
+ }
+ for _, host := range addHost {
+ if err := validateExtraHost(host); err != nil {
+ return nil, fmt.Errorf("invalid value for add-host: %w", err)
+ }
+ }
+ }
+
+ // --dns=none disables DNS configuration entirely and may not be combined
+ // with any other --dns/--dns-search/--dns-option values.
+ noDNS = false
+ dnsServers := []string{}
+ if flags.Changed("dns") {
+ dnsServers, _ = flags.GetStringSlice("dns")
+ for _, server := range dnsServers {
+ if strings.ToLower(server) == "none" {
+ noDNS = true
+ }
+ }
+ if noDNS && len(dnsServers) > 1 {
+ return nil, errors.New("invalid --dns, --dns=none may not be used with any other --dns options")
+ }
+ }
+
+ dnsSearch := []string{}
+ if flags.Changed("dns-search") {
+ dnsSearch, _ = flags.GetStringSlice("dns-search")
+ if noDNS && len(dnsSearch) > 0 {
+ return nil, errors.New("invalid --dns-search, --dns-search may not be used with --dns=none")
+ }
+ }
+
+ dnsOptions := []string{}
+ if flags.Changed("dns-option") {
+ dnsOptions, _ = flags.GetStringSlice("dns-option")
+ if noDNS && len(dnsOptions) > 0 {
+ return nil, errors.New("invalid --dns-option, --dns-option may not be used with --dns=none")
+ }
+ }
+
+ // Validate --shm-size up front; the string itself is stored below.
+ if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil {
+ return nil, fmt.Errorf("invalid --shm-size: %w", err)
+ }
+ volumes, _ := flags.GetStringArray("volume")
+ cpuPeriod, _ := flags.GetUint64("cpu-period")
+ cpuQuota, _ := flags.GetInt64("cpu-quota")
+ cpuShares, _ := flags.GetUint64("cpu-shares")
+ httpProxy, _ := flags.GetBool("http-proxy")
+ identityLabel, _ := flags.GetBool("identity-label")
+ omitHistory, _ := flags.GetBool("omit-history")
+
+ ulimit := []string{}
+ if flags.Changed("ulimit") {
+ ulimit, _ = flags.GetStringSlice("ulimit")
+ }
+
+ secrets, _ := flags.GetStringArray("secret")
+ sshsources, _ := flags.GetStringArray("ssh")
+ ociHooks, _ := flags.GetStringArray("hooks-dir")
+
+ commonOpts := &define.CommonBuildOptions{
+ AddHost: addHost,
+ CPUPeriod: cpuPeriod,
+ CPUQuota: cpuQuota,
+ CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(),
+ CPUSetMems: findFlagFunc("cpuset-mems").Value.String(),
+ CPUShares: cpuShares,
+ CgroupParent: findFlagFunc("cgroup-parent").Value.String(),
+ DNSOptions: dnsOptions,
+ DNSSearch: dnsSearch,
+ DNSServers: dnsServers,
+ HTTPProxy: httpProxy,
+ IdentityLabel: types.NewOptionalBool(identityLabel),
+ Memory: memoryLimit,
+ MemorySwap: memorySwap,
+ NoHostname: noHostname,
+ NoHosts: noHosts,
+ OmitHistory: omitHistory,
+ ShmSize: findFlagFunc("shm-size").Value.String(),
+ Ulimit: ulimit,
+ Volumes: volumes,
+ Secrets: secrets,
+ SSHSources: sshsources,
+ OCIHooksDir: ociHooks,
+ }
+ // --security-opt values are folded into commonOpts in place.
+ securityOpts, _ := flags.GetStringArray("security-opt")
+ if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
+ return nil, err
+ }
+ return commonOpts, nil
+}
+
+// GetAdditionalBuildContext consumes a raw additional-build-context string and
+// returns the parsed AdditionalBuildContext. Recognized forms are image
+// references (docker-image://, container-image://, docker://), URLs
+// (http://, https://), and local paths (everything else), which are made
+// absolute.
+func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, error) {
+ ret := define.AdditionalBuildContext{IsURL: false, IsImage: false, Value: value}
+ if strings.HasPrefix(value, "docker-image://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "docker-image://")
+ } else if strings.HasPrefix(value, "container-image://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "container-image://")
+ } else if strings.HasPrefix(value, "docker://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "docker://")
+ } else if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") {
+ // URLs keep their original value untouched.
+ ret.IsImage = false
+ ret.IsURL = true
+ } else {
+ // Anything else is treated as a local path and normalized to absolute.
+ path, err := filepath.Abs(value)
+ if err != nil {
+ return define.AdditionalBuildContext{}, fmt.Errorf("unable to convert additional build-context %q path to absolute: %w", value, err)
+ }
+ ret.Value = path
+ }
+ return ret, nil
+}
+
+// parseSecurityOpts parses the --security-opt values (no-new-privileges, and
+// label=/apparmor=/seccomp= name=value pairs) into commonOpts, modifying it
+// in place. If no seccomp profile was given explicitly, it falls back to
+// SeccompOverridePath when that file exists, then to SeccompDefaultPath.
+func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOptions) error {
+ for _, opt := range securityOpts {
+ if opt == "no-new-privileges" {
+ commonOpts.NoNewPrivileges = true
+ continue
+ }
+
+ con := strings.SplitN(opt, "=", 2)
+ if len(con) != 2 {
+ return fmt.Errorf("invalid --security-opt name=value pair: %q", opt)
+ }
+ switch con[0] {
+ case "label":
+ commonOpts.LabelOpts = append(commonOpts.LabelOpts, con[1])
+ case "apparmor":
+ commonOpts.ApparmorProfile = con[1]
+ case "seccomp":
+ commonOpts.SeccompProfilePath = con[1]
+ default:
+ return fmt.Errorf("invalid --security-opt: %q", opt)
+ }
+ }
+
+ // No explicit seccomp profile: prefer the override path if it exists,
+ // otherwise the default path if it exists; real stat errors propagate.
+ if commonOpts.SeccompProfilePath == "" {
+ if _, err := os.Stat(SeccompOverridePath); err == nil {
+ commonOpts.SeccompProfilePath = SeccompOverridePath
+ } else {
+ if !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ if _, err := os.Stat(SeccompDefaultPath); err != nil {
+ if !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ } else {
+ commonOpts.SeccompProfilePath = SeccompDefaultPath
+ }
+ }
+ }
+ return nil
+}
+
+// SplitStringWithColonEscape splits str into a slice on colons. A
+// backslash-escaped colon (i.e. "\:") is not regarded as a separator.
+// It delegates to the internal parse package's implementation.
+func SplitStringWithColonEscape(str string) []string {
+ return internalParse.SplitStringWithColonEscape(str)
+}
+
+// Volume parses a single --volume argument into a specs.Mount,
+// delegating to the internal parse package's implementation.
+func Volume(volume string) (specs.Mount, error) {
+ return internalParse.Volume(volume)
+}
+
+// Volumes validates the host and container paths passed in to the --volume
+// flag by attempting to parse each entry; the first parse failure is
+// returned. An empty slice is valid.
+func Volumes(volumes []string) error {
+ if len(volumes) == 0 {
+ return nil
+ }
+ for _, volume := range volumes {
+ if _, err := Volume(volume); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ValidateVolumeHostDir validates a volume mount's source directory,
+// delegating to containers/common's parse package.
+func ValidateVolumeHostDir(hostDir string) error {
+ return parse.ValidateVolumeHostDir(hostDir)
+}
+
+// ValidateVolumeCtrDir validates a volume mount's destination directory,
+// delegating to containers/common's parse package.
+func ValidateVolumeCtrDir(ctrDir string) error {
+ return parse.ValidateVolumeCtrDir(ctrDir)
+}
+
+// ValidateVolumeOpts validates a volume's options and returns the
+// (possibly normalized) option list, delegating to containers/common's
+// parse package.
+func ValidateVolumeOpts(options []string) ([]string, error) {
+ return parse.ValidateVolumeOpts(options)
+}
+
+// validateExtraHost validates that the specified string is a valid extra-host
+// entry for the --add-host flag. An entry is in the form name:ip where ip
+// must be a valid IPv4 or IPv6 address.
+func validateExtraHost(val string) error {
+ // allow for IPv6 addresses in extra hosts by only splitting on first ":"
+ arr := strings.SplitN(val, ":", 2)
+ if len(arr) != 2 || len(arr[0]) == 0 {
+ return fmt.Errorf("bad format for add-host: %q", val)
+ }
+ if _, err := validateIPAddress(arr[1]); err != nil {
+ return fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+ }
+ return nil
+}
+
+// validateIPAddress validates an IP address (IPv4 or IPv6), returning its
+// canonical string form. Surrounding whitespace is trimmed before parsing.
+// Used by the dns, ip, and ip6 flags as well.
+func validateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// SystemContextFromOptions returns a SystemContext populated with values
+// per the input parameters provided by the caller for the use in
+// authentication. It is a convenience wrapper over SystemContextFromFlagSet.
+func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
+ return SystemContextFromFlagSet(c.Flags(), c.Flag)
+}
+
+// SystemContextFromFlagSet returns a SystemContext populated with values
+// per the input parameters provided by the caller for the use in
+// authentication. Most flags are applied only when they were explicitly
+// changed on the command line; errors from Get* accessors on unregistered
+// flags are intentionally treated as "flag absent".
+func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*types.SystemContext, error) {
+ certDir, err := flags.GetString("cert-dir")
+ if err != nil {
+ certDir = ""
+ }
+ ctx := &types.SystemContext{
+ DockerCertPath: certDir,
+ }
+ // --tls-verify is inverted into the various *InsecureSkipTLSVerify fields.
+ tlsVerify, err := flags.GetBool("tls-verify")
+ if err == nil && findFlagFunc("tls-verify").Changed {
+ ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
+ ctx.OCIInsecureSkipTLSVerify = !tlsVerify
+ ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify
+ }
+ // --insecure is mutually exclusive with --tls-verify.
+ insecure, err := flags.GetBool("insecure")
+ if err == nil && findFlagFunc("insecure").Changed {
+ if ctx.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ return nil, errors.New("--insecure may not be used with --tls-verify")
+ }
+ ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(insecure)
+ ctx.OCIInsecureSkipTLSVerify = insecure
+ ctx.DockerDaemonInsecureSkipTLSVerify = insecure
+ }
+ // NOTE(review): when --disable-compression is present but false this sets
+ // DirForceCompress rather than leaving it untouched — confirm intended.
+ disableCompression, err := flags.GetBool("disable-compression")
+ if err == nil {
+ if disableCompression {
+ ctx.OCIAcceptUncompressedLayers = true
+ } else {
+ ctx.DirForceCompress = true
+ }
+ }
+ creds, err := flags.GetString("creds")
+ if err == nil && findFlagFunc("creds").Changed {
+ var err error
+ ctx.DockerAuthConfig, err = AuthConfig(creds)
+ if err != nil {
+ return nil, err
+ }
+ }
+ sigPolicy, err := flags.GetString("signature-policy")
+ if err == nil && findFlagFunc("signature-policy").Changed {
+ ctx.SignaturePolicyPath = sigPolicy
+ }
+ authfile, err := flags.GetString("authfile")
+ if err == nil {
+ ctx.AuthFilePath = getAuthFile(authfile)
+ }
+ regConf, err := flags.GetString("registries-conf")
+ if err == nil && findFlagFunc("registries-conf").Changed {
+ ctx.SystemRegistriesConfPath = regConf
+ }
+ regConfDir, err := flags.GetString("registries-conf-dir")
+ if err == nil && findFlagFunc("registries-conf-dir").Changed {
+ ctx.RegistriesDirPath = regConfDir
+ }
+ shortNameAliasConf, err := flags.GetString("short-name-alias-conf")
+ if err == nil && findFlagFunc("short-name-alias-conf").Changed {
+ ctx.UserShortNameAliasConfPath = shortNameAliasConf
+ }
+ ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", define.Version)
+ // --os/--arch/--variant may each be set individually...
+ if findFlagFunc("os") != nil && findFlagFunc("os").Changed {
+ var os string
+ if os, err = flags.GetString("os"); err != nil {
+ return nil, err
+ }
+ ctx.OSChoice = os
+ }
+ if findFlagFunc("arch") != nil && findFlagFunc("arch").Changed {
+ var arch string
+ if arch, err = flags.GetString("arch"); err != nil {
+ return nil, err
+ }
+ ctx.ArchitectureChoice = arch
+ }
+ if findFlagFunc("variant") != nil && findFlagFunc("variant").Changed {
+ var variant string
+ if variant, err = flags.GetString("variant"); err != nil {
+ return nil, err
+ }
+ ctx.VariantChoice = variant
+ }
+ // ...or --platform may set all three at once, but not both. Only the
+ // first --platform value is consulted here.
+ if findFlagFunc("platform") != nil && findFlagFunc("platform").Changed {
+ var specs []string
+ if specs, err = flags.GetStringSlice("platform"); err != nil {
+ return nil, err
+ }
+ if len(specs) == 0 || specs[0] == "" {
+ return nil, fmt.Errorf("unable to parse --platform value %v", specs)
+ }
+ platform := specs[0]
+ os, arch, variant, err := Platform(platform)
+ if err != nil {
+ return nil, err
+ }
+ if ctx.OSChoice != "" || ctx.ArchitectureChoice != "" || ctx.VariantChoice != "" {
+ return nil, errors.New("invalid --platform may not be used with --os, --arch, or --variant")
+ }
+ ctx.OSChoice = os
+ ctx.ArchitectureChoice = arch
+ ctx.VariantChoice = variant
+ }
+
+ ctx.BigFilesTemporaryDir = GetTempDir()
+ return ctx, nil
+}
+
+// getAuthFile returns the absolute form of a caller-supplied auth file path,
+// falling back to the default auth file when none was given or when the path
+// cannot be made absolute (in which case a warning is logged).
+func getAuthFile(authfile string) string {
+ if authfile != "" {
+ absAuthfile, err := filepath.Abs(authfile)
+ if err == nil {
+ return absAuthfile
+ }
+ logrus.Warnf("ignoring passed-in auth file path, evaluating it: %v", err)
+ }
+ return auth.GetDefaultAuthFile()
+}
+
+// PlatformFromOptions parses the operating system (os) and architecture (arch)
+// from the provided command line options, returning only the first platform.
+// Deprecated in favor of PlatformsFromOptions(), but kept here because it's
+// part of our API.
+func PlatformFromOptions(c *cobra.Command) (os, arch string, err error) {
+ platforms, err := PlatformsFromOptions(c)
+ if err != nil {
+ return "", "", err
+ }
+ if len(platforms) < 1 {
+ return "", "", errors.New("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])")
+ }
+ return platforms[0].OS, platforms[0].Arch, nil
+}
+
+// PlatformsFromOptions parses the operating system (os) and architecture
+// (arch) from the provided command line options. If --platform is used, it
+// also returns the list of platforms that were passed in as its argument;
+// --platform may not be combined with --os, --arch, or --variant.
+func PlatformsFromOptions(c *cobra.Command) (platforms []struct{ OS, Arch, Variant string }, err error) {
+ var os, arch, variant string
+ if c.Flag("os").Changed {
+ if os, err = c.Flags().GetString("os"); err != nil {
+ return nil, err
+ }
+ }
+ if c.Flag("arch").Changed {
+ if arch, err = c.Flags().GetString("arch"); err != nil {
+ return nil, err
+ }
+ }
+ if c.Flag("variant").Changed {
+ if variant, err = c.Flags().GetString("variant"); err != nil {
+ return nil, err
+ }
+ }
+ // Default: a single platform built from the individual flags (possibly
+ // all empty).
+ platforms = []struct{ OS, Arch, Variant string }{{os, arch, variant}}
+ if c.Flag("platform").Changed {
+ platforms = nil
+ platformSpecs, err := c.Flags().GetStringSlice("platform")
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse platform: %w", err)
+ }
+ if os != "" || arch != "" || variant != "" {
+ return nil, fmt.Errorf("invalid --platform may not be used with --os, --arch, or --variant")
+ }
+ for _, pf := range platformSpecs {
+ if os, arch, variant, err = Platform(pf); err != nil {
+ return nil, fmt.Errorf("unable to parse platform %q: %w", pf, err)
+ }
+ platforms = append(platforms, struct{ OS, Arch, Variant string }{os, arch, variant})
+ }
+ }
+ return platforms, nil
+}
+
+// DefaultPlatform returns the standard platform string (OS/ARCH[/VARIANT])
+// for the current system, as computed by containerd's platforms package.
+func DefaultPlatform() string {
+ return platforms.DefaultString()
+}
+
+// Platform separates the platform string into os, arch and variant,
+// accepting any of $arch, $os/$arch, or $os/$arch/$variant. The values
+// "local" and "" resolve to the current system's default platform.
+func Platform(platform string) (os, arch, variant string, err error) {
+ platform = strings.Trim(platform, "/")
+ if platform == "local" || platform == "" {
+ return Platform(DefaultPlatform())
+ }
+ platformSpec, err := platforms.Parse(platform)
+ if err != nil {
+ return "", "", "", fmt.Errorf("invalid platform syntax for --platform=%q: %w", platform, err)
+ }
+ return platformSpec.OS, platformSpec.Architecture, platformSpec.Variant, nil
+}
+
+// parseCreds splits a "username[:password]" credentials string into its
+// username and password parts. Either part may come back empty; only the
+// first colon separates the two, so passwords may contain colons.
+func parseCreds(creds string) (string, string) {
+ if creds == "" {
+ return "", ""
+ }
+ up := strings.SplitN(creds, ":", 2)
+ if len(up) == 1 {
+ return up[0], ""
+ }
+ if up[0] == "" {
+ return "", up[1]
+ }
+ return up[0], up[1]
+}
+
+// AuthConfig parses the creds in format [username[:password]] into an auth
+// config. A missing username is read interactively from stdin; a missing
+// password is read from the terminal (fd 0) without echoing.
+func AuthConfig(creds string) (*types.DockerAuthConfig, error) {
+ username, password := parseCreds(creds)
+ if username == "" {
+ fmt.Print("Username: ")
+ fmt.Scanln(&username)
+ }
+ if password == "" {
+ fmt.Print("Password: ")
+ termPassword, err := term.ReadPassword(0)
+ if err != nil {
+ return nil, fmt.Errorf("could not read password from terminal: %w", err)
+ }
+ password = string(termPassword)
+ }
+
+ return &types.DockerAuthConfig{
+ Username: username,
+ Password: password,
+ }, nil
+}
+
+// GetBuildOutput is responsible for parsing custom build output argument i.e `build --output` flag.
+// Takes `buildOutput` as string and returns BuildOutputOption. Accepted forms
+// are "-" (tar to stdout), a bare directory path, or a comma-separated
+// "type=<local|tar>,dest=<path>" option list.
+func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
+ if len(buildOutput) == 1 && buildOutput == "-" {
+ // Feature parity with buildkit, output tar to stdout
+ // Read more here: https://docs.docker.com/engine/reference/commandline/build/#custom-build-outputs
+ return define.BuildOutputOption{Path: "",
+ IsDir: false,
+ IsStdout: true}, nil
+ }
+ if !strings.Contains(buildOutput, ",") {
+ // expect default --output <dirname>
+ return define.BuildOutputOption{Path: buildOutput,
+ IsDir: true,
+ IsStdout: false}, nil
+ }
+ // Full key=value form: both "type" and "dest" are required, each at
+ // most once.
+ isDir := true
+ isStdout := false
+ typeSelected := false
+ pathSelected := false
+ path := ""
+ tokens := strings.Split(buildOutput, ",")
+ for _, option := range tokens {
+ arr := strings.SplitN(option, "=", 2)
+ if len(arr) != 2 {
+ return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput)
+ }
+ switch arr[0] {
+ case "type":
+ if typeSelected {
+ return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
+ }
+ typeSelected = true
+ if arr[1] == "local" {
+ isDir = true
+ } else if arr[1] == "tar" {
+ isDir = false
+ } else {
+ return define.BuildOutputOption{}, fmt.Errorf("invalid type %q selected for build output options %q", arr[1], buildOutput)
+ }
+ case "dest":
+ if pathSelected {
+ return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
+ }
+ pathSelected = true
+ path = arr[1]
+ default:
+ return define.BuildOutputOption{}, fmt.Errorf("unrecognized key %q in build output option: %q", arr[0], buildOutput)
+ }
+ }
+
+ if !typeSelected || !pathSelected {
+ return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, accepted keys are type and dest must be present", buildOutput)
+ }
+
+ // dest=- means stdout, which only makes sense for tar output.
+ if path == "-" {
+ if isDir {
+ return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, type=local and dest=- is not supported", buildOutput)
+ }
+ return define.BuildOutputOption{Path: "",
+ IsDir: false,
+ IsStdout: true}, nil
+ }
+
+ return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil
+}
+
+// TeeType parses a string value and returns the corresponding define.TeeType.
+// Matching is case-insensitive; no validation is performed here.
+func TeeType(teeType string) define.TeeType {
+ return define.TeeType(strings.ToLower(teeType))
+}
+
+// GetConfidentialWorkloadOptions parses a confidential workload settings
+// argument, which controls both whether or not we produce an image that
+// expects to be run using krun, and how we handle things like encrypting
+// the disk image that the container image will contain. The argument is a
+// comma-separated list of key=value options; most keys accept both
+// underscore and dash spellings.
+func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOptions, error) {
+ options := define.ConfidentialWorkloadOptions{
+ TempDir: GetTempDir(),
+ }
+ defaults := options
+ for _, option := range strings.Split(arg, ",") {
+ var err error
+ switch {
+ case strings.HasPrefix(option, "type="):
+ options.TeeType = TeeType(strings.TrimPrefix(option, "type="))
+ switch options.TeeType {
+ case define.SEV, define.SNP, mkcwtypes.SEV_NO_ES:
+ default:
+ return options, fmt.Errorf("parsing type= value %q: unrecognized value", options.TeeType)
+ }
+ case strings.HasPrefix(option, "attestation_url="), strings.HasPrefix(option, "attestation-url="):
+ options.Convert = true
+ options.AttestationURL = strings.TrimPrefix(option, "attestation_url=")
+ // TrimPrefix is a no-op when the other spelling was used.
+ if options.AttestationURL == option {
+ options.AttestationURL = strings.TrimPrefix(option, "attestation-url=")
+ }
+ case strings.HasPrefix(option, "passphrase="):
+ options.Convert = true
+ options.DiskEncryptionPassphrase = strings.TrimPrefix(option, "passphrase=")
+ case strings.HasPrefix(option, "workload_id="), strings.HasPrefix(option, "workload-id="):
+ options.WorkloadID = strings.TrimPrefix(option, "workload_id=")
+ if options.WorkloadID == option {
+ options.WorkloadID = strings.TrimPrefix(option, "workload-id=")
+ }
+ case strings.HasPrefix(option, "cpus="):
+ options.CPUs, err = strconv.Atoi(strings.TrimPrefix(option, "cpus="))
+ if err != nil {
+ return options, fmt.Errorf("parsing cpus= value %q: %w", strings.TrimPrefix(option, "cpus="), err)
+ }
+ case strings.HasPrefix(option, "memory="):
+ options.Memory, err = strconv.Atoi(strings.TrimPrefix(option, "memory="))
+ if err != nil {
+ return options, fmt.Errorf("parsing memory= value %q: %w", strings.TrimPrefix(option, "memory="), err)
+ }
+ case option == "ignore_attestation_errors", option == "ignore-attestation-errors":
+ options.IgnoreAttestationErrors = true
+ case strings.HasPrefix(option, "ignore_attestation_errors="), strings.HasPrefix(option, "ignore-attestation-errors="):
+ val := strings.TrimPrefix(option, "ignore_attestation_errors=")
+ if val == option {
+ val = strings.TrimPrefix(option, "ignore-attestation-errors=")
+ }
+ options.IgnoreAttestationErrors = val == "true" || val == "yes" || val == "on" || val == "1"
+ case strings.HasPrefix(option, "firmware-library="), strings.HasPrefix(option, "firmware_library="):
+ val := strings.TrimPrefix(option, "firmware-library=")
+ if val == option {
+ val = strings.TrimPrefix(option, "firmware_library=")
+ }
+ options.FirmwareLibrary = val
+ case strings.HasPrefix(option, "slop="):
+ options.Slop = strings.TrimPrefix(option, "slop=")
+ default:
+ knownOptions := []string{"type", "attestation_url", "passphrase", "workload_id", "cpus", "memory", "ignore_attestation_errors", "firmware_library", "slop"}
+ return options, fmt.Errorf("expected one or more of %q as arguments for --cw, not %q", knownOptions, option)
+ }
+ }
+ // Any non-default settings require one of the options that enables
+ // conversion (passphrase= or attestation_url=).
+ if options != defaults && !options.Convert {
+ return options, fmt.Errorf("--cw arguments missing one or more of (%q, %q)", "passphrase", "attestation_url")
+ }
+ return options, nil
+}
+
+// IDMappingOptions parses the build options related to user namespaces and ID mapping.
+// NOTE(review): the isolation parameter is accepted but not used here — it is
+// kept for API compatibility; confirm against callers before removing.
+func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
+ return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag)
+}
+
+// GetAutoOptions returns an AutoUserNsOptions with the settings to set up a
+// user namespace automatically. base is "auto" optionally followed by a
+// colon and comma-separated key=value options (size, uidmapping, gidmapping).
+func GetAutoOptions(base string) (*storageTypes.AutoUserNsOptions, error) {
+ parts := strings.SplitN(base, ":", 2)
+ if parts[0] != "auto" {
+ return nil, errors.New("wrong user namespace mode")
+ }
+ options := storageTypes.AutoUserNsOptions{}
+ // Bare "auto" with no option suffix.
+ if len(parts) == 1 {
+ return &options, nil
+ }
+ for _, o := range strings.Split(parts[1], ",") {
+ v := strings.SplitN(o, "=", 2)
+ if len(v) != 2 {
+ return nil, fmt.Errorf("invalid option specified: %q", o)
+ }
+ switch v[0] {
+ case "size":
+ s, err := strconv.ParseUint(v[1], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ options.Size = uint32(s)
+ case "uidmapping":
+ mapping, err := storageTypes.ParseIDMapping([]string{v[1]}, nil, "", "")
+ if err != nil {
+ return nil, err
+ }
+ options.AdditionalUIDMappings = append(options.AdditionalUIDMappings, mapping.UIDMap...)
+ case "gidmapping":
+ mapping, err := storageTypes.ParseIDMapping(nil, []string{v[1]}, "", "")
+ if err != nil {
+ return nil, err
+ }
+ options.AdditionalGIDMappings = append(options.AdditionalGIDMappings, mapping.GIDMap...)
+ default:
+ return nil, fmt.Errorf("unknown option specified: %q", v[0])
+ }
+ }
+ return &options, nil
+}
+
+// IDMappingOptionsFromFlagSet parses the build options related to user
+// namespaces and ID mapping from the userns*, userns-uid-map*, and
+// userns-gid-map* flags, returning the namespace options and ID mapping
+// options to use for the build.
+func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
+ isAuto := false
+ autoOpts := &storageTypes.AutoUserNsOptions{}
+ user := findFlagFunc("userns-uid-map-user").Value.String()
+ group := findFlagFunc("userns-gid-map-group").Value.String()
+ // If only the user or group was specified, use the same value for the
+ // other, since we need both in order to initialize the maps using the
+ // names.
+ if user == "" && group != "" {
+ user = group
+ }
+ if group == "" && user != "" {
+ group = user
+ }
+ // Either start with empty maps or the name-based maps.
+ mappings := idtools.NewIDMappingsFromMaps(nil, nil)
+ if user != "" && group != "" {
+ submappings, err := idtools.NewIDMappings(user, group)
+ if err != nil {
+ return nil, nil, err
+ }
+ mappings = submappings
+ }
+ globalOptions := persistentFlags
+ // We'll parse the UID and GID mapping options the same way.
+ buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) {
+ outmap := make([]specs.LinuxIDMapping, 0, len(basemap))
+ // Start with the name-based map entries.
+ for _, m := range basemap {
+ outmap = append(outmap, specs.LinuxIDMapping{
+ ContainerID: uint32(m.ContainerID),
+ HostID: uint32(m.HostID),
+ Size: uint32(m.Size),
+ })
+ }
+ // Parse the flag's value as one or more triples (if it's even
+ // been set), and append them. A command-local flag takes
+ // precedence over the persistent (global) one.
+ var spec []string
+ if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed {
+ spec, _ = globalOptions.GetStringSlice(option)
+ }
+ if findFlagFunc(option).Changed {
+ spec, _ = flags.GetStringSlice(option)
+ }
+ idmap, err := parseIDMap(spec)
+ if err != nil {
+ return nil, err
+ }
+ for _, m := range idmap {
+ outmap = append(outmap, specs.LinuxIDMapping{
+ ContainerID: m[0],
+ HostID: m[1],
+ Size: m[2],
+ })
+ }
+ return outmap, nil
+ }
+ uidmap, err := buildIDMap(mappings.UIDs(), "userns-uid-map")
+ if err != nil {
+ return nil, nil, err
+ }
+ gidmap, err := buildIDMap(mappings.GIDs(), "userns-gid-map")
+ if err != nil {
+ return nil, nil, err
+ }
+ // If we only have one map or the other populated at this point, then
+ // use the same mapping for both, since we know that no user or group
+ // name was specified, but a specific mapping was for one or the other.
+ if len(uidmap) == 0 && len(gidmap) != 0 {
+ uidmap = gidmap
+ }
+ if len(gidmap) == 0 && len(uidmap) != 0 {
+ gidmap = uidmap
+ }
+
+ // By default, having mappings configured means we use a user
+ // namespace. Otherwise, we don't.
+ usernsOption := define.NamespaceOption{
+ Name: string(specs.UserNamespace),
+ Host: len(uidmap) == 0 && len(gidmap) == 0,
+ }
+ // If the user specifically requested that we either use or don't use
+ // user namespaces, override that default.
+ if findFlagFunc("userns").Changed {
+ how := findFlagFunc("userns").Value.String()
+ if strings.HasPrefix(how, "auto") {
+ // "auto[:opts]" asks storage to pick the mappings for us.
+ autoOpts, err = GetAutoOptions(how)
+ if err != nil {
+ return nil, nil, err
+ }
+ isAuto = true
+ usernsOption.Host = false
+ } else {
+ switch how {
+ case "", "container", "private":
+ usernsOption.Host = false
+ case "host":
+ usernsOption.Host = true
+ default:
+ // Anything else is a path to an existing namespace,
+ // optionally prefixed with "ns:".
+ how = strings.TrimPrefix(how, "ns:")
+ if _, err := os.Stat(how); err != nil {
+ return nil, nil, fmt.Errorf("checking %s namespace: %w", string(specs.UserNamespace), err)
+ }
+ logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how)
+ usernsOption.Path = how
+ }
+ }
+ }
+ usernsOptions = define.NamespaceOptions{usernsOption}
+
+ // If the user requested that we use the host namespace, but also that
+ // we use mappings, that's not going to work.
+ if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host {
+ return nil, nil, fmt.Errorf("can not specify ID mappings while using host's user namespace")
+ }
+ return usernsOptions, &define.IDMappingOptions{
+ HostUIDMapping: usernsOption.Host,
+ HostGIDMapping: usernsOption.Host,
+ UIDMap: uidmap,
+ GIDMap: gidmap,
+ AutoUserNs: isAuto,
+ AutoUserNsOpts: *autoOpts,
+ }, nil
+}
+
+// parseIDMap parses each spec entry as one or more containerid:hostid:size
+// triples (any non-digit runs act as separators) and returns them as
+// [containerID, hostID, size] uint32 triples.
+func parseIDMap(spec []string) (m [][3]uint32, err error) {
+ for _, s := range spec {
+ // Split on every non-digit run, so ":", ",", and whitespace all work
+ // as delimiters; the count must come out as a multiple of three.
+ args := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsDigit(r) })
+ if len(args)%3 != 0 {
+ return nil, fmt.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s)
+ }
+ for len(args) >= 3 {
+ cid, err := strconv.ParseUint(args[0], 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("parsing container ID %q from mapping %q as a number: %w", args[0], s, err)
+ }
+ hostid, err := strconv.ParseUint(args[1], 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("parsing host ID %q from mapping %q as a number: %w", args[1], s, err)
+ }
+ size, err := strconv.ParseUint(args[2], 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %q from mapping %q as a number: %w", args[2], s, err)
+ }
+ m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)})
+ args = args[3:]
+ }
+ }
+ return m, nil
+}
+
+// NamespaceOptions parses the build options for all namespaces except for the
+// user namespace. It is a convenience wrapper over
+// NamespaceOptionsFromFlagSet.
+func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) {
+ return NamespaceOptionsFromFlagSet(c.Flags(), c.Flag)
+}
+
+// NamespaceOptionsFromFlagSet parses the build options for all namespaces
+// except for the user namespace (cgroupns, ipc, network, pid, uts). Each
+// namespace flag accepts "private"/"container", "host", "none" (network
+// only), an "ns:"-prefixed path, or — for network — a comma-separated
+// network list. The returned policy reflects whether networking ends up
+// enabled or disabled.
+func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) {
+ options := make(define.NamespaceOptions, 0, 7)
+ policy := define.NetworkDefault
+ for _, what := range []string{"cgroupns", string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
+ if flags.Lookup(what) != nil && findFlagFunc(what).Changed {
+ how := findFlagFunc(what).Value.String()
+ // The flag is named "cgroupns" but the spec calls it "cgroup".
+ switch what {
+ case "cgroupns":
+ what = string(specs.CgroupNamespace)
+ }
+ switch how {
+ case "", "container", "private":
+ logrus.Debugf("setting %q namespace to %q", what, "")
+ policy = define.NetworkEnabled
+ options.AddOrReplace(define.NamespaceOption{
+ Name: what,
+ })
+ case "host":
+ logrus.Debugf("setting %q namespace to host", what)
+ policy = define.NetworkEnabled
+ options.AddOrReplace(define.NamespaceOption{
+ Name: what,
+ Host: true,
+ })
+ default:
+ // network=none disables networking altogether.
+ if what == string(specs.NetworkNamespace) {
+ if how == "none" {
+ options.AddOrReplace(define.NamespaceOption{
+ Name: what,
+ })
+ policy = define.NetworkDisabled
+ logrus.Debugf("setting network to disabled")
+ break
+ }
+ }
+ how = strings.TrimPrefix(how, "ns:")
+ // if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go
+ if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) {
+ if _, err := os.Stat(how); err != nil {
+ return nil, define.NetworkDefault, fmt.Errorf("checking %s namespace: %w", what, err)
+ }
+ }
+ policy = define.NetworkEnabled
+ logrus.Debugf("setting %q namespace to %q", what, how)
+ options.AddOrReplace(define.NamespaceOption{
+ Name: what,
+ Path: how,
+ })
+ }
+ }
+ }
+ return options, policy, nil
+}
+
+func defaultIsolation() (define.Isolation, error) {
+ isolation, isSet := os.LookupEnv("BUILDAH_ISOLATION")
+ if isSet {
+ switch strings.ToLower(isolation) {
+ case "oci":
+ return define.IsolationOCI, nil
+ case "rootless":
+ return define.IsolationOCIRootless, nil
+ case "chroot":
+ return define.IsolationChroot, nil
+ default:
+ return 0, fmt.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
+ }
+ }
+ if unshare.IsRootless() {
+ return define.IsolationOCIRootless, nil
+ }
+ return define.IsolationDefault, nil
+}
+
+// IsolationOption parses the --isolation flag.
+func IsolationOption(isolation string) (define.Isolation, error) {
+ if isolation != "" {
+ switch strings.ToLower(isolation) {
+ case "oci", "default":
+ return define.IsolationOCI, nil
+ case "rootless":
+ return define.IsolationOCIRootless, nil
+ case "chroot":
+ return define.IsolationChroot, nil
+ default:
+ return 0, fmt.Errorf("unrecognized isolation type %q", isolation)
+ }
+ }
+ return defaultIsolation()
+}
+
+// Device parses device mapping string to a src, dest & permissions string
+// Valid values for device look like:
+//
+// '/dev/sdc"
+// '/dev/sdc:/dev/xvdc"
+// '/dev/sdc:/dev/xvdc:rwm"
+// '/dev/sdc:rm"
+func Device(device string) (string, string, string, error) {
+ src := ""
+ dst := ""
+ permissions := "rwm"
+ arr := strings.Split(device, ":")
+ switch len(arr) {
+ case 3:
+ if !isValidDeviceMode(arr[2]) {
+ return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2])
+ }
+ permissions = arr[2]
+ fallthrough
+ case 2:
+ if isValidDeviceMode(arr[1]) {
+ permissions = arr[1]
+ } else {
+ if len(arr[1]) == 0 || arr[1][0] != '/' {
+ return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1])
+ }
+ dst = arr[1]
+ }
+ fallthrough
+ case 1:
+ if len(arr[0]) > 0 {
+ src = arr[0]
+ break
+ }
+ fallthrough
+ default:
+ return "", "", "", fmt.Errorf("invalid device specification: %s", device)
+ }
+
+ if dst == "" {
+ dst = src
+ }
+ return src, dst, permissions, nil
+}
+
// isValidDeviceMode checks if the mode for device is valid or not.
// A valid mode is a non-empty composition of r (read), w (write), and
// m (mknod) in which no letter is repeated.
func isValidDeviceMode(mode string) bool {
	if mode == "" {
		return false
	}
	seen := map[rune]bool{}
	for _, c := range mode {
		switch c {
		case 'r', 'w', 'm':
			if seen[c] {
				// Repeated letters are rejected.
				return false
			}
			seen[c] = true
		default:
			return false
		}
	}
	return true
}
+
// GetTempDir returns the path of the preferred temporary directory on the host.
// It is a thin wrapper around the internal tmpdir helper package.
func GetTempDir() string {
	return tmpdir.GetTempDir()
}
+
+// Secrets parses the --secret flag
+func Secrets(secrets []string) (map[string]define.Secret, error) {
+ invalidSyntax := fmt.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]")
+ parsed := make(map[string]define.Secret)
+ for _, secret := range secrets {
+ tokens := strings.Split(secret, ",")
+ var id, src, typ string
+ for _, val := range tokens {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "id":
+ id = kv[1]
+ case "src":
+ src = kv[1]
+ case "env":
+ src = kv[1]
+ typ = "env"
+ case "type":
+ if kv[1] != "file" && kv[1] != "env" {
+ return nil, errors.New("invalid secret type, must be file or env")
+ }
+ typ = kv[1]
+ }
+ }
+ if id == "" {
+ return nil, invalidSyntax
+ }
+ if src == "" {
+ src = id
+ }
+ if typ == "" {
+ if _, ok := os.LookupEnv(id); ok {
+ typ = "env"
+ } else {
+ typ = "file"
+ }
+ }
+
+ if typ == "file" {
+ fullPath, err := filepath.Abs(src)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse secrets: %w", err)
+ }
+ _, err = os.Stat(fullPath)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse secrets: %w", err)
+ }
+ src = fullPath
+ }
+ newSecret := define.Secret{
+ Source: src,
+ SourceType: typ,
+ }
+ parsed[id] = newSecret
+
+ }
+ return parsed, nil
+}
+
+// SSH parses the --ssh flag
+func SSH(sshSources []string) (map[string]*sshagent.Source, error) {
+ parsed := make(map[string]*sshagent.Source)
+ var paths []string
+ for _, v := range sshSources {
+ parts := strings.SplitN(v, "=", 2)
+ if len(parts) > 1 {
+ paths = strings.Split(parts[1], ",")
+ }
+
+ source, err := sshagent.NewSource(paths)
+ if err != nil {
+ return nil, err
+ }
+ parsed[parts[0]] = source
+ }
+ return parsed, nil
+}
+
// ContainerIgnoreFile consumes path to `dockerignore` or `containerignore`
// and returns list of files to exclude along with the path to processed ignore
// file. Deprecated since this might become internal only, please avoid relying
// on this function.
func ContainerIgnoreFile(contextDir, path string, containerFiles []string) ([]string, string, error) {
	// An explicitly supplied ignore file wins outright.
	if path != "" {
		excludes, err := imagebuilder.ParseIgnore(path)
		return excludes, path, err
	}
	// If path was not supplied give priority to `<containerfile>.containerignore` first.
	for _, containerfile := range containerFiles {
		if !filepath.IsAbs(containerfile) {
			containerfile = filepath.Join(contextDir, containerfile)
		}
		containerfileIgnore := ""
		if _, err := os.Stat(containerfile + ".containerignore"); err == nil {
			containerfileIgnore = containerfile + ".containerignore"
		}
		// NOTE(review): when both files exist, this second assignment makes
		// `.dockerignore` override `.containerignore`, which appears to
		// contradict the comment above — confirm which precedence is intended.
		if _, err := os.Stat(containerfile + ".dockerignore"); err == nil {
			containerfileIgnore = containerfile + ".dockerignore"
		}
		if containerfileIgnore != "" {
			excludes, err := imagebuilder.ParseIgnore(containerfileIgnore)
			return excludes, containerfileIgnore, err
		}
	}
	// Fall back to context-level ignore files; SecureJoin resolves the name
	// relative to contextDir without letting symlinks escape it.
	path, symlinkErr := securejoin.SecureJoin(contextDir, ".containerignore")
	if symlinkErr != nil {
		return nil, "", symlinkErr
	}
	excludes, err := imagebuilder.ParseIgnore(path)
	if errors.Is(err, os.ErrNotExist) {
		path, symlinkErr = securejoin.SecureJoin(contextDir, ".dockerignore")
		if symlinkErr != nil {
			return nil, "", symlinkErr
		}
		excludes, err = imagebuilder.ParseIgnore(path)
	}
	// A missing ignore file is not an error: no exclusions apply.
	if errors.Is(err, os.ErrNotExist) {
		return excludes, "", nil
	}
	return excludes, path, err
}
diff --git a/pkg/parse/parse_test.go b/pkg/parse/parse_test.go
new file mode 100644
index 0000000..c1f8833
--- /dev/null
+++ b/pkg/parse/parse_test.go
@@ -0,0 +1,224 @@
+package parse
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/image/v5/types"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+)
+
// TestCommonBuildOptionsFromFlagSet checks that an explicitly-set --memory
// flag is parsed into the expected byte count (2GB -> 2147483648), with the
// remaining flags left at their defaults.
func TestCommonBuildOptionsFromFlagSet(t *testing.T) {
	fs := pflag.NewFlagSet("testme", pflag.PanicOnError)
	fs.String("memory", "1GB", "")
	fs.String("shm-size", "5TB", "")
	fs.String("cpuset-cpus", "1", "")
	fs.String("cpuset-mems", "2", "")
	fs.String("cgroup-parent", "none", "")
	err := fs.Parse([]string{"--memory", "2GB"})
	assert.NoError(t, err)
	cbo, err := CommonBuildOptionsFromFlagSet(fs, fs.Lookup)
	assert.NoError(t, err)
	assert.Equal(t, cbo.Memory, int64(2147483648))
}
+
// TestDeviceParser verifies the given device strings is parsed correctly
func TestDeviceParser(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("Devices is only supported on Linux")
	}

	// Test defaults: destination defaults to the source, permissions to "rwm".
	src, dest, permissions, err := Device("/dev/foo")
	assert.NoError(t, err)
	assert.Equal(t, src, "/dev/foo")
	assert.Equal(t, dest, "/dev/foo")
	assert.Equal(t, permissions, "rwm")

	// Test defaults, different dest
	src, dest, permissions, err = Device("/dev/foo:/dev/bar")
	assert.NoError(t, err)
	assert.Equal(t, src, "/dev/foo")
	assert.Equal(t, dest, "/dev/bar")
	assert.Equal(t, permissions, "rwm")

	// Test fully specified
	src, dest, permissions, err = Device("/dev/foo:/dev/bar:rm")
	assert.NoError(t, err)
	assert.Equal(t, src, "/dev/foo")
	assert.Equal(t, dest, "/dev/bar")
	assert.Equal(t, permissions, "rm")

	// Test device, permissions (second field recognized as a mode)
	src, dest, permissions, err = Device("/dev/foo:rm")
	assert.NoError(t, err)
	assert.Equal(t, src, "/dev/foo")
	assert.Equal(t, dest, "/dev/foo")
	assert.Equal(t, permissions, "rm")

	// Test bogus permissions
	_, _, _, err = Device("/dev/fuse1:BOGUS")
	assert.Error(t, err)

	// Empty specification is rejected.
	_, _, _, err = Device("")
	assert.Error(t, err)

	// More than three fields is rejected.
	_, _, _, err = Device("/dev/foo:/dev/bar:rm:")
	assert.Error(t, err)

	// Empty middle field is rejected.
	_, _, _, err = Device("/dev/foo::rm")
	assert.Error(t, err)
}
+
// TestIsValidDeviceMode checks that only compositions of r, w, and m are
// accepted as device permission modes.
func TestIsValidDeviceMode(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("Devices is only supported on Linux")
	}
	assert.False(t, isValidDeviceMode("BOGUS"))
	assert.False(t, isValidDeviceMode("rwx"))
	assert.True(t, isValidDeviceMode("r"))
	assert.True(t, isValidDeviceMode("rw"))
	assert.True(t, isValidDeviceMode("rm"))
	assert.True(t, isValidDeviceMode("rwm"))
}
+
// TestDeviceFromPath checks resolution of single device nodes, directories
// of devices, missing paths, and non-device files.
func TestDeviceFromPath(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("Devices is only supported on Linux")
	}
	// Path is valid: /dev/null is char device 1:3, owned by root.
	dev, err := DeviceFromPath("/dev/null")
	assert.NoError(t, err)
	assert.Equal(t, len(dev), 1)
	assert.Equal(t, dev[0].Major, int64(1))
	assert.Equal(t, dev[0].Minor, int64(3))
	assert.Equal(t, string(dev[0].Permissions), "rwm")
	assert.Equal(t, dev[0].Uid, uint32(0))
	assert.Equal(t, dev[0].Gid, uint32(0))

	// Path does not exist
	_, err = DeviceFromPath("/dev/BOGUS")
	assert.Error(t, err)

	// Path is a directory of devices
	_, err = DeviceFromPath("/dev/pts")
	assert.NoError(t, err)

	// Path is a regular file, not a device
	_, err = DeviceFromPath("/etc/passwd")
	assert.Error(t, err)
}
+
// TestIDMappingOptions verifies that when no userns-related flags are set,
// parsing yields a user namespace option that maps to the host.
func TestIDMappingOptions(t *testing.T) {
	fs := pflag.NewFlagSet("testme", pflag.PanicOnError)
	pfs := pflag.NewFlagSet("persist", pflag.PanicOnError)
	fs.String("userns-uid-map-user", "", "")
	fs.String("userns-gid-map-group", "", "")
	fs.String("userns-uid-map", "", "")
	fs.String("userns-gid-map", "", "")
	fs.String("userns", "", "")
	err := fs.Parse([]string{})
	assert.NoError(t, err)
	uos, _, err := IDMappingOptionsFromFlagSet(fs, pfs, fs.Lookup)
	assert.NoError(t, err)
	nso := uos.Find(string(specs.UserNamespace))
	assert.Equal(t, *nso, define.NamespaceOption{
		Host: true,
		Name: string(specs.UserNamespace),
	})
}
+
+func TestIsolation(t *testing.T) {
+ def, err := defaultIsolation()
+ if err != nil {
+ assert.Error(t, err)
+ }
+
+ isolations := []string{"", "default", "oci", "chroot", "rootless"}
+ for _, i := range isolations {
+ isolation, err := IsolationOption(i)
+ if err != nil {
+ assert.Error(t, fmt.Errorf("isolation %q not supported", i))
+ }
+ var expected string
+ switch i {
+ case "":
+ expected = def.String()
+ case "default":
+ expected = "oci"
+ default:
+ expected = i
+ }
+
+ if isolation.String() != expected {
+ assert.Error(t, fmt.Errorf("isolation %q not equal to user input %q", isolation.String(), expected))
+ }
+ }
+}
+
// TestNamespaceOptions verifies that "--cgroupns private" parses into a
// cgroup namespace option with no Host/Path set, with networking enabled.
func TestNamespaceOptions(t *testing.T) {
	fs := pflag.NewFlagSet("testme", pflag.PanicOnError)
	fs.String("cgroupns", "", "")
	err := fs.Parse([]string{"--cgroupns", "private"})
	assert.NoError(t, err)
	nsos, np, err := NamespaceOptionsFromFlagSet(fs, fs.Lookup)
	assert.NoError(t, err)
	assert.Equal(t, np, define.NetworkEnabled)
	nso := nsos.Find(string(specs.CgroupNamespace))
	assert.Equal(t, *nso, define.NamespaceOption{
		Name: string(specs.CgroupNamespace),
	})
}
+
+func TestParsePlatform(t *testing.T) {
+ os, arch, variant, err := Platform("a/b/c")
+ assert.NoError(t, err)
+ assert.NoError(t, err)
+ assert.Equal(t, os, "a")
+ assert.Equal(t, arch, "b")
+ assert.Equal(t, variant, "c")
+
+ os, arch, variant, err = Platform("a/b")
+ assert.NoError(t, err)
+ assert.NoError(t, err)
+ assert.Equal(t, os, "a")
+ assert.Equal(t, arch, "b")
+ assert.Equal(t, variant, "")
+
+ _, _, _, err = Platform("a")
+ assert.Error(t, err)
+}
+
// TestSplitStringWithColonEscape checks that volume specifications split on
// colons while "\:" sequences survive as literal colons in the parts.
func TestSplitStringWithColonEscape(t *testing.T) {
	tests := []struct {
		volume         string
		expectedResult []string
	}{
		{"/root/a:/root/test:O", []string{"/root/a", "/root/test", "O"}},
		{"/root/a\\:b/c:/root/test:O", []string{"/root/a:b/c", "/root/test", "O"}},
		{"/root/a:/root/test\\:test1/a:O", []string{"/root/a", "/root/test:test1/a", "O"}},
		{"/root/a\\:b/c:/root/test\\:test1/a:O", []string{"/root/a:b/c", "/root/test:test1/a", "O"}},
	}
	for _, args := range tests {
		val := SplitStringWithColonEscape(args.volume)
		assert.Equal(t, val, args.expectedResult)
	}
}
+
// TestSystemContextFromFlagSet checks that --tls-verify=false produces a
// SystemContext with TLS verification explicitly disabled and the expected
// defaults for temp dir and user agent.
func TestSystemContextFromFlagSet(t *testing.T) {
	fs := pflag.NewFlagSet("testme", pflag.PanicOnError)
	fs.Bool("tls-verify", false, "")
	err := fs.Parse([]string{"--tls-verify", "false"})
	assert.NoError(t, err)
	sc, err := SystemContextFromFlagSet(fs, fs.Lookup)
	assert.NoError(t, err)
	assert.Equal(t, sc, &types.SystemContext{
		BigFilesTemporaryDir:        GetTempDir(),
		DockerInsecureSkipTLSVerify: types.OptionalBoolFalse,
		DockerRegistryUserAgent:     fmt.Sprintf("Buildah/%s", define.Version),
	})
}
diff --git a/pkg/parse/parse_unix.go b/pkg/parse/parse_unix.go
new file mode 100644
index 0000000..ff8ce85
--- /dev/null
+++ b/pkg/parse/parse_unix.go
@@ -0,0 +1,49 @@
+//go:build linux || darwin
+// +build linux darwin
+
+package parse
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/buildah/define"
+ "github.com/opencontainers/runc/libcontainer/devices"
+)
+
+func DeviceFromPath(device string) (define.ContainerDevices, error) {
+ var devs define.ContainerDevices
+ src, dst, permissions, err := Device(device)
+ if err != nil {
+ return nil, err
+ }
+ srcInfo, err := os.Stat(src)
+ if err != nil {
+ return nil, fmt.Errorf("getting info of source device %s: %w", src, err)
+ }
+
+ if !srcInfo.IsDir() {
+ dev, err := devices.DeviceFromPath(src, permissions)
+ if err != nil {
+ return nil, fmt.Errorf("%s is not a valid device: %w", src, err)
+ }
+ dev.Path = dst
+ device := define.BuildahDevice{Device: *dev, Source: src, Destination: dst}
+ devs = append(devs, device)
+ return devs, nil
+ }
+
+ // If source device is a directory
+ srcDevices, err := devices.GetDevices(src)
+ if err != nil {
+ return nil, fmt.Errorf("getting source devices from directory %s: %w", src, err)
+ }
+ for _, d := range srcDevices {
+ d.Path = filepath.Join(dst, filepath.Base(d.Path))
+ d.Permissions = devices.Permissions(permissions)
+ device := define.BuildahDevice{Device: *d, Source: src, Destination: dst}
+ devs = append(devs, device)
+ }
+ return devs, nil
+}
diff --git a/pkg/parse/parse_unsupported.go b/pkg/parse/parse_unsupported.go
new file mode 100644
index 0000000..e3d3a71
--- /dev/null
+++ b/pkg/parse/parse_unsupported.go
@@ -0,0 +1,18 @@
+//go:build !linux && !darwin
+// +build !linux,!darwin
+
+package parse
+
+import (
+ "errors"
+
+ "github.com/containers/buildah/define"
+)
+
// getDefaultProcessLimits returns no default process limits on platforms
// other than Linux and Darwin.
func getDefaultProcessLimits() []string {
	return []string{}
}

// DeviceFromPath is a stub: device mapping is not supported on this platform.
func DeviceFromPath(device string) (define.ContainerDevices, error) {
	return nil, errors.New("devices not supported")
}
diff --git a/pkg/rusage/rusage.go b/pkg/rusage/rusage.go
new file mode 100644
index 0000000..7b1226d
--- /dev/null
+++ b/pkg/rusage/rusage.go
@@ -0,0 +1,48 @@
+package rusage
+
+import (
+ "fmt"
+ "time"
+
+ units "github.com/docker/go-units"
+)
+
// Rusage is a subset of a Unix-style resource usage counter for the current
// process and its children. The counters are always 0 on platforms where the
// system call is not available (i.e., systems where getrusage() doesn't
// exist).
type Rusage struct {
	Date              time.Time     // when the sample was taken
	Elapsed           time.Duration // wall-clock time between two samples; set by Subtract
	Utime, Stime      time.Duration // user and system CPU time
	Inblock, Outblock int64         // block input and output operation counts
}
+
// FormatDiff formats the result of rusage.Rusage.Subtract() for logging.
// Block counts are converted to sizes assuming 512-byte blocks.
func FormatDiff(diff Rusage) string {
	return fmt.Sprintf("%s(system) %s(user) %s(elapsed) %s input %s output", diff.Stime.Round(time.Millisecond), diff.Utime.Round(time.Millisecond), diff.Elapsed.Round(time.Millisecond), units.HumanSize(float64(diff.Inblock*512)), units.HumanSize(float64(diff.Outblock*512)))
}
+
// Subtract subtracts the items in baseline from r, and returns the difference.
// The Date field is zeroed for easier comparison with the zero value for the
// Rusage type, and Elapsed is set to the time between the two samples.
func (r Rusage) Subtract(baseline Rusage) Rusage {
	return Rusage{
		Elapsed:  r.Date.Sub(baseline.Date),
		Utime:    r.Utime - baseline.Utime,
		Stime:    r.Stime - baseline.Stime,
		Inblock:  r.Inblock - baseline.Inblock,
		Outblock: r.Outblock - baseline.Outblock,
	}
}
+
// Get returns the counters for the current process's children, or an error.
// (The stale reference to a "since" parameter in the old comment was wrong:
// Get takes no arguments; use Subtract on two samples instead.)
// The Elapsed field will always be set to zero.
func Get() (Rusage, error) {
	counters, err := get()
	if err != nil {
		return Rusage{}, err
	}
	return counters, nil
}
diff --git a/pkg/rusage/rusage_test.go b/pkg/rusage/rusage_test.go
new file mode 100644
index 0000000..62ac573
--- /dev/null
+++ b/pkg/rusage/rusage_test.go
@@ -0,0 +1,48 @@
+package rusage
+
+import (
+ "flag"
+ "os"
+ "testing"
+
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+)
+
const (
	// noopCommand names the reexec child used as a minimal measurable process.
	noopCommand = "noop"
)

// noopMain is the child entry point; it does nothing and exits immediately.
func noopMain() {
}

func init() {
	// Register the no-op child with the reexec machinery before tests run.
	reexec.Register(noopCommand, noopMain)
}
+
// TestMain hands control to a registered reexec child when this binary is
// re-invoked as one; otherwise it runs the test suite, enabling debug
// logging under -v.
func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}
	flag.Parse()
	if testing.Verbose() {
		logrus.SetLevel(logrus.DebugLevel)
	}
	os.Exit(m.Run())
}
+
// TestRusage runs a no-op child process and checks that the difference
// between before/after usage samples is non-zero, i.e. the child's resource
// usage was actually accounted to us.
func TestRusage(t *testing.T) {
	if !Supported() {
		t.Skip("not supported on this platform")
	}
	before, err := Get()
	require.Nil(t, err, "unexpected error from GetRusage before running child: %v", err)
	cmd := reexec.Command(noopCommand)
	err = cmd.Run()
	require.Nil(t, err, "unexpected error running child process: %v", err)
	after, err := Get()
	require.Nil(t, err, "unexpected error from GetRusage after running child: %v", err)
	t.Logf("rusage from child: %#v", FormatDiff(after.Subtract(before)))
	require.NotZero(t, after.Subtract(before), "running a child process didn't use any resources?")
}
diff --git a/pkg/rusage/rusage_unix.go b/pkg/rusage/rusage_unix.go
new file mode 100644
index 0000000..317046f
--- /dev/null
+++ b/pkg/rusage/rusage_unix.go
@@ -0,0 +1,35 @@
+//go:build !windows
+// +build !windows
+
+package rusage
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+)
+
+func mkduration(tv syscall.Timeval) time.Duration {
+ return time.Duration(tv.Sec)*time.Second + time.Duration(tv.Usec)*time.Microsecond
+}
+
// get samples the resource usage accumulated by terminated, waited-for
// children of the current process (RUSAGE_CHILDREN) and stamps the sample
// with the current time. The Elapsed field is left at zero.
func get() (Rusage, error) {
	var rusage syscall.Rusage
	err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage)
	if err != nil {
		return Rusage{}, fmt.Errorf("getting resource usage: %w", err)
	}
	r := Rusage{
		Date:     time.Now(),
		Utime:    mkduration(rusage.Utime),
		Stime:    mkduration(rusage.Stime),
		Inblock:  int64(rusage.Inblock), // nolint: unconvert
		Outblock: int64(rusage.Oublock), // nolint: unconvert
	}
	return r, nil
}
+
// Supported returns true if resource usage counters are supported on this OS.
// This file builds for all non-Windows targets, where getrusage is available.
func Supported() bool {
	return true
}
diff --git a/pkg/rusage/rusage_unsupported.go b/pkg/rusage/rusage_unsupported.go
new file mode 100644
index 0000000..54ed77f
--- /dev/null
+++ b/pkg/rusage/rusage_unsupported.go
@@ -0,0 +1,18 @@
+//go:build windows
+// +build windows
+
+package rusage
+
+import (
+ "fmt"
+ "syscall"
+)
+
// get is the Windows stub: resource usage sampling is not implemented, so it
// always fails with ENOTSUP.
func get() (Rusage, error) {
	return Rusage{}, fmt.Errorf("getting resource usage: %w", syscall.ENOTSUP)
}

// Supported returns true if resource usage counters are supported on this OS.
// On Windows they are not.
func Supported() bool {
	return false
}
diff --git a/pkg/sshagent/sshagent.go b/pkg/sshagent/sshagent.go
new file mode 100644
index 0000000..ec28482
--- /dev/null
+++ b/pkg/sshagent/sshagent.go
@@ -0,0 +1,254 @@
+package sshagent
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/containers/buildah/internal/tmpdir"
+ "github.com/opencontainers/selinux/go-selinux"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+)
+
// AgentServer is an ssh agent that can be served and shutdown at a later time
type AgentServer struct {
	agent    agent.Agent    // the agent implementation being served
	wg       sync.WaitGroup // tracks in-flight connection handlers
	conn     *net.Conn      // upstream connection when proxying a host agent; nil for keyring agents
	listener net.Listener   // the unix socket listener created by Serve
	shutdown chan bool      // signalled by Shutdown so the accept loop can exit cleanly
	servePath string        // path of the served socket; empty until Serve succeeds
	serveDir  string        // temporary directory containing the socket
}
+
+// NewAgentServer creates a new agent on the host
+func NewAgentServer(source *Source) (*AgentServer, error) {
+ if source.Keys != nil {
+ return newAgentServerKeyring(source.Keys)
+ }
+ return newAgentServerSocket(source.Socket)
+}
+
+// newAgentServerKeyring creates a new agent from scratch and adds keys
+func newAgentServerKeyring(keys []interface{}) (*AgentServer, error) {
+ a := agent.NewKeyring()
+ for _, k := range keys {
+ if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil {
+ return nil, fmt.Errorf("failed to create ssh agent: %w", err)
+ }
+ }
+ return &AgentServer{
+ agent: a,
+ shutdown: make(chan bool, 1),
+ }, nil
+}
+
+// newAgentServerSocket creates a new agent from an existing agent on the host
+func newAgentServerSocket(socketPath string) (*AgentServer, error) {
+ conn, err := net.Dial("unix", socketPath)
+ if err != nil {
+ return nil, err
+ }
+ a := &readOnlyAgent{agent.NewClient(conn)}
+
+ return &AgentServer{
+ agent: a,
+ conn: &conn,
+ shutdown: make(chan bool, 1),
+ }, nil
+
+}
+
// Serve starts the SSH agent on the host and returns the path of the socket where the agent is serving
func (a *AgentServer) Serve(processLabel string) (string, error) {
	// Calls to `selinux.SetSocketLabel` should be wrapped in
	// runtime.LockOSThread()/runtime.UnlockOSThread() until
	// the socket is created to guarantee another goroutine
	// does not migrate to the current thread before execution
	// is complete.
	// Ref: https://github.com/opencontainers/selinux/blob/main/go-selinux/selinux.go#L158
	runtime.LockOSThread()
	err := selinux.SetSocketLabel(processLabel)
	if err != nil {
		return "", err
	}
	// The socket lives in its own temporary directory so Shutdown can remove it.
	serveDir, err := os.MkdirTemp(tmpdir.GetTempDir(), ".buildah-ssh-sock")
	if err != nil {
		return "", err
	}
	servePath := filepath.Join(serveDir, "ssh_auth_sock")
	a.serveDir = serveDir
	a.servePath = servePath
	listener, err := net.Listen("unix", servePath)
	if err != nil {
		return "", err
	}
	// Reset socket label.
	err = selinux.SetSocketLabel("")
	// Unlock the thread only if the process label could be restored
	// successfully. Otherwise leave the thread locked and the Go runtime
	// will terminate it once it returns to the threads pool.
	runtime.UnlockOSThread()
	if err != nil {
		return "", err
	}
	a.listener = listener

	// Accept loop: serve each incoming connection on its own goroutine.
	go func() {
		for {
			// listener.Accept blocks until a connection arrives or the
			// listener is closed by Shutdown.
			c, err := listener.Accept()
			if err != nil {
				select {
				case <-a.shutdown:
					// Shutdown closed the listener deliberately; exit quietly.
					return
				default:
					logrus.Errorf("error accepting SSH connection: %v", err)
					continue
				}
			}
			a.wg.Add(1)
			go func() {
				// agent.ServeAgent will only ever return with error,
				// io.EOF being the normal end-of-connection case.
				err := agent.ServeAgent(a.agent, c)
				if err != io.EOF {
					logrus.Errorf("error serving agent: %v", err)
				}
				a.wg.Done()
			}()
			// the only way to get agent.ServeAgent to return is to close the
			// connection it's serving on
			// TODO: ideally we should use some sort of forwarding mechanism for output instead of manually closing connection.
			go func() {
				time.Sleep(2000 * time.Millisecond)
				c.Close()
			}()
		}
	}()
	return a.servePath, nil
}
+
// Shutdown shuts down the agent and closes the socket.
// The shutdown channel is signalled before the listener is closed so the
// accept loop in Serve can tell a deliberate shutdown from a real accept
// error; we then wait for in-flight handlers before removing the socket dir.
func (a *AgentServer) Shutdown() error {
	if a.listener != nil {
		a.shutdown <- true
		a.listener.Close()
	}
	if a.conn != nil {
		// Also close the upstream connection when proxying a host agent.
		conn := *a.conn
		conn.Close()
	}
	a.wg.Wait()
	err := os.RemoveAll(a.serveDir)
	if err != nil {
		return err
	}
	a.serveDir = ""
	a.servePath = ""
	return nil
}
+
// ServePath returns the path where the agent is serving; it is empty before
// Serve has been called or after Shutdown.
func (a *AgentServer) ServePath() string {
	return a.servePath
}
+
// readOnlyAgent and its functions originally from github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go

// readOnlyAgent implements the agent.Agent interface.
// readOnlyAgent allows reads only to prevent keys from being added from the build to the forwarded ssh agent on the host
type readOnlyAgent struct {
	agent.ExtendedAgent
}

// Add always fails: the forwarded agent is read-only.
func (a *readOnlyAgent) Add(_ agent.AddedKey) error {
	return errors.New("adding new keys not allowed by buildah")
}

// Remove always fails: the forwarded agent is read-only.
func (a *readOnlyAgent) Remove(_ ssh.PublicKey) error {
	return errors.New("removing keys not allowed by buildah")
}

// RemoveAll always fails: the forwarded agent is read-only.
func (a *readOnlyAgent) RemoveAll() error {
	return errors.New("removing keys not allowed by buildah")
}

// Lock always fails: a build must not be able to lock the host's agent.
func (a *readOnlyAgent) Lock(_ []byte) error {
	return errors.New("locking agent not allowed by buildah")
}

// Extension always fails: protocol extensions are not forwarded.
func (a *readOnlyAgent) Extension(_ string, _ []byte) ([]byte, error) {
	return nil, errors.New("extensions not allowed by buildah")
}
+
// Source is what the forwarded agent's source is
// The source of the forwarded agent can be from a socket on the host, or from
// individual key files; NewSource populates exactly one of the two fields.
type Source struct {
	Socket string        // path to an existing agent socket on the host
	Keys   []interface{} // parsed private keys
}
+
+// NewSource takes paths and checks of they are keys or sockets, and creates a source
+func NewSource(paths []string) (*Source, error) {
+ var keys []interface{}
+ var socket string
+ if len(paths) == 0 {
+ socket = os.Getenv("SSH_AUTH_SOCK")
+ if socket == "" {
+ return nil, errors.New("SSH_AUTH_SOCK not set in environment")
+ }
+ absSocket, err := filepath.Abs(socket)
+ if err != nil {
+ return nil, fmt.Errorf("evaluating SSH_AUTH_SOCK in environment: %w", err)
+ }
+ socket = absSocket
+ }
+ for _, p := range paths {
+ if socket != "" {
+ return nil, errors.New("only one socket is allowed")
+ }
+
+ fi, err := os.Stat(p)
+ if err != nil {
+ return nil, err
+ }
+ if fi.Mode()&os.ModeSocket > 0 {
+ if len(keys) == 0 {
+ socket = p
+ } else {
+ return nil, errors.New("cannot mix keys and socket file")
+ }
+ continue
+ }
+
+ f, err := os.Open(p)
+ if err != nil {
+ return nil, err
+ }
+ dt, err := io.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
+ if err != nil {
+ return nil, err
+ }
+
+ k, err := ssh.ParseRawPrivateKey(dt)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse ssh key: %w", err)
+ }
+ keys = append(keys, k)
+ }
+ if socket != "" {
+ return &Source{
+ Socket: socket,
+ }, nil
+ }
+ return &Source{
+ Keys: keys,
+ }, nil
+}
diff --git a/pkg/sshagent/sshagent_test.go b/pkg/sshagent/sshagent_test.go
new file mode 100644
index 0000000..bf0d68b
--- /dev/null
+++ b/pkg/sshagent/sshagent_test.go
@@ -0,0 +1,55 @@
+package sshagent
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "net"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/ssh/agent"
+)
+
+func testNewKeySource() (*Source, error) {
+ k, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return nil, err
+ }
+ return &Source{
+ Keys: []interface{}{k},
+ }, nil
+}
+
+func testClient(path string) ([]*agent.Key, error) {
+ conn, err := net.Dial("unix", path)
+ if err != nil {
+ return nil, err
+ }
+ ac := agent.NewClient(conn)
+ keys, err := ac.List()
+ if err != nil {
+ return nil, err
+ }
+ return keys, nil
+
+}
+
// TestAgentServer serves an in-memory agent holding one RSA key, verifies a
// client can list that key, and verifies the socket stops answering after
// Shutdown.
func TestAgentServer(t *testing.T) {
	src, err := testNewKeySource()
	require.NoError(t, err)
	ag, err := NewAgentServer(src)
	require.NoError(t, err)
	sock, err := ag.Serve("")
	require.NoError(t, err)
	// Get key from agent
	keys, err := testClient(sock)
	require.NoError(t, err)
	require.Equal(t, len(keys), 1)
	require.Equal(t, keys[0].Type(), "ssh-rsa")
	// Check for proper shutdown
	err = ag.Shutdown()
	require.NoError(t, err)

	_, err = testClient(sock)
	require.Error(t, err)
}
diff --git a/pkg/supplemented/compat.go b/pkg/supplemented/compat.go
new file mode 100644
index 0000000..5689648
--- /dev/null
+++ b/pkg/supplemented/compat.go
@@ -0,0 +1,26 @@
+// This package is deprecated. Its functionality has been moved to
+// github.com/containers/common/pkg/supplemented, which provides the same API.
+// The stubs and aliases here are present for compatibility with older code.
+// New implementations should use github.com/containers/common/pkg/supplemented
+// directly.
+package supplemented
+
+import (
+ "github.com/containers/common/pkg/manifests"
+ "github.com/containers/common/pkg/supplemented"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+)
+
var (
	// ErrDigestNotFound is an alias for github.com/containers/common/pkg/manifests.ErrDigestNotFound.
	//
	// Deprecated: use github.com/containers/common/pkg/manifests.ErrDigestNotFound directly.
	ErrDigestNotFound = manifests.ErrDigestNotFound
	// ErrBlobNotFound is an alias for github.com/containers/common/pkg/supplemented.ErrBlobNotFound.
	//
	// Deprecated: use github.com/containers/common/pkg/supplemented.ErrBlobNotFound directly.
	ErrBlobNotFound = supplemented.ErrBlobNotFound
)
+
// Reference wraps github.com/containers/common/pkg/supplemented.Reference().
//
// Deprecated: use github.com/containers/common/pkg/supplemented.Reference directly.
func Reference(ref types.ImageReference, supplemental []types.ImageReference, multiple cp.ImageListSelection, instances []digest.Digest) types.ImageReference {
	return supplemented.Reference(ref, supplemental, multiple, instances)
}
diff --git a/pkg/umask/umask.go b/pkg/umask/umask.go
new file mode 100644
index 0000000..c5edead
--- /dev/null
+++ b/pkg/umask/umask.go
@@ -0,0 +1,13 @@
+package umask
+
+import (
+ "github.com/containers/common/pkg/umask"
+)
+
// CheckUmask wraps github.com/containers/common/pkg/umask.Check.
func CheckUmask() {
	umask.Check()
}

// SetUmask wraps github.com/containers/common/pkg/umask.Set, returning the
// previous umask value.
func SetUmask(value int) int {
	return umask.Set(value)
}
diff --git a/pkg/util/resource_unix.go b/pkg/util/resource_unix.go
new file mode 100644
index 0000000..4f7c08c
--- /dev/null
+++ b/pkg/util/resource_unix.go
@@ -0,0 +1,38 @@
+//go:build linux || freebsd || darwin
+// +build linux freebsd darwin
+
+package util
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/docker/go-units"
+)
+
// ParseUlimit parses a ulimit option in name=SOFT:HARD form. Values of -1
// are resolved against the process's current rlimits: a -1 soft value
// becomes the current soft limit and a -1 hard value the current hard limit.
func ParseUlimit(ulimit string) (*units.Ulimit, error) {
	ul, err := units.ParseUlimit(ulimit)
	if err != nil {
		return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err)
	}

	// NOTE(review): when Hard is set but Soft is -1 we return early without
	// resolving Soft against the current limit — confirm this asymmetry is
	// intentional.
	if ul.Hard != -1 && ul.Soft == -1 {
		return ul, nil
	}

	rl, err := ul.GetRlimit()
	if err != nil {
		return nil, err
	}
	var limit syscall.Rlimit
	if err := syscall.Getrlimit(rl.Type, &limit); err != nil {
		return nil, err
	}
	if ul.Soft == -1 {
		ul.Soft = int64(limit.Cur)
	}
	if ul.Hard == -1 {
		ul.Hard = int64(limit.Max)
	}
	return ul, nil
}
diff --git a/pkg/util/resource_unix_test.go b/pkg/util/resource_unix_test.go
new file mode 100644
index 0000000..6ee95dc
--- /dev/null
+++ b/pkg/util/resource_unix_test.go
@@ -0,0 +1,32 @@
+package util
+
+import (
+ "syscall"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
// TestParseUlimit checks rejection of malformed input, explicit soft/hard
// parsing, and substitution of -1 values with the current process rlimits.
func TestParseUlimit(t *testing.T) {
	_, err := ParseUlimit("bogus")
	assert.NotNil(t, err)

	ul, err := ParseUlimit("memlock=100:200")
	assert.Nil(t, err)
	assert.Equal(t, ul.Soft, int64(100))
	assert.Equal(t, ul.Hard, int64(200))

	var limit syscall.Rlimit
	err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit)
	assert.Nil(t, err)

	// -1 for both resolves to the current soft and hard limits.
	ul, err = ParseUlimit("nofile=-1:-1")
	assert.Nil(t, err)
	assert.Equal(t, ul.Soft, int64(limit.Cur))
	assert.Equal(t, ul.Hard, int64(limit.Max))

	// -1 hard alone resolves against the current hard limit.
	ul, err = ParseUlimit("nofile=100:-1")
	assert.Nil(t, err)
	assert.Equal(t, ul.Soft, int64(100))
	assert.Equal(t, ul.Hard, int64(limit.Max))
}
diff --git a/pkg/util/resource_windows.go b/pkg/util/resource_windows.go
new file mode 100644
index 0000000..3717091
--- /dev/null
+++ b/pkg/util/resource_windows.go
@@ -0,0 +1,16 @@
+package util
+
+import (
+ "fmt"
+
+ "github.com/docker/go-units"
+)
+
+func ParseUlimit(ulimit string) (*units.Ulimit, error) {
+ ul, err := units.ParseUlimit(ulimit)
+ if err != nil {
+ return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err)
+ }
+
+ return ul, nil
+}
diff --git a/pkg/util/test/test1/Containerfile b/pkg/util/test/test1/Containerfile
new file mode 100644
index 0000000..453e3ac
--- /dev/null
+++ b/pkg/util/test/test1/Containerfile
@@ -0,0 +1 @@
+from scratch
diff --git a/pkg/util/test/test1/Dockerfile b/pkg/util/test/test1/Dockerfile
new file mode 100644
index 0000000..453e3ac
--- /dev/null
+++ b/pkg/util/test/test1/Dockerfile
@@ -0,0 +1 @@
+from scratch
diff --git a/pkg/util/test/test2/Dockerfile b/pkg/util/test/test2/Dockerfile
new file mode 100644
index 0000000..453e3ac
--- /dev/null
+++ b/pkg/util/test/test2/Dockerfile
@@ -0,0 +1 @@
+from scratch
diff --git a/pkg/util/uptime_darwin.go b/pkg/util/uptime_darwin.go
new file mode 100644
index 0000000..d185cb4
--- /dev/null
+++ b/pkg/util/uptime_darwin.go
@@ -0,0 +1,10 @@
+package util
+
+import (
+ "errors"
+ "time"
+)
+
// ReadUptime reports how long the system has been up. There is no
// implementation for darwin, so an error is always returned.
func ReadUptime() (time.Duration, error) {
	err := errors.New("readUptime not supported on darwin")
	return time.Duration(0), err
}
diff --git a/pkg/util/uptime_freebsd.go b/pkg/util/uptime_freebsd.go
new file mode 100644
index 0000000..7112aba
--- /dev/null
+++ b/pkg/util/uptime_freebsd.go
@@ -0,0 +1,25 @@
+package util
+
+import (
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// For some reason, unix.ClockGettime isn't implemented by x/sys/unix on FreeBSD
+func clockGettime(clockid int32, time *unix.Timespec) (err error) {
+ _, _, e1 := unix.Syscall(unix.SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ return e1
+ }
+ return nil
+}
+
+func ReadUptime() (time.Duration, error) {
+ var uptime unix.Timespec
+ if err := clockGettime(unix.CLOCK_UPTIME, &uptime); err != nil {
+ return 0, err
+ }
+ return time.Duration(unix.TimespecToNsec(uptime)), nil
+}
diff --git a/pkg/util/uptime_linux.go b/pkg/util/uptime_linux.go
new file mode 100644
index 0000000..a27a480
--- /dev/null
+++ b/pkg/util/uptime_linux.go
@@ -0,0 +1,28 @@
+package util
+
+import (
+ "bytes"
+ "errors"
+ "time"
+ "os"
+)
+
// ReadUptime reports how long the system has been up, as read from the
// first whitespace-separated field of /proc/uptime (seconds since boot,
// with sub-second precision).
func ReadUptime() (time.Duration, error) {
	contents, err := os.ReadFile("/proc/uptime")
	if err != nil {
		return 0, err
	}
	fields := bytes.Fields(contents)
	if len(fields) < 1 {
		return 0, errors.New("invalid uptime")
	}

	// The field is a plain decimal number of seconds; append an "s"
	// suffix so it can be parsed as a time.Duration.
	return time.ParseDuration(string(fields[0]) + "s")
}
diff --git a/pkg/util/uptime_windows.go b/pkg/util/uptime_windows.go
new file mode 100644
index 0000000..ef3adac
--- /dev/null
+++ b/pkg/util/uptime_windows.go
@@ -0,0 +1,10 @@
+package util
+
+import (
+ "errors"
+ "time"
+)
+
// ReadUptime reports how long the system has been up. There is no
// implementation for windows, so an error is always returned.
func ReadUptime() (time.Duration, error) {
	err := errors.New("readUptime not supported on windows")
	return time.Duration(0), err
}
diff --git a/pkg/util/util.go b/pkg/util/util.go
new file mode 100644
index 0000000..17ad360
--- /dev/null
+++ b/pkg/util/util.go
@@ -0,0 +1,82 @@
+package util
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/buildah/pkg/parse"
+)
+
+// Mirrors path to a tmpfile if path points to a
+// file descriptor instead of actual file on filesystem
+// reason: operations with file descriptors are can lead
+// to edge cases where content on FD is not in a consumable
+// state after first consumption.
+// returns path as string and bool to confirm if temp file
+// was created and needs to be cleaned up.
+func MirrorToTempFileIfPathIsDescriptor(file string) (string, bool) {
+ // one use-case is discussed here
+ // https://github.com/containers/buildah/issues/3070
+ if !strings.HasPrefix(file, "/dev/fd/") {
+ return file, false
+ }
+ b, err := os.ReadFile(file)
+ if err != nil {
+ // if anything goes wrong return original path
+ return file, false
+ }
+ tmpfile, err := os.CreateTemp(parse.GetTempDir(), "buildah-temp-file")
+ if err != nil {
+ return file, false
+ }
+ defer tmpfile.Close()
+ if _, err := tmpfile.Write(b); err != nil {
+ // if anything goes wrong return original path
+ return file, false
+ }
+
+ return tmpfile.Name(), true
+}
+
// DiscoverContainerfile tries to find a Containerfile or a Dockerfile within the provided `path`.
// If path is a directory, a "Containerfile" inside it is preferred over a
// "Dockerfile"; if path is a regular file, it is returned as-is. An error
// is returned if nothing suitable is found.
func DiscoverContainerfile(path string) (foundCtrFile string, err error) {
	// Test for existence of the path itself.
	target, err := os.Stat(path)
	if err != nil {
		return "", fmt.Errorf("discovering Containerfile: %w", err)
	}

	switch mode := target.Mode(); {
	case mode.IsDir():
		// The path is a directory: look for a Containerfile first, then
		// fall back to a Dockerfile.
		ctrfile := filepath.Join(path, "Containerfile")
		file, err := os.Stat(ctrfile)
		if err != nil {
			ctrfile = filepath.Join(path, "Dockerfile")
			file, err = os.Stat(ctrfile)
			if err != nil {
				return "", fmt.Errorf("cannot find Containerfile or Dockerfile in context directory: %w", err)
			}
		}

		// The file exists; make sure it's a regular file.
		if !file.Mode().IsRegular() {
			return "", fmt.Errorf("assumed Containerfile %q is not a file", ctrfile)
		}
		foundCtrFile = ctrfile

	case mode.IsRegular():
		// The path itself is a regular file: assume it is the Containerfile.
		foundCtrFile = path

	default:
		// Anything else (device, socket, ...) cannot be a Containerfile.
		// Previously this case silently returned an empty path and no error.
		return "", fmt.Errorf("assumed Containerfile %q is not a file", path)
	}

	return foundCtrFile, nil
}
diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go
new file mode 100644
index 0000000..a39108e
--- /dev/null
+++ b/pkg/util/util_test.go
@@ -0,0 +1,32 @@
+package util
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDiscoverContainerfile(t *testing.T) {
+ _, err := DiscoverContainerfile("./bogus")
+ assert.NotNil(t, err)
+
+ _, err = DiscoverContainerfile("./")
+ assert.NotNil(t, err)
+
+ name, err := DiscoverContainerfile("test/test1/Dockerfile")
+ assert.Nil(t, err)
+ assert.Equal(t, name, "test/test1/Dockerfile")
+
+ name, err = DiscoverContainerfile("test/test1/Containerfile")
+ assert.Nil(t, err)
+ assert.Equal(t, name, "test/test1/Containerfile")
+
+ name, err = DiscoverContainerfile("test/test1")
+ assert.Nil(t, err)
+ assert.Equal(t, name, "test/test1/Containerfile")
+
+ name, err = DiscoverContainerfile("test/test2")
+ assert.Nil(t, err)
+ assert.Equal(t, name, "test/test2/Dockerfile")
+
+}
diff --git a/pkg/util/version_unix.go b/pkg/util/version_unix.go
new file mode 100644
index 0000000..88e8b58
--- /dev/null
+++ b/pkg/util/version_unix.go
@@ -0,0 +1,19 @@
+//go:build linux || freebsd || darwin
+// +build linux freebsd darwin
+
+package util
+
+import (
+ "bytes"
+
+ "golang.org/x/sys/unix"
+)
+
+func ReadKernelVersion() (string, error) {
+ var uname unix.Utsname
+ if err := unix.Uname(&uname); err != nil {
+ return "", err
+ }
+ n := bytes.IndexByte(uname.Release[:], 0)
+ return string(uname.Release[:n]), nil
+}
diff --git a/pkg/util/version_windows.go b/pkg/util/version_windows.go
new file mode 100644
index 0000000..9acf469
--- /dev/null
+++ b/pkg/util/version_windows.go
@@ -0,0 +1,10 @@
+package util
+
+import (
+ "errors"
+)
+
// ReadKernelVersion returns the kernel release string. There is no
// implementation for windows, so an error is always returned.
func ReadKernelVersion() (string, error) {
	err := errors.New("readKernelVersion not supported on windows")
	return "", err
}
diff --git a/pkg/volumes/volumes.go b/pkg/volumes/volumes.go
new file mode 100644
index 0000000..aa469a2
--- /dev/null
+++ b/pkg/volumes/volumes.go
@@ -0,0 +1,13 @@
+package volumes
+
+import (
+ "os"
+
+ "github.com/containers/buildah/internal/volumes"
+)
+
+// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it.
+func CleanCacheMount() error {
+ cacheParent := volumes.CacheParent()
+ return os.RemoveAll(cacheParent)
+}
diff --git a/pull.go b/pull.go
new file mode 100644
index 0000000..343c61f
--- /dev/null
+++ b/pull.go
@@ -0,0 +1,100 @@
+package buildah
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/common/libimage"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage"
+)
+
+// PullOptions can be used to alter how an image is copied in from somewhere.
+type PullOptions struct {
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // ReportWriter is an io.Writer which will be used to log the writing
+ // of the new image.
+ ReportWriter io.Writer
+ // Store is the local storage store which holds the source image.
+ Store storage.Store
+ // github.com/containers/image/types SystemContext to hold credentials
+ // and other authentication/authorization information.
+ SystemContext *types.SystemContext
+ // BlobDirectory is the name of a directory in which we'll attempt to
+ // store copies of layer blobs that we pull down, if any. It should
+ // already exist.
+ BlobDirectory string
+ // AllTags is a boolean value that determines if all tagged images
+ // will be downloaded from the repository. The default is false.
+ AllTags bool
+ // RemoveSignatures causes any existing signatures for the image to be
+ // discarded when pulling it.
+ RemoveSignatures bool
+ // MaxRetries is the maximum number of attempts we'll make to pull any
+ // one image from the external registry if the first attempt fails.
+ MaxRetries int
+ // RetryDelay is how long to wait before retrying a pull attempt.
+ RetryDelay time.Duration
+ // OciDecryptConfig contains the config that can be used to decrypt an image if it is
+ // encrypted if non-nil. If nil, it does not attempt to decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
+ // PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+ PullPolicy define.PullPolicy
+}
+
+// Pull copies the contents of the image from somewhere else to local storage. Returns the
+// ID of the local image or an error.
+func Pull(ctx context.Context, imageName string, options PullOptions) (imageID string, err error) {
+ libimageOptions := &libimage.PullOptions{}
+ libimageOptions.SignaturePolicyPath = options.SignaturePolicyPath
+ libimageOptions.Writer = options.ReportWriter
+ libimageOptions.RemoveSignatures = options.RemoveSignatures
+ libimageOptions.OciDecryptConfig = options.OciDecryptConfig
+ libimageOptions.AllTags = options.AllTags
+ libimageOptions.RetryDelay = &options.RetryDelay
+ libimageOptions.DestinationLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal)
+
+ if options.MaxRetries > 0 {
+ retries := uint(options.MaxRetries)
+ libimageOptions.MaxRetries = &retries
+ }
+
+ pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String())
+ if err != nil {
+ return "", err
+ }
+
+ // Note: It is important to do this before we pull any images/create containers.
+ // The default backend detection logic needs an empty store to correctly detect
+ // that we can use netavark, if the store was not empty it will use CNI to not break existing installs.
+ _, err = getNetworkInterface(options.Store, "", "")
+ if err != nil {
+ return "", err
+ }
+
+ runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext})
+ if err != nil {
+ return "", err
+ }
+
+ pulledImages, err := runtime.Pull(context.Background(), imageName, pullPolicy, libimageOptions)
+ if err != nil {
+ return "", err
+ }
+
+ if len(pulledImages) == 0 {
+ return "", fmt.Errorf("internal error pulling %s: no image pulled and no error", imageName)
+ }
+
+ return pulledImages[0].ID(), nil
+}
diff --git a/push.go b/push.go
new file mode 100644
index 0000000..2e2b949
--- /dev/null
+++ b/push.go
@@ -0,0 +1,155 @@
+package buildah
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/containers/buildah/pkg/blobcache"
+ "github.com/containers/common/libimage"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// cacheLookupReferenceFunc wraps a BlobCache into a
+// libimage.LookupReferenceFunc to allow for using a BlobCache during
+// image-copy operations.
+func cacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc {
+ // Using a closure here allows us to reference a BlobCache without
+ // having to explicitly maintain it in the libimage API.
+ return func(ref types.ImageReference) (types.ImageReference, error) {
+ if directory == "" {
+ return ref, nil
+ }
+ ref, err := blobcache.NewBlobCache(ref, directory, compress)
+ if err != nil {
+ return nil, fmt.Errorf("using blobcache %q: %w", directory, err)
+ }
+ return ref, nil
+ }
+}
+
+// PushOptions can be used to alter how an image is copied somewhere.
+type PushOptions struct {
+ // Compression specifies the type of compression which is applied to
+ // layer blobs. The default is to not use compression, but
+ // archive.Gzip is recommended.
+ // OBSOLETE: Use CompressionFormat instead.
+ Compression archive.Compression
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // ReportWriter is an io.Writer which will be used to log the writing
+ // of the new image.
+ ReportWriter io.Writer
+ // Store is the local storage store which holds the source image.
+ Store storage.Store
+ // github.com/containers/image/types SystemContext to hold credentials
+ // and other authentication/authorization information.
+ SystemContext *types.SystemContext
+ // ManifestType is the format to use
+ // possible options are oci, v2s1, and v2s2
+ ManifestType string
+ // BlobDirectory is the name of a directory in which we'll look for
+ // prebuilt copies of layer blobs that we might otherwise need to
+ // regenerate from on-disk layers, substituting them in the list of
+ // blobs to copy whenever possible.
+ BlobDirectory string
+ // Quiet is a boolean value that determines if minimal output to
+ // the user will be displayed, this is best used for logging.
+ // The default is false.
+ Quiet bool
+ // SignBy is the fingerprint of a GPG key to use for signing the image.
+ SignBy string
+ // RemoveSignatures causes any existing signatures for the image to be
+ // discarded for the pushed copy.
+ RemoveSignatures bool
+ // MaxRetries is the maximum number of attempts we'll make to push any
+ // one image to the external registry if the first attempt fails.
+ MaxRetries int
+ // RetryDelay is how long to wait before retrying a push attempt.
+ RetryDelay time.Duration
+ // OciEncryptConfig when non-nil indicates that an image should be encrypted.
+ // The encryption options is derived from the construction of EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, denotes encrypt all layers.
+ // integers in the slice represent 0-indexed layer indices, with support for negative
+ // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
+
+ // CompressionFormat is the format to use for the compression of the blobs
+ CompressionFormat *compression.Algorithm
+ // CompressionLevel specifies what compression level is used
+ CompressionLevel *int
+ // ForceCompressionFormat ensures that the compression algorithm set in
+ // CompressionFormat is used exclusively, and blobs of other compression
+ // algorithms are not reused.
+ ForceCompressionFormat bool
+}
+
+// Push copies the contents of the image to a new location.
+func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) {
+ libimageOptions := &libimage.PushOptions{}
+ libimageOptions.SignaturePolicyPath = options.SignaturePolicyPath
+ libimageOptions.Writer = options.ReportWriter
+ libimageOptions.ManifestMIMEType = options.ManifestType
+ libimageOptions.SignBy = options.SignBy
+ libimageOptions.RemoveSignatures = options.RemoveSignatures
+ libimageOptions.RetryDelay = &options.RetryDelay
+ libimageOptions.OciEncryptConfig = options.OciEncryptConfig
+ libimageOptions.OciEncryptLayers = options.OciEncryptLayers
+ libimageOptions.CompressionFormat = options.CompressionFormat
+ libimageOptions.CompressionLevel = options.CompressionLevel
+ libimageOptions.ForceCompressionFormat = options.ForceCompressionFormat
+ libimageOptions.PolicyAllowStorage = true
+
+ if options.Quiet {
+ libimageOptions.Writer = nil
+ }
+
+ compress := types.PreserveOriginal
+ if options.Compression == archive.Gzip {
+ compress = types.Compress
+ }
+ libimageOptions.SourceLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, compress)
+
+ runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext})
+ if err != nil {
+ return nil, "", err
+ }
+
+ destString := fmt.Sprintf("%s:%s", dest.Transport().Name(), dest.StringWithinTransport())
+ manifestBytes, err := runtime.Push(ctx, image, destString, libimageOptions)
+ if err != nil {
+ return nil, "", err
+ }
+
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ return nil, "", fmt.Errorf("computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
+ }
+
+ var ref reference.Canonical
+ if name := dest.DockerReference(); name != nil {
+ ref, err = reference.WithDigest(name, manifestDigest)
+ if err != nil {
+ logrus.Warnf("error generating canonical reference with name %q and digest %s: %v", name, manifestDigest.String(), err)
+ }
+ }
+
+ return ref, manifestDigest, nil
+}
diff --git a/release.sh b/release.sh
new file mode 100755
index 0000000..508ae90
--- /dev/null
+++ b/release.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+#
+# Cut a buildah release. Usage:
+#
+# $ ./release.sh <version> <next-version>
+#
+# For example:
+#
+# $ ./release.sh 1.2.3 1.3.0
+#
+# for "I'm cutting 1.2.3, and want to use 1.3.0-dev for future work".
+
+VERSION="$1"
+NEXT_VERSION="$2"
+DATE=$(date '+%Y-%m-%d')
+LAST_TAG=$(git describe --tags --abbrev=0)
+
+write_go_version()
+{
+ LOCAL_VERSION="$1"
+ sed -i "s/^\(.*Version = \"\).*/\1${LOCAL_VERSION}\"/" define/types.go
+}
+
+write_makefile_epoch()
+{
+ LOCAL_EPOCH="$1"
+ sed -i "s/^\(EPOCH_TEST_COMMIT ?= \).*/\1${LOCAL_EPOCH}/" Makefile
+}
+
+write_changelog()
+{
+ echo "- Changelog for v${VERSION} (${DATE})" >.changelog.txt &&
+ git log --no-merges --format=' * %s' "${LAST_TAG}..HEAD" >>.changelog.txt &&
+ echo >>.changelog.txt &&
+ cat changelog.txt >>.changelog.txt &&
+ mv -f .changelog.txt changelog.txt
+
+ echo "
+## v${VERSION} (${DATE})
+" >.CHANGELOG.md &&
+ git log --no-merges --format=' %s' "${LAST_TAG}..HEAD" >>.CHANGELOG.md &&
+ sed -i -e '/# Changelog/r .CHANGELOG.md' CHANGELOG.md &&
+ rm -f .CHANGELOG.md
+}
+
+release_commit()
+{
+ write_go_version "${VERSION}" &&
+ write_changelog &&
+ git commit -asm "Bump to v${VERSION}
+
+[NO TESTS NEEDED]
+"
+}
+
+dev_version_commit()
+{
+ write_go_version "${NEXT_VERSION}-dev" &&
+ git commit -asm "Bump to v${NEXT_VERSION}-dev
+
+[NO TESTS NEEDED]
+"
+}
+
+epoch_commit()
+{
+ LOCAL_EPOCH="$1"
+ write_makefile_epoch "${LOCAL_EPOCH}" &&
+ git commit -asm 'Bump gitvalidation epoch
+
+ [NO TESTS NEEDED]
+'
+}
+
+git fetch origin &&
+git checkout -b "bump-${VERSION}" origin/main &&
+EPOCH=$(git rev-parse HEAD) &&
+release_commit &&
+git tag -s -m "version ${VERSION}" "v${VERSION}" &&
+dev_version_commit &&
+epoch_commit "${EPOCH}"
diff --git a/rpm/buildah.spec b/rpm/buildah.spec
new file mode 100644
index 0000000..06a0c8b
--- /dev/null
+++ b/rpm/buildah.spec
@@ -0,0 +1,173 @@
+%global with_debug 1
+
+%if 0%{?with_debug}
+%global _find_debuginfo_dwz_opts %{nil}
+%global _dwz_low_mem_die_limit 0
+%else
+%global debug_package %{nil}
+%endif
+
+# RHEL's default %%gobuild macro doesn't account for the BUILDTAGS variable, so we
+# set it separately here and do not depend on RHEL's go-[s]rpm-macros package
+# until that's fixed.
+# c9s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227328
+# c8s bz: https://bugzilla.redhat.com/show_bug.cgi?id=2227331
+%if %{defined rhel}
+%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "-linkmode=external -compressdwarf=false ${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**};
+%endif
+
+%global gomodulesmode GO111MODULE=on
+
+%if 0%{defined fedora}
+%define build_with_btrfs 1
+%endif
+
+%global git0 https://github.com/containers/%{name}
+
+Name: buildah
+# Set different Epoch for copr
+%if %{defined copr_username}
+Epoch: 102
+%endif
+# DO NOT TOUCH the Version string!
+# The TRUE source of this specfile is:
+# https://github.com/containers/buildah/blob/main/rpm/buildah.spec
+# If that's what you're reading, Version must be 0, and will be updated by Packit for
+# copr and koji builds.
+# If you're reading this on dist-git, the version is automatically filled in by Packit.
+Version: 0
+# The `AND` needs to be uppercase in the License for SPDX compatibility
+License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND ISC AND MIT AND MPL-2.0
+Release: %autorelease
+%if %{defined golang_arches_future}
+ExclusiveArch: %{golang_arches_future}
+%else
+ExclusiveArch: aarch64 ppc64le s390x x86_64
+%endif
+Summary: A command line tool used for creating OCI Images
+URL: https://%{name}.io
+# Tarball fetched from upstream
+Source: %{git0}/archive/v%{version}.tar.gz
+BuildRequires: device-mapper-devel
+BuildRequires: git-core
+BuildRequires: golang >= 1.16.6
+BuildRequires: glib2-devel
+BuildRequires: glibc-static
+%if !%{defined gobuild}
+BuildRequires: go-rpm-macros
+%endif
+BuildRequires: gpgme-devel
+BuildRequires: libassuan-devel
+BuildRequires: make
+BuildRequires: ostree-devel
+%if %{defined build_with_btrfs}
+BuildRequires: btrfs-progs-devel
+%endif
+BuildRequires: shadow-utils-subid-devel
+Requires: containers-common-extra
+%if %{defined fedora}
+BuildRequires: libseccomp-static
+%else
+BuildRequires: libseccomp-devel
+%endif
+Requires: libseccomp >= 2.4.1-0
+Suggests: cpp
+
+%description
+The %{name} package provides a command line tool which can be used to
+* create a working container from scratch
+or
+* create a working container from an image as a starting point
+* mount/umount a working container's root file system for manipulation
+* save container's root file system layer to create a new image
+* delete a working container or an image
+
+%package tests
+Summary: Tests for %{name}
+
+Requires: %{name} = %{version}-%{release}
+Requires: bats
+Requires: bzip2
+Requires: podman
+Requires: golang
+Requires: jq
+Requires: httpd-tools
+Requires: openssl
+Requires: nmap-ncat
+Requires: git-daemon
+
+%description tests
+%{summary}
+
+This package contains system tests for %{name}
+
+%prep
+%autosetup -Sgit -n %{name}-%{version}
+
+%build
+%set_build_flags
+export CGO_CFLAGS=$CFLAGS
+
+# These extra flags present in $CFLAGS have been skipped for now as they break the build
+CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
+CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g')
+CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')
+
+%ifarch x86_64
+export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full"
+%endif
+
+export CNI_VERSION=`grep '^# github.com/containernetworking/cni ' src/modules.txt | sed 's,.* ,,'`
+export LDFLAGS="-X main.buildInfo=`date +%s` -X main.cniVersion=${CNI_VERSION}"
+
+export BUILDTAGS="seccomp exclude_graphdriver_devicemapper $(hack/systemd_tag.sh) $(hack/libsubid_tag.sh)"
+%if !%{defined build_with_btrfs}
+export BUILDTAGS+=" btrfs_noversion exclude_graphdriver_btrfs"
+%endif
+
+%gobuild -o bin/%{name} ./cmd/%{name}
+%gobuild -o bin/imgtype ./tests/imgtype
+%gobuild -o bin/copy ./tests/copy
+%gobuild -o bin/tutorial ./tests/tutorial
+%{__make} docs
+
+%install
+make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
+
+install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
+cp -pav tests/. %{buildroot}/%{_datadir}/%{name}/test/system
+cp bin/imgtype %{buildroot}/%{_bindir}/%{name}-imgtype
+cp bin/copy %{buildroot}/%{_bindir}/%{name}-copy
+cp bin/tutorial %{buildroot}/%{_bindir}/%{name}-tutorial
+
+rm %{buildroot}%{_datadir}/%{name}/test/system/tools/build/*
+
+#define license tag if not already defined
+%{!?_licensedir:%global license %doc}
+
+%files
+%license LICENSE
+%doc README.md
+%{_bindir}/%{name}
+%{_mandir}/man1/%{name}*
+%dir %{_datadir}/bash-completion
+%dir %{_datadir}/bash-completion/completions
+%{_datadir}/bash-completion/completions/%{name}
+
+%files tests
+%license LICENSE
+%{_bindir}/%{name}-imgtype
+%{_bindir}/%{name}-copy
+%{_bindir}/%{name}-tutorial
+%{_datadir}/%{name}/test
+
+%changelog
+%if %{defined autochangelog}
+%autochangelog
+%else
+# NOTE: This changelog will be visible on CentOS 8 Stream builds
+# Other envs are capable of handling autochangelog
+* Fri Jun 16 2023 RH Container Bot <rhcontainerbot@fedoraproject.org>
+- Placeholder changelog for envs that are not autochangelog-ready.
+- Contact upstream if you need to report an issue with the build.
+%endif
diff --git a/run.go b/run.go
new file mode 100644
index 0000000..77887df
--- /dev/null
+++ b/run.go
@@ -0,0 +1,209 @@
+package buildah
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal"
+ "github.com/containers/buildah/pkg/sshagent"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // runUsingRuntimeCommand is a command we use as a key for reexec
+ runUsingRuntimeCommand = define.Package + "-oci-runtime"
+)
+
// TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal.
type TerminalPolicy int

const (
	// DefaultTerminal indicates that this Run invocation should be
	// connected to a pseudoterminal if we're connected to a terminal.
	DefaultTerminal TerminalPolicy = iota
	// WithoutTerminal indicates that this Run invocation should NOT be
	// connected to a pseudoterminal.
	WithoutTerminal
	// WithTerminal indicates that this Run invocation should be connected
	// to a pseudoterminal.
	WithTerminal
)

// String converts a TerminalPolicy into a string.
func (t TerminalPolicy) String() string {
	names := [...]string{
		"DefaultTerminal",
		"WithoutTerminal",
		"WithTerminal",
	}
	if t >= 0 && int(t) < len(names) {
		return names[t]
	}
	return fmt.Sprintf("unrecognized terminal setting %d", t)
}
+
+// NamespaceOption controls how we set up a namespace when launching processes.
+type NamespaceOption = define.NamespaceOption
+
+// NamespaceOptions provides some helper methods for a slice of NamespaceOption
+// structs.
+type NamespaceOptions = define.NamespaceOptions
+
+// IDMappingOptions controls how we set up UID/GID mapping when we set up a
+// user namespace.
+type IDMappingOptions = define.IDMappingOptions
+
+// Isolation provides a way to specify whether we're supposed to use a proper
+// OCI runtime, or some other method for running commands.
+type Isolation = define.Isolation
+
+const (
+ // IsolationDefault is whatever we think will work best.
+ IsolationDefault = define.IsolationDefault
+ // IsolationOCI is a proper OCI runtime.
+ IsolationOCI = define.IsolationOCI
+ // IsolationChroot is a more chroot-like environment: less isolation,
+ // but with fewer requirements.
+ IsolationChroot = define.IsolationChroot
+ // IsolationOCIRootless is a proper OCI runtime in rootless mode.
+ IsolationOCIRootless = define.IsolationOCIRootless
+)
+
+// RunOptions can be used to alter how a command is run in the container.
+type RunOptions struct {
+ // Logger is the logrus logger to write log messages with
+ Logger *logrus.Logger `json:"-"`
+ // Hostname is the hostname we set for the running container.
+ Hostname string
+ // Isolation is either IsolationDefault, IsolationOCI, IsolationChroot, or IsolationOCIRootless.
+ Isolation define.Isolation
+ // Runtime is the name of the runtime to run. It should accept the
+ // same arguments that runc does, and produce similar output.
+ Runtime string
+ // Args adds global arguments for the runtime.
+ Args []string
+ // NoHostname won't create new /etc/hostname file
+ NoHostname bool
+ // NoHosts won't create new /etc/hosts file
+ NoHosts bool
+ // NoPivot adds the --no-pivot runtime flag.
+ NoPivot bool
+ // Mounts are additional mount points which we want to provide.
+ Mounts []specs.Mount
+ // Env is additional environment variables to set.
+ Env []string
+ // User is the user as whom to run the command.
+ User string
+ // WorkingDir is an override for the working directory.
+ WorkingDir string
+ // ContextDir is used as the root directory for the source location for mounts that are of type "bind".
+ ContextDir string
+ // Shell is default shell to run in a container.
+ Shell string
+ // Cmd is an override for the configured default command.
+ Cmd []string
+ // Entrypoint is an override for the configured entry point.
+ Entrypoint []string
+ // NamespaceOptions controls how we set up the namespaces for the process.
+ NamespaceOptions define.NamespaceOptions
+ // ConfigureNetwork controls whether or not network interfaces and
+ // routing are configured for a new network namespace (i.e., when not
+ // joining another's namespace and not just using the host's
+ // namespace), effectively deciding whether or not the process has a
+ // usable network.
+ ConfigureNetwork define.NetworkConfigurationPolicy
+ // CNIPluginPath is the location of CNI plugin helpers, if they should be
+ // run from a location other than the default location.
+ CNIPluginPath string
+ // CNIConfigDir is the location of CNI configuration files, if the files in
+ // the default configuration directory shouldn't be used.
+ CNIConfigDir string
+ // Terminal provides a way to specify whether or not the command should
+ // be run with a pseudoterminal. By default (DefaultTerminal), a
+ // terminal is used if os.Stdout is connected to a terminal, but that
+ // decision can be overridden by specifying either WithTerminal or
+ // WithoutTerminal.
+ Terminal TerminalPolicy
+ // TerminalSize provides a way to set the number of rows and columns in
+ // a pseudo-terminal, if we create one, and Stdin/Stdout/Stderr aren't
+ // connected to a terminal.
+ TerminalSize *specs.Box
+ // The stdin/stdout/stderr descriptors to use. If set to nil, the
+ // corresponding files in the "os" package are used as defaults.
+ Stdin io.Reader `json:"-"`
+ Stdout io.Writer `json:"-"`
+ Stderr io.Writer `json:"-"`
+ // Quiet tells the run to turn off output to stdout.
+ Quiet bool
+ // AddCapabilities is a list of capabilities to add to the default set.
+ AddCapabilities []string
+ // DropCapabilities is a list of capabilities to remove from the default set,
+ // after processing the AddCapabilities set. If a capability appears in both
+ // lists, it will be dropped.
+ DropCapabilities []string
+ // Devices are the additional devices to add to the containers
+ Devices define.ContainerDevices
+ // Secrets are the available secrets to use in a RUN
+ Secrets map[string]define.Secret
+ // SSHSources is the available ssh agents to use in a RUN
+ SSHSources map[string]*sshagent.Source `json:"-"`
+ // RunMounts are mounts for this run. RunMounts for this run
+ // will not show up in subsequent runs.
+ RunMounts []string
+ // Map of stages and container mountpoint if any from stage executor
+ StageMountPoints map[string]internal.StageMountDetails
+ // External Image mounts to be cleaned up.
+ // Buildah run --mount could mount image before RUN calls, RUN could cleanup
+ // them up as well
+ ExternalImageMounts []string
+ // System context of current build
+ SystemContext *types.SystemContext
+ // CgroupManager to use for running OCI containers
+ CgroupManager string
+}
+
+// RunMountArtifacts are the artifacts created when using a run mount.
+type runMountArtifacts struct {
+ // RunMountTargets are the run mount targets inside the container
+ RunMountTargets []string
+ // TmpFiles are artifacts that need to be removed outside the container
+ TmpFiles []string
+ // Any external images which were mounted inside container
+ MountedImages []string
+ // Agents are the ssh agents started
+ Agents []*sshagent.AgentServer
+ // SSHAuthSock is the path to the ssh auth sock inside the container
+ SSHAuthSock string
+ // TargetLocks to be unlocked if there are any.
+ TargetLocks []*lockfile.LockFile
+}
+
+// RunMountInfo are the available run mounts for this run
+type runMountInfo struct {
+ // WorkDir is the current working directory inside the container.
+ WorkDir string
+ // ContextDir is the root directory for the source location for bind mounts.
+ ContextDir string
+ // Secrets are the available secrets to use in a RUN
+ Secrets map[string]define.Secret
+ // SSHSources is the available ssh agents to use in a RUN
+ SSHSources map[string]*sshagent.Source `json:"-"`
+ // Map of stages and container mountpoint if any from stage executor
+ StageMountPoints map[string]internal.StageMountDetails
+ // System context of current build
+ SystemContext *types.SystemContext
+}
+
+// IDMaps are the UIDs, GID, and maps for the run
+type IDMaps struct {
+	// UID and GID mappings for the container's user namespace.
+	uidmap []specs.LinuxIDMapping
+	gidmap []specs.LinuxIDMapping
+	// rootUID/rootGID: presumably the host IDs corresponding to root
+	// inside the container — TODO confirm against the code that
+	// populates IDMaps.
+	rootUID int
+	rootGID int
+	// processUID/processGID: presumably the IDs the container process
+	// runs as — TODO confirm against callers.
+	processUID int
+	processGID int
+}
diff --git a/run_common.go b/run_common.go
new file mode 100644
index 0000000..93550f6
--- /dev/null
+++ b/run_common.go
@@ -0,0 +1,1961 @@
+//go:build linux || freebsd
+// +build linux freebsd
+
+package buildah
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "os/exec"
+ "os/signal"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/containers/buildah/bind"
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal"
+ internalUtil "github.com/containers/buildah/internal/util"
+ "github.com/containers/buildah/internal/volumes"
+ "github.com/containers/buildah/pkg/overlay"
+ "github.com/containers/buildah/pkg/sshagent"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libnetwork/etchosts"
+ "github.com/containers/common/libnetwork/network"
+ "github.com/containers/common/libnetwork/resolvconf"
+ netTypes "github.com/containers/common/libnetwork/types"
+ netUtil "github.com/containers/common/libnetwork/util"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/subscriptions"
+ imageTypes "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/containers/storage/pkg/unshare"
+ storageTypes "github.com/containers/storage/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+ "golang.org/x/term"
+)
+
+// addResolvConf builds a resolv.conf for the container under rdir (merging
+// the containers.conf defaults with the caller-supplied DNS servers, search
+// domains, and options), chowns and relabels it, and returns its path so it
+// can be bind-mounted into the container.
+func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaces []specs.LinuxNamespace) (string, error) {
+	defaultConfig, err := config.Default()
+	if err != nil {
+		return "", fmt.Errorf("failed to get config: %w", err)
+	}
+
+	// Defaults from containers.conf come first, then the caller's servers.
+	nameservers := make([]string, 0, len(defaultConfig.Containers.DNSServers.Get())+len(dnsServers))
+	nameservers = append(nameservers, defaultConfig.Containers.DNSServers.Get()...)
+	nameservers = append(nameservers, dnsServers...)
+
+	keepHostServers := false
+	// special check for slirp ip
+	if len(nameservers) == 0 && b.Isolation == IsolationOCIRootless {
+		// A private network namespace with no explicit path means slirp4netns
+		// will provide networking.
+		for _, ns := range namespaces {
+			if ns.Type == specs.NetworkNamespace && ns.Path == "" {
+				keepHostServers = true
+				// if we are using slirp4netns, also add the built-in DNS server.
+				logrus.Debugf("adding slirp4netns 10.0.2.3 built-in DNS server")
+				nameservers = append([]string{"10.0.2.3"}, nameservers...)
+			}
+		}
+	}
+
+	searches := make([]string, 0, len(defaultConfig.Containers.DNSSearches.Get())+len(dnsSearch))
+	searches = append(searches, defaultConfig.Containers.DNSSearches.Get()...)
+	searches = append(searches, dnsSearch...)
+
+	options := make([]string, 0, len(defaultConfig.Containers.DNSOptions.Get())+len(dnsOptions))
+	options = append(options, defaultConfig.Containers.DNSOptions.Get()...)
+	options = append(options, dnsOptions...)
+
+	cfile := filepath.Join(rdir, "resolv.conf")
+	if err := resolvconf.New(&resolvconf.Params{
+		Path:            cfile,
+		Namespaces:      namespaces,
+		IPv6Enabled:     true, // TODO we should check if we have ipv6
+		KeepHostServers: keepHostServers,
+		Nameservers:     nameservers,
+		Searches:        searches,
+		Options:         options,
+	}); err != nil {
+		return "", fmt.Errorf("building resolv.conf for container %s: %w", b.ContainerID, err)
+	}
+
+	// Make the file owned by the container's root user (which may be a
+	// remapped ID on the host).
+	uid := 0
+	gid := 0
+	if chownOpts != nil {
+		uid = chownOpts.UID
+		gid = chownOpts.GID
+	}
+	if err = os.Chown(cfile, uid, gid); err != nil {
+		return "", err
+	}
+
+	// Relabel for SELinux so the container is allowed to read it.
+	if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
+		return "", err
+	}
+	return cfile, nil
+}
+
+// generateHosts creates the container's /etc/hosts file under rdir, based on
+// the configured base hosts file plus any --add-host entries, chowns and
+// relabels it, and returns its path for bind-mounting into the container.
+func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string, spec *specs.Spec) (string, error) {
+	conf, err := config.Default()
+	if err != nil {
+		return "", err
+	}
+
+	path, err := etchosts.GetBaseHostFile(conf.Containers.BaseHostsFile, imageRoot)
+	if err != nil {
+		return "", err
+	}
+
+	var entries etchosts.HostEntries
+	// The container shares the host network unless the spec creates a
+	// network namespace.
+	isHost := true
+	if spec.Linux != nil {
+		for _, ns := range spec.Linux.Namespaces {
+			if ns.Type == specs.NetworkNamespace {
+				isHost = false
+				break
+			}
+		}
+	}
+	// add host entry for local ip when running in host network
+	if spec.Hostname != "" && isHost {
+		ip := netUtil.GetLocalIP()
+		if ip != "" {
+			entries = append(entries, etchosts.HostEntry{
+				Names: []string{spec.Hostname},
+				IP:    ip,
+			})
+		}
+	}
+
+	targetfile := filepath.Join(rdir, "hosts")
+	if err := etchosts.New(&etchosts.Params{
+		BaseFile:                 path,
+		ExtraHosts:               b.CommonBuildOpts.AddHost,
+		HostContainersInternalIP: etchosts.GetHostContainersInternalIP(conf, nil, nil),
+		TargetFile:               targetfile,
+		ContainerIPs:             entries,
+	}); err != nil {
+		return "", err
+	}
+
+	// Make the file owned by the container's root user (which may be a
+	// remapped ID on the host).
+	uid := 0
+	gid := 0
+	if chownOpts != nil {
+		uid = chownOpts.UID
+		gid = chownOpts.GID
+	}
+	if err = os.Chown(targetfile, uid, gid); err != nil {
+		return "", err
+	}
+	// Relabel for SELinux so the container is allowed to read it.
+	if err := label.Relabel(targetfile, b.MountLabel, false); err != nil {
+		return "", err
+	}
+
+	return targetfile, nil
+}
+
+// generateHostname writes the container's /etc/hostname file (hostname plus
+// a trailing newline) under rdir, chowns and relabels it, and returns its
+// path for bind-mounting into the container.
+func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDPair) (string, error) {
+	var err error
+	hostnamePath := "/etc/hostname"
+
+	var hostnameBuffer bytes.Buffer
+	hostnameBuffer.Write([]byte(fmt.Sprintf("%s\n", hostname)))
+
+	cfile := filepath.Join(rdir, filepath.Base(hostnamePath))
+	// Atomic write so a partially-written file is never observed.
+	if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil {
+		return "", fmt.Errorf("writing /etc/hostname into the container: %w", err)
+	}
+
+	// Make the file owned by the container's root user (which may be a
+	// remapped ID on the host).
+	uid := 0
+	gid := 0
+	if chownOpts != nil {
+		uid = chownOpts.UID
+		gid = chownOpts.GID
+	}
+	if err = os.Chown(cfile, uid, gid); err != nil {
+		return "", err
+	}
+	// Relabel for SELinux so the container is allowed to read it.
+	if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
+		return "", err
+	}
+
+	return cfile, nil
+}
+
+// setupTerminal configures whether the container process gets a terminal,
+// based on the requested policy, and applies an explicit console size if one
+// was given. With DefaultTerminal, a terminal is allocated only when all of
+// our own stdin/stdout/stderr are terminals.
+func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) {
+	switch terminalPolicy {
+	case DefaultTerminal:
+		onTerminal := term.IsTerminal(unix.Stdin) && term.IsTerminal(unix.Stdout) && term.IsTerminal(unix.Stderr)
+		if onTerminal {
+			logrus.Debugf("stdio is a terminal, defaulting to using a terminal")
+		} else {
+			logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal")
+		}
+		g.SetProcessTerminal(onTerminal)
+	case WithTerminal:
+		g.SetProcessTerminal(true)
+	case WithoutTerminal:
+		g.SetProcessTerminal(false)
+	}
+	if terminalSize != nil {
+		g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height)
+	}
+}
+
+// runLookupPath searches for a command that isn't given as an absolute path
+// using the $PATH configured in the spec, resolved relative to the rootfs.
+// We can't resolve absolute symbolic links without chroot()ing, which we may
+// not be able to do, so just accept a link as a valid resolution. If no
+// $PATH is configured, a default one is supplied and added to the process
+// environment. The command slice is returned with only its first element
+// possibly replaced by the resolved path.
+func runLookupPath(g *generate.Generator, command []string) []string {
+	// Look for the configured $PATH.
+	spec := g.Config
+	envPath := ""
+	for i := range spec.Process.Env {
+		if strings.HasPrefix(spec.Process.Env[i], "PATH=") {
+			envPath = spec.Process.Env[i]
+		}
+	}
+	// If there is no configured $PATH, supply one.
+	if envPath == "" {
+		defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin"
+		envPath = "PATH=" + defaultPath
+		g.AddProcessEnv("PATH", defaultPath)
+	}
+	// No command, nothing to do.
+	if len(command) == 0 {
+		return command
+	}
+	// Command is already an absolute path, use it as-is.
+	if filepath.IsAbs(command[0]) {
+		return command
+	}
+	// For each element in the PATH,
+	for _, pathEntry := range filepath.SplitList(envPath[5:]) {
+		// if it's the empty string, it's ".", which is the Cwd,
+		if pathEntry == "" {
+			pathEntry = spec.Process.Cwd
+		}
+		// build the absolute path which it might be,
+		candidate := filepath.Join(pathEntry, command[0])
+		// check if it's there,
+		if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil {
+			// and if it's not a directory, and either a symlink or executable,
+			if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) {
+				// use that.
+				return append([]string{candidate}, command[1:]...)
+			}
+		}
+	}
+	// Not found anywhere on the path; leave the command untouched and let
+	// the runtime report the failure.
+	return command
+}
+
+// configureUIDGID sets the process UID/GID, supplemental groups, and
+// capability lists in the spec for the user requested by options.User
+// (resolved against the rootfs at mountPoint), and returns that user's home
+// directory.
+func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) (string, error) {
+	// Set the user UID/GID/supplemental group list/capabilities lists.
+	user, homeDir, err := b.userForRun(mountPoint, options.User)
+	if err != nil {
+		return "", err
+	}
+	if err := setupCapabilities(g, b.Capabilities, options.AddCapabilities, options.DropCapabilities); err != nil {
+		return "", err
+	}
+	g.SetProcessUID(user.UID)
+	g.SetProcessGID(user.GID)
+	g.AddProcessAdditionalGid(user.GID)
+	for _, gid := range user.AdditionalGids {
+		g.AddProcessAdditionalGid(gid)
+	}
+	for _, group := range b.GroupAdd {
+		// "keep-groups" is a special value handled via an annotation rather
+		// than a numeric GID, and must be the only --group-add option.
+		if group == "keep-groups" {
+			if len(b.GroupAdd) > 1 {
+				return "", errors.New("the '--group-add keep-groups' option is not allowed with any other --group-add options")
+			}
+			g.AddAnnotation("run.oci.keep_original_groups", "1")
+			continue
+		}
+		gid, err := strconv.ParseUint(group, 10, 32)
+		if err != nil {
+			return "", err
+		}
+		g.AddProcessAdditionalGid(uint32(gid))
+	}
+
+	// Remove capabilities if not running as root except Bounding set
+	if user.UID != 0 && g.Config.Process.Capabilities != nil {
+		bounding := g.Config.Process.Capabilities.Bounding
+		g.ClearProcessCapabilities()
+		g.Config.Process.Capabilities.Bounding = bounding
+	}
+
+	return homeDir, nil
+}
+
+// configureEnvironment rebuilds the process environment from scratch:
+// host proxy variables (when HTTPProxy is enabled), then defaultEnv,
+// overridden by the builder's configured environment, overridden in turn by
+// options.Env. Entries without an "=" are dropped.
+func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions, defaultEnv []string) {
+	g.ClearProcessEnv()
+
+	if b.CommonBuildOpts.HTTPProxy {
+		// Pass through the standard proxy variables from the host.
+		for _, envSpec := range config.ProxyEnv {
+			if envVal, ok := os.LookupEnv(envSpec); ok {
+				g.AddProcessEnv(envSpec, envVal)
+			}
+		}
+	}
+
+	// Later sources win: defaults < builder env < per-run env.
+	for _, envSpec := range util.MergeEnv(util.MergeEnv(defaultEnv, b.Env()), options.Env) {
+		env := strings.SplitN(envSpec, "=", 2)
+		if len(env) > 1 {
+			g.AddProcessEnv(env[0], env[1])
+		}
+	}
+}
+
+// getNetworkInterface creates the container network interface, optionally
+// overriding the default CNI configuration directory and plugin search path
+// from the supplied values.
+func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (netTypes.ContainerNetwork, error) {
+	conf, err := config.Default()
+	if err != nil {
+		return nil, err
+	}
+	// copy the config to not modify the default by accident
+	newconf := *conf
+	if len(cniConfDir) > 0 {
+		newconf.Network.NetworkConfigDir = cniConfDir
+	}
+	if len(cniPluginPath) > 0 {
+		// cniPluginPath is a PATH-style list of directories.
+		plugins := strings.Split(cniPluginPath, string(os.PathListSeparator))
+		newconf.Network.CNIPluginDirs.Set(plugins)
+	}
+
+	_, netInt, err := network.NetworkBackend(store, &newconf, false)
+	if err != nil {
+		return nil, err
+	}
+	return netInt, nil
+}
+
+// DefaultNamespaceOptions returns the default namespace settings, derived
+// from the containers.conf configuration. Each entry records whether the
+// corresponding namespace should be shared with the host; the mount
+// namespace is always private.
+func DefaultNamespaceOptions() (define.NamespaceOptions, error) {
+	cfg, err := config.Default()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get container config: %w", err)
+	}
+	options := define.NamespaceOptions{
+		{Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"},
+		{Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"},
+		{Name: string(specs.MountNamespace), Host: false},
+		{Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host"},
+		{Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"},
+		{Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "" || cfg.Containers.UserNS == "host"},
+		{Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"},
+	}
+	return options, nil
+}
+
+// checkAndOverrideIsolationOptions validates and adjusts the namespace
+// options for the given isolation mode: rootless runs fall back to the host
+// network namespace when slirp4netns is missing, OCI isolation rejects a
+// host PID namespace combined with a private user namespace, and chroot
+// isolation always forces host networking.
+func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error {
+	switch isolation {
+	case IsolationOCIRootless:
+		// only change the netns if the caller did not set it
+		if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns == nil {
+			if _, err := exec.LookPath("slirp4netns"); err != nil {
+				// if slirp4netns is not installed we have to use the hosts net namespace
+				options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+			}
+		}
+		// Rootless runs are also subject to the OCI checks below.
+		fallthrough
+	case IsolationOCI:
+		pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace))
+		userns := options.NamespaceOptions.Find(string(specs.UserNamespace))
+		if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) {
+			return fmt.Errorf("not allowed to mix host PID namespace with container user namespace")
+		}
+	case IsolationChroot:
+		logrus.Info("network namespace isolation not supported with chroot isolation, forcing host network")
+		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+	}
+	return nil
+}
+
+// fileCloser is a helper struct to prevent closing the file twice in the code.
+// Users must call (fileCloser).Close() and not fileCloser.file.Close().
+type fileCloser struct {
+	file   *os.File
+	closed bool
+}
+
+// Close closes the wrapped file exactly once; subsequent calls are no-ops.
+// A close error is logged rather than returned.
+func (f *fileCloser) Close() {
+	if !f.closed {
+		if err := f.file.Close(); err != nil {
+			logrus.Errorf("failed to close file: %v", err)
+		}
+		f.closed = true
+	}
+}
+
+// waitForSync waits for a maximum of 4 minutes to read something from the file
+func waitForSync(pipeR *os.File) error {
+ if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil {
+ return err
+ }
+ b := make([]byte, 16)
+ _, err := pipeR.Read(b)
+ return err
+}
+
+// runUsingRuntime drives an OCI runtime through the create/start/kill/delete
+// lifecycle for a container whose bundle lives at bundlePath, relaying its
+// stdio (via pipes or a console socket) and optionally pausing after "create"
+// to let the parent configure networking (signaled over containerCreateW /
+// containerStartR). It polls the runtime's "state" subcommand until the
+// container stops, then returns the child's wait status.
+func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string,
+	containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) {
+	if options.Logger == nil {
+		options.Logger = logrus.StandardLogger()
+	}
+
+	// Lock the caller to a single OS-level thread.
+	runtime.LockOSThread()
+
+	// Set up bind mounts for things that a namespaced user might not be able to get to directly.
+	unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath)
+	if unmountAll != nil {
+		defer func() {
+			if err := unmountAll(); err != nil {
+				options.Logger.Error(err)
+			}
+		}()
+	}
+	if err != nil {
+		return 1, err
+	}
+
+	// Write the runtime configuration.
+	specbytes, err := json.Marshal(spec)
+	if err != nil {
+		return 1, fmt.Errorf("encoding configuration %#v as json: %w", spec, err)
+	}
+	if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
+		return 1, fmt.Errorf("storing runtime configuration: %w", err)
+	}
+
+	logrus.Debugf("config = %v", string(specbytes))
+
+	// Decide which runtime to use.
+	runtime := options.Runtime
+	if runtime == "" {
+		runtime = util.Runtime()
+	}
+	localRuntime := util.FindLocalRuntime(runtime)
+	if localRuntime != "" {
+		runtime = localRuntime
+	}
+
+	// Default to just passing down our stdio.
+	getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
+		return os.Stdin, os.Stdout, os.Stderr
+	}
+
+	// Figure out how we're doing stdio handling, and create pipes and sockets.
+	var stdio sync.WaitGroup
+	var consoleListener *net.UnixListener
+	var errorFds, closeBeforeReadingErrorFds []int
+	stdioPipe := make([][]int, 3)
+	copyConsole := false
+	copyPipes := false
+	finishCopy := make([]int, 2)
+	if err = unix.Pipe(finishCopy); err != nil {
+		return 1, fmt.Errorf("creating pipe for notifying to stop stdio: %w", err)
+	}
+	finishedCopy := make(chan struct{}, 1)
+	var pargs []string
+	if spec.Process != nil {
+		pargs = spec.Process.Args
+		if spec.Process.Terminal {
+			copyConsole = true
+			// Create a listening socket for accepting the container's terminal's PTY master.
+			socketPath := filepath.Join(bundlePath, "console.sock")
+			consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"})
+			if err != nil {
+				return 1, fmt.Errorf("creating socket %q to receive terminal descriptor: %w", consoleListener.Addr(), err)
+			}
+			// Add console socket arguments.
+			moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath)
+		} else {
+			copyPipes = true
+			// Figure out who should own the pipes.
+			uid, gid, err := util.GetHostRootIDs(spec)
+			if err != nil {
+				return 1, err
+			}
+			// Create stdio pipes.
+			if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil {
+				return 1, err
+			}
+			if spec.Linux != nil {
+				if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil {
+					return 1, err
+				}
+			}
+			// Remember which ends to scrape for error output if "create" fails.
+			errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]}
+			closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]}
+			// Set stdio to our pipes.
+			getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
+				stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin")
+				stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout")
+				stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr")
+				return stdin, stdout, stderr
+			}
+		}
+	} else {
+		if options.Quiet {
+			// Discard stdout.
+			getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) {
+				return os.Stdin, nil, os.Stderr
+			}
+		}
+	}
+
+	runtimeArgs := options.Args[:]
+	if options.CgroupManager == config.SystemdCgroupsManager {
+		runtimeArgs = append(runtimeArgs, "--systemd-cgroup")
+	}
+
+	// Build the commands that we'll execute.
+	pidFile := filepath.Join(bundlePath, "pid")
+	args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName)
+	create := exec.Command(runtime, args...)
+	setPdeathsig(create)
+	create.Dir = bundlePath
+	stdin, stdout, stderr := getCreateStdio()
+	create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr
+
+	args = append(options.Args, "start", containerName)
+	start := exec.Command(runtime, args...)
+	setPdeathsig(start)
+	start.Dir = bundlePath
+	start.Stderr = os.Stderr
+
+	// kill builds a "kill" command; an empty signal means the runtime's default.
+	kill := func(signal string) *exec.Cmd {
+		args := append(options.Args, "kill", containerName)
+		if signal != "" {
+			args = append(args, signal)
+		}
+		kill := exec.Command(runtime, args...)
+		kill.Dir = bundlePath
+		kill.Stderr = os.Stderr
+		return kill
+	}
+
+	args = append(options.Args, "delete", containerName)
+	del := exec.Command(runtime, args...)
+	del.Dir = bundlePath
+	del.Stderr = os.Stderr
+
+	// Actually create the container.
+	logrus.Debugf("Running %q", create.Args)
+	err = create.Run()
+	if err != nil {
+		return 1, fmt.Errorf("from %s creating container for %v: %s: %w", runtime, pargs, runCollectOutput(options.Logger, errorFds, closeBeforeReadingErrorFds), err)
+	}
+	// Always delete the container when we're done, whatever happens below.
+	defer func() {
+		err2 := del.Run()
+		if err2 != nil {
+			if err == nil {
+				err = fmt.Errorf("deleting container: %w", err2)
+			} else {
+				options.Logger.Infof("error from %s deleting container: %v", runtime, err2)
+			}
+		}
+	}()
+
+	// Make sure we read the container's exit status when it exits.
+	pidValue, err := os.ReadFile(pidFile)
+	if err != nil {
+		return 1, err
+	}
+	pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
+	if err != nil {
+		return 1, fmt.Errorf("parsing pid %s as a number: %w", string(pidValue), err)
+	}
+	// Reap the container's main process in the background so that its exit
+	// status isn't lost; "stopped" flags that it has exited.
+	var stopped uint32
+	var reaping sync.WaitGroup
+	reaping.Add(1)
+	go func() {
+		defer reaping.Done()
+		var err error
+		_, err = unix.Wait4(pid, &wstatus, 0, nil)
+		if err != nil {
+			wstatus = 0
+			options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err)
+		}
+		atomic.StoreUint32(&stopped, 1)
+	}()
+
+	if configureNetwork {
+		// Tell the parent that the container is created, then wait for it to
+		// finish setting up networking before we start the container.
+		if _, err := containerCreateW.Write([]byte{1}); err != nil {
+			return 1, err
+		}
+		containerCreateW.Close()
+		logrus.Debug("waiting for parent start message")
+		b := make([]byte, 1)
+		if _, err := containerStartR.Read(b); err != nil {
+			return 1, fmt.Errorf("did not get container start message from parent: %w", err)
+		}
+		containerStartR.Close()
+	}
+
+	if copyPipes {
+		// We don't need the ends of the pipes that belong to the container.
+		stdin.Close()
+		if stdout != nil {
+			stdout.Close()
+		}
+		stderr.Close()
+	}
+
+	// Handle stdio for the container in the background.
+	stdio.Add(1)
+	go runCopyStdio(options.Logger, &stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec)
+
+	// Start the container.
+	logrus.Debugf("Running %q", start.Args)
+	err = start.Run()
+	if err != nil {
+		return 1, fmt.Errorf("from %s starting container: %w", runtime, err)
+	}
+	// If the container is still running when we're unwinding, stop it.
+	defer func() {
+		if atomic.LoadUint32(&stopped) == 0 {
+			if err := kill("").Run(); err != nil {
+				options.Logger.Infof("error from %s stopping container: %v", runtime, err)
+			}
+			atomic.StoreUint32(&stopped, 1)
+		}
+	}()
+
+	// Wait for the container to exit.  Forward interrupts to the container
+	// as SIGKILL while we wait.
+	interrupted := make(chan os.Signal, 100)
+	go func() {
+		for range interrupted {
+			if err := kill("SIGKILL").Run(); err != nil {
+				logrus.Errorf("%v sending SIGKILL", err)
+			}
+		}
+	}()
+	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+	for {
+		now := time.Now()
+		var state specs.State
+		args = append(options.Args, "state", containerName)
+		stat := exec.Command(runtime, args...)
+		stat.Dir = bundlePath
+		stat.Stderr = os.Stderr
+		stateOutput, err := stat.Output()
+		if err != nil {
+			if atomic.LoadUint32(&stopped) != 0 {
+				// container exited
+				break
+			}
+			return 1, fmt.Errorf("reading container state from %s (got output: %q): %w", runtime, string(stateOutput), err)
+		}
+		if err = json.Unmarshal(stateOutput, &state); err != nil {
+			return 1, fmt.Errorf("parsing container state %q from %s: %w", string(stateOutput), runtime, err)
+		}
+		switch state.Status {
+		case "running":
+		case "stopped":
+			atomic.StoreUint32(&stopped, 1)
+		default:
+			return 1, fmt.Errorf("container status unexpectedly changed to %q", state.Status)
+		}
+		if atomic.LoadUint32(&stopped) != 0 {
+			break
+		}
+		// Re-poll roughly every 100ms, or sooner if stdio copying finishes.
+		select {
+		case <-finishedCopy:
+			atomic.StoreUint32(&stopped, 1)
+		case <-time.After(time.Until(now.Add(100 * time.Millisecond))):
+			continue
+		}
+		if atomic.LoadUint32(&stopped) != 0 {
+			break
+		}
+	}
+	signal.Stop(interrupted)
+	close(interrupted)
+
+	// Close the writing end of the stop-handling-stdio notification pipe.
+	unix.Close(finishCopy[1])
+	// Wait for the stdio copy goroutine to flush.
+	stdio.Wait()
+	// Wait until we finish reading the exit status.
+	reaping.Wait()
+
+	return wstatus, nil
+}
+
+// runCollectOutput first closes the descriptors in closeBeforeReadingFds
+// (the write ends of the pipes, so that reads can see EOF), then drains
+// whatever is available from each descriptor in fds and returns it as one
+// string. It is used to salvage a failed "create" command's output for an
+// error message; read errors are logged, not returned.
+func runCollectOutput(logger *logrus.Logger, fds, closeBeforeReadingFds []int) string { //nolint:interfacer
+	for _, fd := range closeBeforeReadingFds {
+		unix.Close(fd)
+	}
+	var b bytes.Buffer
+	buf := make([]byte, 8192)
+	for _, fd := range fds {
+		nread, err := unix.Read(fd, buf)
+		if err != nil {
+			// EINTR/EAGAIN are expected and silently ignored; anything else
+			// is logged, and in either case we move on to the next fd.
+			if errno, isErrno := err.(syscall.Errno); isErrno {
+				switch errno {
+				default:
+					logger.Errorf("error reading from pipe %d: %v", fd, err)
+				case syscall.EINTR, syscall.EAGAIN:
+				}
+			} else {
+				logger.Errorf("unable to wait for data from pipe %d: %v", fd, err)
+			}
+			continue
+		}
+		// Keep reading until EOF (nread == 0) or an error.
+		for nread > 0 {
+			r := buf[:nread]
+			if nwritten, err := b.Write(r); err != nil || nwritten != len(r) {
+				if nwritten != len(r) {
+					logger.Errorf("error buffering data from pipe %d: %v", fd, err)
+					break
+				}
+			}
+			nread, err = unix.Read(fd, buf)
+			if err != nil {
+				if errno, isErrno := err.(syscall.Errno); isErrno {
+					switch errno {
+					default:
+						logger.Errorf("error reading from pipe %d: %v", fd, err)
+					case syscall.EINTR, syscall.EAGAIN:
+					}
+				} else {
+					logger.Errorf("unable to wait for data from pipe %d: %v", fd, err)
+				}
+				break
+			}
+		}
+	}
+	return b.String()
+}
+
+// setNonblock switches the descriptor's O_NONBLOCK flag to the requested
+// state and reports whether it was previously in blocking mode (so the
+// caller can restore it later). description is used only for log messages.
+// NOTE(review): a failure from SetNonblock is logged but the returned error
+// is the (nil) one from the earlier F_GETFL — presumably deliberate
+// best-effort behavior; confirm before changing.
+func setNonblock(logger *logrus.Logger, fd int, description string, nonblocking bool) (bool, error) { //nolint:interfacer
+	mask, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
+	if err != nil {
+		return false, err
+	}
+	blocked := mask&unix.O_NONBLOCK == 0
+
+	if err := unix.SetNonblock(fd, nonblocking); err != nil {
+		if nonblocking {
+			logger.Errorf("error setting %s to nonblocking: %v", description, err)
+		} else {
+			logger.Errorf("error setting descriptor %s blocking: %v", description, err)
+		}
+	}
+	return blocked, err
+}
+
+// runCopyStdio wires up the descriptor-relay tables (which input fd feeds
+// which output fd, plus per-output buffers) for either pipe-based stdio or a
+// console-socket terminal, puts the reading ends into non-blocking mode, and
+// then hands off to runCopyStdioPassData to shuttle the data. On return it
+// closes the container-side pipe ends, marks the WaitGroup done, and signals
+// finishedCopy.
+func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) {
+	defer func() {
+		unix.Close(finishCopy[0])
+		if copyPipes {
+			unix.Close(stdioPipe[unix.Stdin][1])
+			unix.Close(stdioPipe[unix.Stdout][0])
+			unix.Close(stdioPipe[unix.Stderr][0])
+		}
+		stdio.Done()
+		finishedCopy <- struct{}{}
+		close(finishedCopy)
+	}()
+	// Map describing where data on an incoming descriptor should go.
+	relayMap := make(map[int]int)
+	// Map describing incoming and outgoing descriptors.
+	readDesc := make(map[int]string)
+	writeDesc := make(map[int]string)
+	// Buffers.
+	relayBuffer := make(map[int]*bytes.Buffer)
+	// Set up the terminal descriptor or pipes for polling.
+	if copyConsole {
+		// Accept a connection over our listening socket.
+		fd, err := runAcceptTerminal(logger, consoleListener, spec.Process.ConsoleSize)
+		if err != nil {
+			logger.Errorf("%v", err)
+			return
+		}
+		terminalFD := fd
+		// Input from our stdin, output from the terminal descriptor.
+		relayMap[unix.Stdin] = terminalFD
+		readDesc[unix.Stdin] = "stdin"
+		relayBuffer[terminalFD] = new(bytes.Buffer)
+		writeDesc[terminalFD] = "container terminal input"
+		relayMap[terminalFD] = unix.Stdout
+		readDesc[terminalFD] = "container terminal output"
+		relayBuffer[unix.Stdout] = new(bytes.Buffer)
+		writeDesc[unix.Stdout] = "output"
+		// Set our terminal's mode to raw, to pass handling of special
+		// terminal input to the terminal in the container.
+		if term.IsTerminal(unix.Stdin) {
+			if state, err := term.MakeRaw(unix.Stdin); err != nil {
+				logger.Warnf("error setting terminal state: %v", err)
+			} else {
+				defer func() {
+					if err = term.Restore(unix.Stdin, state); err != nil {
+						logger.Errorf("unable to restore terminal state: %v", err)
+					}
+				}()
+			}
+		}
+	}
+	if copyPipes {
+		// Input from our stdin, output from the stdout and stderr pipes.
+		relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1]
+		readDesc[unix.Stdin] = "stdin"
+		relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer)
+		writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin"
+		relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout
+		readDesc[stdioPipe[unix.Stdout][0]] = "container stdout"
+		relayBuffer[unix.Stdout] = new(bytes.Buffer)
+		writeDesc[unix.Stdout] = "stdout"
+		relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr
+		readDesc[stdioPipe[unix.Stderr][0]] = "container stderr"
+		relayBuffer[unix.Stderr] = new(bytes.Buffer)
+		writeDesc[unix.Stderr] = "stderr"
+	}
+	// Set our reading descriptors to non-blocking.
+	for rfd, wfd := range relayMap {
+		blocked, err := setNonblock(logger, rfd, readDesc[rfd], true)
+		if err != nil {
+			return
+		}
+		// Restore blocking mode on exit if we changed it.
+		if blocked {
+			defer setNonblock(logger, rfd, readDesc[rfd], false) // nolint:errcheck
+		}
+		setNonblock(logger, wfd, writeDesc[wfd], false) // nolint:errcheck
+	}
+
+	if copyPipes {
+		setNonblock(logger, stdioPipe[unix.Stdin][1], writeDesc[stdioPipe[unix.Stdin][1]], true) // nolint:errcheck
+	}
+
+	runCopyStdioPassData(copyPipes, stdioPipe, finishCopy, relayMap, relayBuffer, readDesc, writeDesc)
+}
+
+func canRetry(err error) bool {
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ return errno == syscall.EINTR || errno == syscall.EAGAIN
+ }
+ return false
+}
+
+// runCopyStdioPassData is the relay loop: it polls every reading descriptor
+// in relayMap (plus the finishCopy "stop" pipe), buffers whatever arrives,
+// and drains the buffers to the corresponding writing descriptors, removing
+// descriptors as they hit EOF/close. It returns when relayMap is empty or
+// when the finishCopy pipe signals that copying should stop.
+func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) {
+	closeStdin := false
+
+	// Pass data back and forth.
+	pollTimeout := -1
+	for len(relayMap) > 0 {
+		// Start building the list of descriptors to poll.
+		pollFds := make([]unix.PollFd, 0, len(relayMap)+1)
+		// Poll for a notification that we should stop handling stdio.
+		pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP})
+		// Poll on our reading descriptors.
+		for rfd := range relayMap {
+			pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP})
+		}
+		buf := make([]byte, 8192)
+		// Wait for new data from any input descriptor, or a notification that we're done.
+		_, err := unix.Poll(pollFds, pollTimeout)
+		if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
+			return
+		}
+		removes := make(map[int]struct{})
+		for _, pollFd := range pollFds {
+			// If this descriptor's just been closed from the other end, mark it for
+			// removal from the set that we're checking for.
+			if pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
+				removes[int(pollFd.Fd)] = struct{}{}
+			}
+			// If the descriptor was closed elsewhere, remove it from our list.
+			if pollFd.Revents&unix.POLLNVAL != 0 {
+				logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)])
+				removes[int(pollFd.Fd)] = struct{}{}
+			}
+			// If the POLLIN flag isn't set, then there's no data to be read from this descriptor.
+			if pollFd.Revents&unix.POLLIN == 0 {
+				continue
+			}
+			// Read whatever there is to be read.
+			readFD := int(pollFd.Fd)
+			writeFD, needToRelay := relayMap[readFD]
+			if needToRelay {
+				n, err := unix.Read(readFD, buf)
+				if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
+					return
+				}
+				// If it's zero-length on our stdin and we're
+				// using pipes, it's an EOF, so close the stdin
+				// pipe's writing end.
+				if n == 0 && !canRetry(err) && int(pollFd.Fd) == unix.Stdin {
+					removes[int(pollFd.Fd)] = struct{}{}
+				} else if n > 0 {
+					// Buffer the data in case we get blocked on where they need to go.
+					nwritten, err := relayBuffer[writeFD].Write(buf[:n])
+					if err != nil {
+						logrus.Debugf("buffer: %v", err)
+						continue
+					}
+					if nwritten != n {
+						logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten)
+						continue
+					}
+					// If this is the last of the data we'll be able to read from this
+					// descriptor, read all that there is to read.
+					for pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
+						nr, err := unix.Read(readFD, buf)
+						util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err))
+						if nr <= 0 {
+							break
+						}
+						nwritten, err := relayBuffer[writeFD].Write(buf[:nr])
+						if err != nil {
+							logrus.Debugf("buffer: %v", err)
+							break
+						}
+						if nwritten != nr {
+							logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten)
+							break
+						}
+					}
+				}
+			}
+		}
+		// Try to drain the output buffers.  Set the default timeout
+		// for the next poll() to 100ms if we still have data to write.
+		pollTimeout = -1
+		for writeFD := range relayBuffer {
+			if relayBuffer[writeFD].Len() > 0 {
+				n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes())
+				if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
+					return
+				}
+				if n > 0 {
+					relayBuffer[writeFD].Next(n)
+				}
+				// Once stdin has hit EOF and its buffer is fully drained,
+				// close the container's stdin so it sees EOF too.
+				if closeStdin && writeFD == stdioPipe[unix.Stdin][1] && stdioPipe[unix.Stdin][1] >= 0 && relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 {
+					logrus.Debugf("closing stdin")
+					unix.Close(stdioPipe[unix.Stdin][1])
+					stdioPipe[unix.Stdin][1] = -1
+				}
+			}
+			if relayBuffer[writeFD].Len() > 0 {
+				pollTimeout = 100
+			}
+		}
+		// Remove any descriptors which we don't need to poll any more from the poll descriptor list.
+		for remove := range removes {
+			if copyPipes && remove == unix.Stdin {
+				closeStdin = true
+				if relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 {
+					logrus.Debugf("closing stdin")
+					unix.Close(stdioPipe[unix.Stdin][1])
+					stdioPipe[unix.Stdin][1] = -1
+				}
+			}
+			delete(relayMap, remove)
+		}
+		// If the we-can-return pipe had anything for us, we're done.
+		for _, pollFd := range pollFds {
+			if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 {
+				// The pipe is closed, indicating that we can stop now.
+				return
+			}
+		}
+	}
+}
+
// runAcceptTerminal accepts a single connection on consoleListener and
// expects the peer to send it an open pseudoterminal descriptor in a
// rights-passing (SCM_RIGHTS) control message.  It then sets the
// terminal's window size — from terminalSize when configured, otherwise
// copied from our own controlling terminal if stdin is one — and returns
// the received descriptor, which the caller owns.
func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) {
	defer consoleListener.Close()
	c, err := consoleListener.AcceptUnix()
	if err != nil {
		return -1, fmt.Errorf("accepting socket descriptor connection: %w", err)
	}
	defer c.Close()
	// Expect a control message over our new connection.
	b := make([]byte, 8192)
	oob := make([]byte, 8192)
	n, oobn, _, _, err := c.ReadMsgUnix(b, oob)
	if err != nil {
		return -1, fmt.Errorf("reading socket descriptor: %w", err)
	}
	if n > 0 {
		logrus.Debugf("socket descriptor is for %q", string(b[:n]))
	}
	if oobn > len(oob) {
		return -1, fmt.Errorf("too much out-of-bounds data (%d bytes)", oobn)
	}
	// Parse the control message.
	scm, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return -1, fmt.Errorf("parsing out-of-bound data as a socket control message: %w", err)
	}
	logrus.Debugf("control messages: %v", scm)
	// Expect to get a descriptor: take the first descriptor found in the
	// first control message that carries any.
	terminalFD := -1
	for i := range scm {
		fds, err := unix.ParseUnixRights(&scm[i])
		if err != nil {
			return -1, fmt.Errorf("parsing unix rights control message: %v: %w", &scm[i], err)
		}
		logrus.Debugf("fds: %v", fds)
		if len(fds) == 0 {
			continue
		}
		terminalFD = fds[0]
		break
	}
	if terminalFD == -1 {
		return -1, fmt.Errorf("unable to read terminal descriptor")
	}
	// Set the pseudoterminal's size to the configured size, or our own.
	winsize := &unix.Winsize{}
	if terminalSize != nil {
		// Use configured sizes.
		winsize.Row = uint16(terminalSize.Height)
		winsize.Col = uint16(terminalSize.Width)
	} else {
		if term.IsTerminal(unix.Stdin) {
			// Use the size of our terminal.
			if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil {
				logger.Warnf("error reading size of controlling terminal: %v", err)
				winsize.Row = 0
				winsize.Col = 0
			}
		}
	}
	// Only apply a size we actually know; 0x0 means "unknown", so leave
	// the pty's size alone in that case.
	if winsize.Row != 0 && winsize.Col != 0 {
		if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil {
			logger.Warnf("error setting size of container pseudoterminal: %v", err)
		}
		// FIXME - if we're connected to a terminal, we should
		// be passing the updated terminal size down when we
		// receive a SIGWINCH.
	}
	return terminalFD, nil
}
+
+func runUsingRuntimeMain() {
+ var options runUsingRuntimeSubprocOptions
+ // Set logging.
+ if level := os.Getenv("LOGLEVEL"); level != "" {
+ if ll, err := strconv.Atoi(level); err == nil {
+ logrus.SetLevel(logrus.Level(ll))
+ }
+ }
+ // Unpack our configuration.
+ confPipe := os.NewFile(3, "confpipe")
+ if confPipe == nil {
+ fmt.Fprintf(os.Stderr, "error reading options pipe\n")
+ os.Exit(1)
+ }
+ defer confPipe.Close()
+ if err := json.NewDecoder(confPipe).Decode(&options); err != nil {
+ fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
+ os.Exit(1)
+ }
+ // Set ourselves up to read the container's exit status. We're doing this in a child process
+ // so that we won't mess with the setting in a caller of the library.
+ if err := setChildProcess(); err != nil {
+ os.Exit(1)
+ }
+ ospec := options.Spec
+ if ospec == nil {
+ fmt.Fprintf(os.Stderr, "options spec not specified\n")
+ os.Exit(1)
+ }
+
+ // open the pipes used to communicate with the parent process
+ var containerCreateW *os.File
+ var containerStartR *os.File
+ if options.ConfigureNetwork {
+ containerCreateW = os.NewFile(4, "containercreatepipe")
+ if containerCreateW == nil {
+ fmt.Fprintf(os.Stderr, "could not open fd 4\n")
+ os.Exit(1)
+ }
+ containerStartR = os.NewFile(5, "containerstartpipe")
+ if containerStartR == nil {
+ fmt.Fprintf(os.Stderr, "could not open fd 5\n")
+ os.Exit(1)
+ }
+ }
+
+ // Run the container, start to finish.
+ status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
+ os.Exit(1)
+ }
+ // Pass the container's exit status back to the caller by exiting with the same status.
+ if status.Exited() {
+ os.Exit(status.ExitStatus())
+ } else if status.Signaled() {
+ fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal())
+ os.Exit(1)
+ }
+ os.Exit(1)
+}
+
// runUsingRuntimeSubproc marshals the run configuration, re-execs this
// binary as a runUsingRuntimeCommand child (handled by
// runUsingRuntimeMain), and forwards termination signals to it.  When
// configureNetwork is set it additionally waits for the child's
// "container created" sync message, configures the container's network
// namespace and optional /etc/hosts entries, and then writes the start
// message so the child proceeds.
func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, networkString string,
	moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile string) (err error) {
	// Lock the caller to a single OS-level thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var confwg sync.WaitGroup
	// Serialize everything the child needs in order to run the container.
	config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
		Options:          options,
		Spec:             spec,
		RootPath:         rootPath,
		BundlePath:       bundlePath,
		ConfigureNetwork: configureNetwork,
		MoreCreateArgs:   moreCreateArgs,
		ContainerName:    containerName,
		Isolation:        isolation,
	})
	if conferr != nil {
		return fmt.Errorf("encoding configuration for %q: %w", runUsingRuntimeCommand, conferr)
	}
	cmd := reexec.Command(runUsingRuntimeCommand)
	setPdeathsig(cmd)
	cmd.Dir = bundlePath
	// Fall back to our own stdio for any stream the caller didn't supply.
	cmd.Stdin = options.Stdin
	if cmd.Stdin == nil {
		cmd.Stdin = os.Stdin
	}
	cmd.Stdout = options.Stdout
	if cmd.Stdout == nil {
		cmd.Stdout = os.Stdout
	}
	cmd.Stderr = options.Stderr
	if cmd.Stderr == nil {
		cmd.Stderr = os.Stderr
	}
	// Propagate our log level to the child; runUsingRuntimeMain reads it back.
	cmd.Env = util.MergeEnv(os.Environ(), []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())})
	preader, pwriter, err := os.Pipe()
	if err != nil {
		return fmt.Errorf("creating configuration pipe: %w", err)
	}
	// Feed the configuration to the child asynchronously; conferr is
	// re-checked after the child exits.
	confwg.Add(1)
	go func() {
		_, conferr = io.Copy(pwriter, bytes.NewReader(config))
		if conferr != nil {
			conferr = fmt.Errorf("while copying configuration down pipe to child process: %w", conferr)
		}
		confwg.Done()
	}()

	// create network configuration pipes
	var containerCreateR, containerCreateW fileCloser
	var containerStartR, containerStartW fileCloser
	if configureNetwork {
		containerCreateR.file, containerCreateW.file, err = os.Pipe()
		if err != nil {
			return fmt.Errorf("creating container create pipe: %w", err)
		}
		defer containerCreateR.Close()
		defer containerCreateW.Close()

		containerStartR.file, containerStartW.file, err = os.Pipe()
		if err != nil {
			return fmt.Errorf("creating container start pipe: %w", err)
		}
		defer containerStartR.Close()
		defer containerStartW.Close()
		// The child sees these as descriptors 4 and 5.
		cmd.ExtraFiles = []*os.File{containerCreateW.file, containerStartR.file}
	}

	// The configuration pipe is always the child's descriptor 3.
	cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
	defer preader.Close()
	defer pwriter.Close()
	if err := cmd.Start(); err != nil {
		return fmt.Errorf("while starting runtime: %w", err)
	}

	// Forward termination-type signals to the child for as long as it runs.
	interrupted := make(chan os.Signal, 100)
	go func() {
		for receivedSignal := range interrupted {
			if err := cmd.Process.Signal(receivedSignal); err != nil {
				logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
			}
		}
	}()
	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

	if configureNetwork {
		// we already passed the fd to the child, now close the writer so we do not hang if the child closes it
		containerCreateW.Close()
		if err := waitForSync(containerCreateR.file); err != nil {
			// we do not want to return here since we want to capture the exit code from the child via cmd.Wait()
			// close the pipes here so that the child will not hang forever
			containerCreateR.Close()
			containerStartW.Close()
			logrus.Errorf("did not get container create message from subprocess: %v", err)
		} else {
			// The runtime wrote the container's PID into the bundle directory.
			pidFile := filepath.Join(bundlePath, "pid")
			pidValue, err := os.ReadFile(pidFile)
			if err != nil {
				return err
			}
			pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
			if err != nil {
				return fmt.Errorf("parsing pid %s as a number: %w", string(pidValue), err)
			}

			teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, networkString, containerName)
			if teardown != nil {
				defer teardown()
			}
			if err != nil {
				return fmt.Errorf("setup network: %w", err)
			}

			// only add hosts if we manage the hosts file
			if hostsFile != "" {
				entries := etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
				// make sure to sync this with (b *Builder) generateHosts()
				err = etchosts.Add(hostsFile, entries)
				if err != nil {
					return err
				}
			}

			logrus.Debug("network namespace successfully setup, send start message to child")
			_, err = containerStartW.file.Write([]byte{1})
			if err != nil {
				return err
			}
		}
	}

	// NOTE(review): on the error-return paths above and below, we return
	// without calling signal.Stop(interrupted)/close(interrupted), which
	// leaves the forwarding goroutine running — confirm this is intended.
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("while running runtime: %w", err)
	}
	confwg.Wait()
	signal.Stop(interrupted)
	close(interrupted)
	// err is the named return; if it's still nil, surface any error from
	// the configuration-copying goroutine instead.
	if err == nil {
		return conferr
	}
	if conferr != nil {
		logrus.Debugf("%v", conferr)
	}
	return err
}
+
// runUsingRuntimeSubprocOptions is the configuration payload that
// runUsingRuntimeSubproc JSON-encodes and pipes to the reexec'ed child,
// which decodes it in runUsingRuntimeMain.
type runUsingRuntimeSubprocOptions struct {
	Options          RunOptions       // the caller's run options
	Spec             *specs.Spec      // runtime spec handed to the OCI runtime
	RootPath         string
	BundlePath       string           // bundle directory; also the child's working directory
	ConfigureNetwork bool             // when set, the child opens sync pipes on descriptors 4 and 5
	MoreCreateArgs   []string         // extra arguments for the runtime's create step
	ContainerName    string
	Isolation        define.Isolation
}
+
// init registers runUsingRuntimeMain as the handler invoked when this
// binary is re-executed with runUsingRuntimeCommand as its argv[0]
// (see runUsingRuntimeSubproc).
func init() {
	reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain)
}
+
// setupMounts assembles the complete, ordered mount list for the
// container spec: special mounts (e.g. shm), bind-mounted files,
// subscription mounts, built-in volume copies, explicitly-requested
// volume mounts, and per-RUN --mount entries.  If this succeeds, the
// caller must call cleanupMounts(), and the returned artifacts' cache
// TargetLocks must eventually be unlocked (cleanupRunMounts does this).
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) {
	// Start building a new list of mounts.
	var mounts []specs.Mount
	// haveMount reports whether we've already queued a mount for destination.
	haveMount := func(destination string) bool {
		for _, mount := range mounts {
			if mount.Destination == destination {
				// Already have something to mount there.
				return true
			}
		}
		return false
	}

	specMounts, err := setupSpecialMountSpecChanges(spec, b.CommonBuildOpts.ShmSize)
	if err != nil {
		return nil, err
	}

	// Get the list of files we need to bind into the container.
	bindFileMounts := runSetupBoundFiles(bundlePath, bindFiles)

	// After this point we need to know the per-container persistent storage directory.
	cdir, err := b.store.ContainerDirectory(b.ContainerID)
	if err != nil {
		return nil, fmt.Errorf("determining work directory for container %q: %w", b.ContainerID, err)
	}

	// Figure out which UID and GID to tell the subscriptions package to use
	// for files that it creates.
	rootUID, rootGID, err := util.GetHostRootIDs(spec)
	if err != nil {
		return nil, err
	}

	// Get host UID and GID of the container process.
	var uidMap = []specs.LinuxIDMapping{}
	var gidMap = []specs.LinuxIDMapping{}
	if spec.Linux != nil {
		uidMap = spec.Linux.UIDMappings
		gidMap = spec.Linux.GIDMappings
	}
	processUID, processGID, err := util.GetHostIDs(uidMap, gidMap, spec.Process.User.UID, spec.Process.User.GID)
	if err != nil {
		return nil, err
	}

	// Get the list of subscriptions mounts.
	subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false)

	idMaps := IDMaps{
		uidmap:     uidMap,
		gidmap:     gidMap,
		rootUID:    int(rootUID),
		rootGID:    int(rootGID),
		processUID: int(processUID),
		processGID: int(processGID),
	}
	// Get the list of mounts that are just for this Run() call.
	runMounts, mountArtifacts, err := b.runSetupRunMounts(mountPoint, runFileMounts, runMountInfo, idMaps)
	if err != nil {
		return nil, err
	}
	// If we fail below, release the cache-target locks that
	// runSetupRunMounts took before we hand the artifacts to the caller.
	succeeded := false
	defer func() {
		if !succeeded {
			volumes.UnlockLockArray(mountArtifacts.TargetLocks)
		}
	}()
	// Add temporary copies of the contents of volume locations at the
	// volume locations, unless we already have something there.
	builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
	if err != nil {
		return nil, err
	}

	// Get the list of explicitly-specified volume mounts.
	var mountLabel = ""
	if spec.Linux != nil {
		mountLabel = spec.Linux.MountLabel
	}
	volumes, err := b.runSetupVolumeMounts(mountLabel, volumeMounts, optionMounts, idMaps)
	if err != nil {
		return nil, err
	}

	// prepare list of mount destinations which can be cleaned up safely.
	// we can clean bindFiles, subscriptionMounts and specMounts
	// everything other than these might have users content
	mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...)

	allMounts := util.SortMounts(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...))
	// Add them all, in the preferred order, except where they conflict with something that was previously added.
	for _, mount := range allMounts {
		if haveMount(mount.Destination) {
			// Already mounting something there, no need to bother with this one.
			continue
		}
		// Add the mount.
		mounts = append(mounts, mount)
	}

	// Set the list in the spec.
	spec.Mounts = mounts
	succeeded = true
	return mountArtifacts, nil
}
+
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
+ var mounts []specs.Mount
+ hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
+ // Add temporary copies of the contents of volume locations at the
+ // volume locations, unless we already have something there.
+ for _, volume := range builtinVolumes {
+ volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex())
+ initializeVolume := false
+ // If we need to, create the directory that we'll use to hold
+ // the volume contents. If we do need to create it, then we'll
+ // need to populate it, too, so make a note of that.
+ if _, err := os.Stat(volumePath); err != nil {
+ if !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
+ logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume)
+ if err = os.MkdirAll(volumePath, 0755); err != nil {
+ return nil, err
+ }
+ if err = label.Relabel(volumePath, mountLabel, false); err != nil {
+ return nil, err
+ }
+ initializeVolume = true
+ }
+ // Make sure the volume exists in the rootfs and read its attributes.
+ createDirPerms := os.FileMode(0755)
+ err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{
+ ChownNew: &hostOwner,
+ ChmodNew: &createDirPerms,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("ensuring volume path %q: %w", filepath.Join(mountPoint, volume), err)
+ }
+ srcPath, err := copier.Eval(mountPoint, filepath.Join(mountPoint, volume), copier.EvalOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("evaluating path %q: %w", srcPath, err)
+ }
+ stat, err := os.Stat(srcPath)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
+ // If we need to populate the mounted volume's contents with
+ // content from the rootfs, set it up now.
+ if initializeVolume {
+ if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
+ return nil, err
+ }
+ if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
+ return nil, err
+ }
+ logrus.Debugf("populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
+ if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("populating directory %q for volume %q using contents of %q: %w", volumePath, volume, srcPath, err)
+ }
+ }
+ // Add the bind mount.
+ mounts = append(mounts, specs.Mount{
+ Source: volumePath,
+ Destination: volume,
+ Type: define.TypeBind,
+ Options: define.BindOptions,
+ })
+ }
+ return mounts, nil
+}
+
+// Destinations which can be cleaned up after every RUN
+func cleanableDestinationListFromMounts(mounts []specs.Mount) []string {
+ mountDest := []string{}
+ for _, mount := range mounts {
+ // Add all destination to mountArtifacts so that they can be cleaned up later
+ if mount.Destination != "" {
+ cleanPath := true
+ for _, prefix := range nonCleanablePrefixes {
+ if strings.HasPrefix(mount.Destination, prefix) {
+ cleanPath = false
+ break
+ }
+ }
+ if cleanPath {
+ mountDest = append(mountDest, mount.Destination)
+ }
+ }
+ }
+ return mountDest
+}
+
+func checkIfMountDestinationPreExists(root string, dest string) (bool, error) {
+ statResults, err := copier.Stat(root, "", copier.StatOptions{}, []string{dest})
+ if err != nil {
+ return false, err
+ }
+ if len(statResults) > 0 {
+ // We created exact path for globbing so it will
+ // return only one result.
+ if statResults[0].Error != "" && len(statResults[0].Globbed) == 0 {
+ // Path do not exist.
+ return false, nil
+ }
+ // Path exists.
+ return true, nil
+ }
+ return false, nil
+}
+
// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs
//
// If this function succeeds, the caller must unlock
// runMountArtifacts.TargetLocks once the RUN step is done with them
// (cleanupRunMounts does this).
func (b *Builder) runSetupRunMounts(mountPoint string, mounts []string, sources runMountInfo, idMaps IDMaps) ([]specs.Mount, *runMountArtifacts, error) {
	mountTargets := make([]string, 0, 10)
	tmpFiles := make([]string, 0, len(mounts))
	mountImages := make([]string, 0, 10)
	finalMounts := make([]specs.Mount, 0, len(mounts))
	agents := make([]*sshagent.AgentServer, 0, len(mounts))
	sshCount := 0
	defaultSSHSock := ""
	targetLocks := []*lockfile.LockFile{}
	// Release any cache-target locks we've already taken if we bail out
	// partway through.
	succeeded := false
	defer func() {
		if !succeeded {
			volumes.UnlockLockArray(targetLocks)
		}
	}()
	for _, mount := range mounts {
		var mountSpec *specs.Mount
		var err error
		var envFile, image string
		var agent *sshagent.AgentServer
		var tl *lockfile.LockFile
		tokens := strings.Split(mount, ",")

		// If `type` is not set default to TypeBind
		mountType := define.TypeBind

		// Scan the tokens for an explicit type= before dispatching.
		for _, field := range tokens {
			if strings.HasPrefix(field, "type=") {
				kv := strings.Split(field, "=")
				if len(kv) != 2 {
					return nil, nil, errors.New("invalid mount type")
				}
				mountType = kv[1]
			}
		}
		switch mountType {
		case "secret":
			mountSpec, envFile, err = b.getSecretMount(tokens, sources.Secrets, idMaps, sources.WorkDir)
			if err != nil {
				return nil, nil, err
			}
			// mountSpec is nil when the secret is optional and absent.
			if mountSpec != nil {
				finalMounts = append(finalMounts, *mountSpec)
				if envFile != "" {
					tmpFiles = append(tmpFiles, envFile)
				}
			}
		case "ssh":
			mountSpec, agent, err = b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps)
			if err != nil {
				return nil, nil, err
			}
			if mountSpec != nil {
				finalMounts = append(finalMounts, *mountSpec)
				agents = append(agents, agent)
				if sshCount == 0 {
					defaultSSHSock = mountSpec.Destination
				}
				// Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i}
				sshCount++
			}
		case define.TypeBind:
			mountSpec, image, err = b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps, sources.WorkDir)
			if err != nil {
				return nil, nil, err
			}
			finalMounts = append(finalMounts, *mountSpec)
			// only perform cleanup if image was mounted ignore everything else
			if image != "" {
				mountImages = append(mountImages, image)
			}
		case "tmpfs":
			mountSpec, err = b.getTmpfsMount(tokens, idMaps)
			if err != nil {
				return nil, nil, err
			}
			finalMounts = append(finalMounts, *mountSpec)
		case "cache":
			mountSpec, tl, err = b.getCacheMount(tokens, sources.StageMountPoints, idMaps, sources.WorkDir)
			if err != nil {
				return nil, nil, err
			}
			finalMounts = append(finalMounts, *mountSpec)
			if tl != nil {
				targetLocks = append(targetLocks, tl)
			}
		default:
			return nil, nil, fmt.Errorf("invalid mount type %q", mountType)
		}

		if mountSpec != nil {
			pathPreExists, err := checkIfMountDestinationPreExists(mountPoint, mountSpec.Destination)
			if err != nil {
				return nil, nil, err
			}
			if !pathPreExists {
				// In such case it means that the path did not exists before
				// creating any new mounts therefore we must clean the newly
				// created directory after this step.
				mountTargets = append(mountTargets, mountSpec.Destination)
			}
		}
	}
	succeeded = true
	artifacts := &runMountArtifacts{
		RunMountTargets: mountTargets,
		TmpFiles:        tmpFiles,
		Agents:          agents,
		MountedImages:   mountImages,
		SSHAuthSock:     defaultSSHSock,
		TargetLocks:     targetLocks,
	}
	return finalMounts, artifacts, nil
}
+
+func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, string, error) {
+ if contextDir == "" {
+ return nil, "", errors.New("Context Directory for current run invocation is not configured")
+ }
+ var optionMounts []specs.Mount
+ mount, image, err := volumes.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir)
+ if err != nil {
+ return nil, image, err
+ }
+ optionMounts = append(optionMounts, mount)
+ volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
+ if err != nil {
+ return nil, image, err
+ }
+ return &volumes[0], image, nil
+}
+
+func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*specs.Mount, error) {
+ var optionMounts []specs.Mount
+ mount, err := volumes.GetTmpfsMount(tokens)
+ if err != nil {
+ return nil, err
+ }
+ optionMounts = append(optionMounts, mount)
+ volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
+ if err != nil {
+ return nil, err
+ }
+ return &volumes[0], nil
+}
+
+func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secret, idMaps IDMaps, workdir string) (*specs.Mount, string, error) {
+ errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint")
+ if len(tokens) == 0 {
+ return nil, "", errInvalidSyntax
+ }
+ var err error
+ var id, target string
+ var required bool
+ var uid, gid uint32
+ var mode uint32 = 0400
+ for _, val := range tokens {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
+ case "id":
+ id = kv[1]
+ case "target", "dst", "destination":
+ target = kv[1]
+ if !filepath.IsAbs(target) {
+ target = filepath.Join(workdir, target)
+ }
+ case "required":
+ required = true
+ if len(kv) > 1 {
+ required, err = strconv.ParseBool(kv[1])
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
+ }
+ case "mode":
+ mode64, err := strconv.ParseUint(kv[1], 8, 32)
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
+ mode = uint32(mode64)
+ case "uid":
+ uid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
+ uid = uint32(uid64)
+ case "gid":
+ gid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, "", errInvalidSyntax
+ }
+ gid = uint32(gid64)
+ default:
+ return nil, "", errInvalidSyntax
+ }
+ }
+
+ if id == "" {
+ return nil, "", errInvalidSyntax
+ }
+ // Default location for secretis is /run/secrets/id
+ if target == "" {
+ target = "/run/secrets/" + id
+ }
+
+ secr, ok := secrets[id]
+ if !ok {
+ if required {
+ return nil, "", fmt.Errorf("secret required but no secret with id %s found", id)
+ }
+ return nil, "", nil
+ }
+ var data []byte
+ var envFile string
+ var ctrFileOnHost string
+
+ switch secr.SourceType {
+ case "env":
+ data = []byte(os.Getenv(secr.Source))
+ tmpFile, err := os.CreateTemp(define.TempDir, "buildah*")
+ if err != nil {
+ return nil, "", err
+ }
+ envFile = tmpFile.Name()
+ ctrFileOnHost = tmpFile.Name()
+ case "file":
+ containerWorkingDir, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return nil, "", err
+ }
+ data, err = os.ReadFile(secr.Source)
+ if err != nil {
+ return nil, "", err
+ }
+ ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id)
+ default:
+ return nil, "", errors.New("invalid source secret type")
+ }
+
+ // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod,
+ // chown and relabel it for the container user and we don't want to mess with the original file
+ if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil {
+ return nil, "", err
+ }
+ if err := os.WriteFile(ctrFileOnHost, data, 0644); err != nil {
+ return nil, "", err
+ }
+
+ if err := label.Relabel(ctrFileOnHost, b.MountLabel, false); err != nil {
+ return nil, "", err
+ }
+ hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
+ if err != nil {
+ return nil, "", err
+ }
+ if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil {
+ return nil, "", err
+ }
+ if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil {
+ return nil, "", err
+ }
+ newMount := specs.Mount{
+ Destination: target,
+ Type: define.TypeBind,
+ Source: ctrFileOnHost,
+ Options: append(define.BindOptions, "rprivate", "ro"),
+ }
+ return &newMount, envFile, nil
+}
+
+// getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container
+func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, idMaps IDMaps) (*specs.Mount, *sshagent.AgentServer, error) {
+ errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint")
+
+ var err error
+ var id, target string
+ var required bool
+ var uid, gid uint32
+ var mode uint32 = 400
+ for _, val := range tokens {
+ kv := strings.SplitN(val, "=", 2)
+ if len(kv) < 2 {
+ return nil, nil, errInvalidSyntax
+ }
+ switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
+ case "id":
+ id = kv[1]
+ case "target", "dst", "destination":
+ target = kv[1]
+ case "required":
+ required, err = strconv.ParseBool(kv[1])
+ if err != nil {
+ return nil, nil, errInvalidSyntax
+ }
+ case "mode":
+ mode64, err := strconv.ParseUint(kv[1], 8, 32)
+ if err != nil {
+ return nil, nil, errInvalidSyntax
+ }
+ mode = uint32(mode64)
+ case "uid":
+ uid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, nil, errInvalidSyntax
+ }
+ uid = uint32(uid64)
+ case "gid":
+ gid64, err := strconv.ParseUint(kv[1], 10, 32)
+ if err != nil {
+ return nil, nil, errInvalidSyntax
+ }
+ gid = uint32(gid64)
+ default:
+ return nil, nil, errInvalidSyntax
+ }
+ }
+
+ if id == "" {
+ id = "default"
+ }
+ // Default location for secretis is /run/buildkit/ssh_agent.{i}
+ if target == "" {
+ target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", count)
+ }
+
+ sshsource, ok := sshsources[id]
+ if !ok {
+ if required {
+ return nil, nil, fmt.Errorf("ssh required but no ssh with id %s found", id)
+ }
+ return nil, nil, nil
+ }
+ // Create new agent from keys or socket
+ fwdAgent, err := sshagent.NewAgentServer(sshsource)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Start ssh server, and get the host sock we're mounting in the container
+ hostSock, err := fwdAgent.Serve(b.ProcessLabel)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := label.Relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ if err := label.Relabel(hostSock, b.MountLabel, false); err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
+ if err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ if err := os.Lchown(hostSock, int(hostUID), int(hostGID)); err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil {
+ if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
+ b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
+ }
+ return nil, nil, err
+ }
+ newMount := specs.Mount{
+ Destination: target,
+ Type: define.TypeBind,
+ Source: hostSock,
+ Options: append(define.BindOptions, "rprivate", "ro"),
+ }
+ return &newMount, fwdAgent, nil
+}
+
+func (b *Builder) cleanupTempVolumes() {
+ for tempVolume, val := range b.TempVolumes {
+ if val {
+ if err := overlay.RemoveTemp(tempVolume); err != nil {
+ b.Logger.Errorf(err.Error())
+ }
+ b.TempVolumes[tempVolume] = false
+ }
+ }
+}
+
+// cleanupRunMounts cleans up run mounts so they only appear in this run.
+func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error {
+ for _, agent := range artifacts.Agents {
+ err := agent.Shutdown()
+ if err != nil {
+ return err
+ }
+ }
+
+ //cleanup any mounted images for this run
+ for _, image := range artifacts.MountedImages {
+ if image != "" {
+ // if flow hits here some image was mounted for this run
+ i, err := internalUtil.LookupImage(context, b.store, image)
+ if err == nil {
+ // silently try to unmount and do nothing
+ // if image is being used by something else
+ _ = i.Unmount(false)
+ }
+ if errors.Is(err, storageTypes.ErrImageUnknown) {
+ // Ignore only if ErrImageUnknown
+ // Reason: Image is already unmounted do nothing
+ continue
+ }
+ return err
+ }
+ }
+ opts := copier.RemoveOptions{
+ All: true,
+ }
+ for _, path := range artifacts.RunMountTargets {
+ err := copier.Remove(mountpoint, path, opts)
+ if err != nil {
+ return err
+ }
+ }
+ var prevErr error
+ for _, path := range artifacts.TmpFiles {
+ err := os.Remove(path)
+ if !errors.Is(err, os.ErrNotExist) {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ }
+ // unlock if any locked files from this RUN statement
+ volumes.UnlockLockArray(artifacts.TargetLocks)
+ return prevErr
+}
+
+// setPdeathsig sets a parent-death signal for the process
+// the goroutine that starts the child process should lock itself to
+// a native thread using runtime.LockOSThread() until the child exits
+func setPdeathsig(cmd *exec.Cmd) {
+ if cmd.SysProcAttr == nil {
+ cmd.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+}
diff --git a/run_freebsd.go b/run_freebsd.go
new file mode 100644
index 0000000..9344876
--- /dev/null
+++ b/run_freebsd.go
@@ -0,0 +1,584 @@
+//go:build freebsd
+// +build freebsd
+
+package buildah
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "unsafe"
+
+ "github.com/containers/buildah/bind"
+ "github.com/containers/buildah/chroot"
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal"
+ "github.com/containers/buildah/internal/tmpdir"
+ "github.com/containers/buildah/pkg/jail"
+ "github.com/containers/buildah/pkg/overlay"
+ "github.com/containers/buildah/pkg/parse"
+ butil "github.com/containers/buildah/pkg/util"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libnetwork/resolvconf"
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/pkg/config"
+ cutil "github.com/containers/common/pkg/util"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/docker/go-units"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ P_PID = 0
+ P_PGID = 2
+ PROC_REAP_ACQUIRE = 2
+ PROC_REAP_RELEASE = 3
+)
+
+var (
+ // We dont want to remove destinations with /etc, /dev as
+ // rootfs already contains these files and unionfs will create
+ // a `whiteout` i.e `.wh` files on removal of overlapping
+ // files from these directories. everything other than these
+ // will be cleaned up
+ nonCleanablePrefixes = []string{
+ "/etc", "/dev",
+ }
+)
+
+func procctl(idtype int, id int, cmd int, arg *byte) error {
+ _, _, e1 := unix.Syscall6(
+ unix.SYS_PROCCTL, uintptr(idtype), uintptr(id),
+ uintptr(cmd), uintptr(unsafe.Pointer(arg)), 0, 0)
+ if e1 != 0 {
+ return unix.Errno(e1)
+ }
+ return nil
+}
+
+func setChildProcess() error {
+ if err := procctl(P_PID, unix.Getpid(), PROC_REAP_ACQUIRE, nil); err != nil {
+ fmt.Fprintf(os.Stderr, "procctl(PROC_REAP_ACQUIRE): %v\n", err)
+ return err
+ }
+ return nil
+}
+
+func (b *Builder) Run(command []string, options RunOptions) error {
+ p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
+ if err != nil {
+ return err
+ }
+ // On some hosts like AH, /tmp is a symlink and we need an
+ // absolute path.
+ path, err := filepath.EvalSymlinks(p)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("using %q to hold bundle data", path)
+ defer func() {
+ if err2 := os.RemoveAll(path); err2 != nil {
+ logrus.Errorf("error removing %q: %v", path, err2)
+ }
+ }()
+
+ gp, err := generate.New("freebsd")
+ if err != nil {
+ return fmt.Errorf("generating new 'freebsd' runtime spec: %w", err)
+ }
+ g := &gp
+
+ isolation := options.Isolation
+ if isolation == IsolationDefault {
+ isolation = b.Isolation
+ if isolation == IsolationDefault {
+ isolation, err = parse.IsolationOption("")
+ if err != nil {
+ logrus.Debugf("got %v while trying to determine default isolation, guessing OCI", err)
+ isolation = IsolationOCI
+ } else if isolation == IsolationDefault {
+ isolation = IsolationOCI
+ }
+ }
+ }
+ if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil {
+ return err
+ }
+
+ // hardwire the environment to match docker build to avoid subtle and hard-to-debug differences due to containers.conf
+ b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
+
+ if b.CommonBuildOpts == nil {
+ return fmt.Errorf("invalid format on container you must recreate the container")
+ }
+
+ if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil {
+ return err
+ }
+
+ if options.WorkingDir != "" {
+ g.SetProcessCwd(options.WorkingDir)
+ } else if b.WorkDir() != "" {
+ g.SetProcessCwd(b.WorkDir())
+ }
+ mountPoint, err := b.Mount(b.MountLabel)
+ if err != nil {
+ return fmt.Errorf("mounting container %q: %w", b.ContainerID, err)
+ }
+ defer func() {
+ if err := b.Unmount(); err != nil {
+ logrus.Errorf("error unmounting container: %v", err)
+ }
+ }()
+ g.SetRootPath(mountPoint)
+ if len(command) > 0 {
+ command = runLookupPath(g, command)
+ g.SetProcessArgs(command)
+ } else {
+ g.SetProcessArgs(nil)
+ }
+
+ setupTerminal(g, options.Terminal, options.TerminalSize)
+
+ configureNetwork, networkString, err := b.configureNamespaces(g, &options)
+ if err != nil {
+ return err
+ }
+
+ containerName := Package + "-" + filepath.Base(path)
+ if configureNetwork {
+ g.AddAnnotation("org.freebsd.parentJail", containerName+"-vnet")
+ }
+
+ homeDir, err := b.configureUIDGID(g, mountPoint, options)
+ if err != nil {
+ return err
+ }
+
+ // Now grab the spec from the generator. Set the generator to nil so that future contributors
+ // will quickly be able to tell that they're supposed to be modifying the spec directly from here.
+ spec := g.Config
+ g = nil
+
+ // Set the seccomp configuration using the specified profile name. Some syscalls are
+ // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot),
+ // so we sorted out the capabilities lists first.
+ if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil {
+ return err
+ }
+
+ uid, gid := spec.Process.User.UID, spec.Process.User.GID
+ idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
+
+ mode := os.FileMode(0755)
+ coptions := copier.MkdirOptions{
+ ChownNew: idPair,
+ ChmodNew: &mode,
+ }
+ if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil {
+ return err
+ }
+
+ bindFiles := make(map[string]string)
+ volumes := b.Volumes()
+
+ // Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
+ rootUID, rootGID, err := util.GetHostRootIDs(spec)
+ if err != nil {
+ return err
+ }
+ rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
+
+ hostFile := ""
+ if !options.NoHosts && !cutil.StringInSlice(config.DefaultHostsFile, volumes) && options.ConfigureNetwork != define.NetworkDisabled {
+ hostFile, err = b.generateHosts(path, rootIDPair, mountPoint, spec)
+ if err != nil {
+ return err
+ }
+ bindFiles[config.DefaultHostsFile] = hostFile
+ }
+
+ if !cutil.StringInSlice(resolvconf.DefaultResolvConf, volumes) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") {
+ resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, nil)
+ if err != nil {
+ return err
+ }
+ bindFiles[resolvconf.DefaultResolvConf] = resolvFile
+ }
+
+ runMountInfo := runMountInfo{
+ ContextDir: options.ContextDir,
+ Secrets: options.Secrets,
+ SSHSources: options.SSHSources,
+ StageMountPoints: options.StageMountPoints,
+ SystemContext: options.SystemContext,
+ }
+
+ runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
+ if err != nil {
+ return fmt.Errorf("resolving mountpoints for container %q: %w", b.ContainerID, err)
+ }
+ if runArtifacts.SSHAuthSock != "" {
+ sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock
+ spec.Process.Env = append(spec.Process.Env, sshenv)
+ }
+
+ // following run was called from `buildah run`
+ // and some images were mounted for this run
+ // add them to cleanup artifacts
+ if len(options.ExternalImageMounts) > 0 {
+ runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...)
+ }
+
+ defer func() {
+ if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil {
+ options.Logger.Errorf("unable to cleanup run mounts %v", err)
+ }
+ }()
+
+ defer b.cleanupTempVolumes()
+
+ // If we are creating a network, make the vnet here so that we
+ // can execute the OCI runtime inside it.
+ if configureNetwork {
+ mynetns := containerName + "-vnet"
+
+ jconf := jail.NewConfig()
+ jconf.Set("name", mynetns)
+ jconf.Set("vnet", jail.NEW)
+ jconf.Set("children.max", 1)
+ jconf.Set("persist", true)
+ jconf.Set("enforce_statfs", 0)
+ jconf.Set("devfs_ruleset", 4)
+ jconf.Set("allow.raw_sockets", true)
+ jconf.Set("allow.chflags", true)
+ jconf.Set("securelevel", -1)
+ netjail, err := jail.Create(jconf)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ jconf := jail.NewConfig()
+ jconf.Set("persist", false)
+ err2 := netjail.Set(jconf)
+ if err2 != nil {
+ logrus.Errorf("error releasing vnet jail %q: %v", mynetns, err2)
+ }
+ }()
+ }
+
+ switch isolation {
+ case IsolationOCI:
+ var moreCreateArgs []string
+ if options.NoPivot {
+ moreCreateArgs = []string{"--no-pivot"}
+ } else {
+ moreCreateArgs = nil
+ }
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec, mountPoint, path, containerName, b.Container, hostFile)
+ case IsolationChroot:
+ err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)
+ default:
+ err = errors.New("don't know how to run this command")
+ }
+ return err
+}
+
+func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error {
+ defaultContainerConfig, err := config.Default()
+ if err != nil {
+ return fmt.Errorf("failed to get container config: %w", err)
+ }
+ // Other process resource limits
+ if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits.Get()); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Resources: %#v", commonOpts)
+ return nil
+}
+
+// setupSpecialMountSpecChanges creates special mounts for depending
+// on the namespaces - nothing yet for freebsd
+func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, error) {
+ return spec.Mounts, nil
+}
+
+// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*spec.Mount, *lockfile.LockFile, error) {
+ return nil, nil, errors.New("cache mounts not supported on freebsd")
+}
+
+func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {
+ // Make sure the overlay directory is clean before running
+ _, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err)
+ }
+
+ parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
+ var foundrw, foundro, foundO bool
+ var upperDir string
+ for _, opt := range options {
+ switch opt {
+ case "rw":
+ foundrw = true
+ case "ro":
+ foundro = true
+ case "O":
+ foundO = true
+ }
+ if strings.HasPrefix(opt, "upperdir") {
+ splitOpt := strings.SplitN(opt, "=", 2)
+ if len(splitOpt) > 1 {
+ upperDir = splitOpt[1]
+ }
+ }
+ }
+ if !foundrw && !foundro {
+ options = append(options, "rw")
+ }
+ if mountType == "bind" || mountType == "rbind" {
+ mountType = "nullfs"
+ }
+ if foundO {
+ containerDir, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return specs.Mount{}, err
+ }
+
+ contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID)
+ if err != nil {
+ return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err)
+ }
+
+ overlayOpts := overlay.Options{
+ RootUID: idMaps.rootUID,
+ RootGID: idMaps.rootGID,
+ UpperDirOptionFragment: upperDir,
+ GraphOpts: b.store.GraphOptions(),
+ }
+
+ overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts)
+ if err == nil {
+ b.TempVolumes[contentDir] = true
+ }
+ return overlayMount, err
+ }
+ return specs.Mount{
+ Destination: container,
+ Type: mountType,
+ Source: host,
+ Options: options,
+ }, nil
+ }
+
+ // Bind mount volumes specified for this particular Run() invocation
+ for _, i := range optionMounts {
+ logrus.Debugf("setting up mounted volume at %q", i.Destination)
+ mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options)
+ if err != nil {
+ return nil, err
+ }
+ mounts = append(mounts, mount)
+ }
+ // Bind mount volumes given by the user when the container was created
+ for _, i := range volumeMounts {
+ var options []string
+ spliti := strings.Split(i, ":")
+ if len(spliti) > 2 {
+ options = strings.Split(spliti[2], ",")
+ }
+ mount, err := parseMount("nullfs", spliti[0], spliti[1], options)
+ if err != nil {
+ return nil, err
+ }
+ mounts = append(mounts, mount)
+ }
+ return mounts, nil
+}
+
+func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error {
+ return nil
+}
+
+func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, networkString string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
+ //if isolation == IsolationOCIRootless {
+ //return setupRootlessNetwork(pid)
+ //}
+
+ var configureNetworks []string
+ if len(networkString) > 0 {
+ configureNetworks = strings.Split(networkString, ",")
+ }
+
+ if len(configureNetworks) == 0 {
+ configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()}
+ }
+ logrus.Debugf("configureNetworks: %v", configureNetworks)
+
+ mynetns := containerName + "-vnet"
+
+ networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks))
+ for i, network := range configureNetworks {
+ networks[network] = nettypes.PerNetworkOptions{
+ InterfaceName: fmt.Sprintf("eth%d", i),
+ }
+ }
+
+ opts := nettypes.NetworkOptions{
+ ContainerID: containerName,
+ ContainerName: containerName,
+ Networks: networks,
+ }
+ _, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ teardown = func() {
+ err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts})
+ if err != nil {
+ logrus.Errorf("failed to cleanup network: %v", err)
+ }
+ }
+
+ return teardown, nil, nil
+}
+
+func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) {
+ // Set namespace options in the container configuration.
+ for _, namespaceOption := range namespaceOptions {
+ switch namespaceOption.Name {
+ case string(specs.NetworkNamespace):
+ configureNetwork = false
+ if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) {
+ if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) {
+ networkString = namespaceOption.Path
+ namespaceOption.Path = ""
+ }
+ configureNetwork = (policy != define.NetworkDisabled)
+ }
+ case string(specs.UTSNamespace):
+ configureUTS = false
+ if !namespaceOption.Host && namespaceOption.Path == "" {
+ configureUTS = true
+ }
+ }
+ // TODO: re-visit this when there is consensus on a
+ // FreeBSD runtime-spec. FreeBSD jails have rough
+ // equivalents for UTS and and network namespaces.
+ }
+
+ return configureNetwork, networkString, configureUTS, nil
+}
+
+func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, string, error) {
+ defaultNamespaceOptions, err := DefaultNamespaceOptions()
+ if err != nil {
+ return false, "", err
+ }
+
+ namespaceOptions := defaultNamespaceOptions
+ namespaceOptions.AddOrReplace(b.NamespaceOptions...)
+ namespaceOptions.AddOrReplace(options.NamespaceOptions...)
+
+ networkPolicy := options.ConfigureNetwork
+ //Nothing was specified explicitly so network policy should be inherited from builder
+ if networkPolicy == NetworkDefault {
+ networkPolicy = b.ConfigureNetwork
+
+ // If builder policy was NetworkDisabled and
+ // we want to disable network for this run.
+ // reset options.ConfigureNetwork to NetworkDisabled
+ // since it will be treated as source of truth later.
+ if networkPolicy == NetworkDisabled {
+ options.ConfigureNetwork = networkPolicy
+ }
+ }
+
+ configureNetwork, networkString, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
+ if err != nil {
+ return false, "", err
+ }
+
+ if configureUTS {
+ if options.Hostname != "" {
+ g.SetHostname(options.Hostname)
+ } else if b.Hostname() != "" {
+ g.SetHostname(b.Hostname())
+ } else {
+ g.SetHostname(stringid.TruncateID(b.ContainerID))
+ }
+ } else {
+ g.SetHostname("")
+ }
+
+ found := false
+ spec := g.Config
+ for i := range spec.Process.Env {
+ if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") {
+ found = true
+ break
+ }
+ }
+ if !found {
+ spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
+ }
+
+ return configureNetwork, networkString, nil
+}
+
+func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) {
+ for dest, src := range bindFiles {
+ options := []string{}
+ if strings.HasPrefix(src, bundlePath) {
+ options = append(options, bind.NoBindOption)
+ }
+ mounts = append(mounts, specs.Mount{
+ Source: src,
+ Destination: dest,
+ Type: "nullfs",
+ Options: options,
+ })
+ }
+ return mounts
+}
+
+func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error {
+ var (
+ ul *units.Ulimit
+ err error
+ )
+
+ ulimit = append(defaultUlimits, ulimit...)
+ for _, u := range ulimit {
+ if ul, err = butil.ParseUlimit(u); err != nil {
+ return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
+ }
+
+ g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft))
+ }
+ return nil
+}
+
+// Create pipes to use for relaying stdio.
+func runMakeStdioPipe(uid, gid int) ([][]int, error) {
+ stdioPipe := make([][]int, 3)
+ for i := range stdioPipe {
+ stdioPipe[i] = make([]int, 2)
+ if err := unix.Pipe(stdioPipe[i]); err != nil {
+ return nil, fmt.Errorf("creating pipe for container FD %d: %w", i, err)
+ }
+ }
+ return stdioPipe, nil
+}
diff --git a/run_linux.go b/run_linux.go
new file mode 100644
index 0000000..5263abe
--- /dev/null
+++ b/run_linux.go
@@ -0,0 +1,1240 @@
+//go:build linux
+// +build linux
+
+package buildah
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containernetworking/plugins/pkg/ns"
+ "github.com/containers/buildah/bind"
+ "github.com/containers/buildah/chroot"
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal"
+ "github.com/containers/buildah/internal/tmpdir"
+ "github.com/containers/buildah/internal/volumes"
+ "github.com/containers/buildah/pkg/overlay"
+ "github.com/containers/buildah/pkg/parse"
+ butil "github.com/containers/buildah/pkg/util"
+ "github.com/containers/buildah/util"
+ "github.com/containers/common/libnetwork/pasta"
+ "github.com/containers/common/libnetwork/resolvconf"
+ "github.com/containers/common/libnetwork/slirp4netns"
+ nettypes "github.com/containers/common/libnetwork/types"
+ netUtil "github.com/containers/common/libnetwork/util"
+ "github.com/containers/common/pkg/capabilities"
+ "github.com/containers/common/pkg/chown"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/hooks"
+ hooksExec "github.com/containers/common/pkg/hooks/exec"
+ cutil "github.com/containers/common/pkg/util"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/docker/go-units"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
+type ContainerDevices define.ContainerDevices
+
+var (
+ // We dont want to remove destinations with /etc, /dev, /sys,
+ // /proc as rootfs already contains these files and unionfs
+ // will create a `whiteout` i.e `.wh` files on removal of
+ // overlapping files from these directories. everything other
+ // than these will be cleaned up
+ nonCleanablePrefixes = []string{
+ "/etc", "/dev", "/sys", "/proc",
+ }
+)
+
+func setChildProcess() error {
+ if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil {
+ fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err)
+ return err
+ }
+ return nil
+}
+
+// Run runs the specified command in the container's root filesystem.
+func (b *Builder) Run(command []string, options RunOptions) error {
+ p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
+ if err != nil {
+ return err
+ }
+ // On some hosts like AH, /tmp is a symlink and we need an
+ // absolute path.
+ path, err := filepath.EvalSymlinks(p)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("using %q to hold bundle data", path)
+ defer func() {
+ if err2 := os.RemoveAll(path); err2 != nil {
+ options.Logger.Error(err2)
+ }
+ }()
+
+ gp, err := generate.New("linux")
+ if err != nil {
+ return fmt.Errorf("generating new 'linux' runtime spec: %w", err)
+ }
+ g := &gp
+
+ isolation := options.Isolation
+ if isolation == define.IsolationDefault {
+ isolation = b.Isolation
+ if isolation == define.IsolationDefault {
+ isolation, err = parse.IsolationOption("")
+ if err != nil {
+ logrus.Debugf("got %v while trying to determine default isolation, guessing OCI", err)
+ isolation = IsolationOCI
+ } else if isolation == IsolationDefault {
+ isolation = IsolationOCI
+ }
+ }
+ }
+ if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil {
+ return err
+ }
+
+ // hardwire the environment to match docker build to avoid subtle and hard-to-debug differences due to containers.conf
+ b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
+
+ if b.CommonBuildOpts == nil {
+ return fmt.Errorf("invalid format on container you must recreate the container")
+ }
+
+ if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil {
+ return err
+ }
+
+ workDir := b.WorkDir()
+ if options.WorkingDir != "" {
+ g.SetProcessCwd(options.WorkingDir)
+ workDir = options.WorkingDir
+ } else if b.WorkDir() != "" {
+ g.SetProcessCwd(b.WorkDir())
+ }
+ setupSelinux(g, b.ProcessLabel, b.MountLabel)
+ mountPoint, err := b.Mount(b.MountLabel)
+ if err != nil {
+ return fmt.Errorf("mounting container %q: %w", b.ContainerID, err)
+ }
+ defer func() {
+ if err := b.Unmount(); err != nil {
+ options.Logger.Errorf("error unmounting container: %v", err)
+ }
+ }()
+ g.SetRootPath(mountPoint)
+ if len(command) > 0 {
+ command = runLookupPath(g, command)
+ g.SetProcessArgs(command)
+ } else {
+ g.SetProcessArgs(nil)
+ }
+
+ // Mount devices if any and if session is rootless attempt a bind-mount
+ // just like podman.
+ if unshare.IsRootless() {
+ // We are going to create bind mounts for devices
+ // but we need to make sure that we don't override
+ // anything which is already in OCI spec.
+ mounts := make(map[string]interface{})
+ for _, m := range g.Mounts() {
+ mounts[m.Destination] = true
+ }
+ newMounts := []specs.Mount{}
+ for _, d := range b.Devices {
+ // Default permission is read-only.
+ perm := "ro"
+ // Get permission configured for this device but only process `write`
+ // permission in rootless since `mknod` is not supported anyways.
+ if strings.Contains(string(d.Rule.Permissions), "w") {
+ perm = "rw"
+ }
+ devMnt := specs.Mount{
+ Destination: d.Destination,
+ Type: parse.TypeBind,
+ Source: d.Source,
+ Options: []string{"slave", "nosuid", "noexec", perm, "rbind"},
+ }
+ // Podman parity: podman skips these two devices hence we do the same.
+ if d.Path == "/dev/ptmx" || strings.HasPrefix(d.Path, "/dev/tty") {
+ continue
+ }
+ // Device is already in OCI spec do not re-mount.
+ if _, found := mounts[d.Path]; found {
+ continue
+ }
+ newMounts = append(newMounts, devMnt)
+ }
+ g.Config.Mounts = append(newMounts, g.Config.Mounts...)
+ } else {
+ for _, d := range b.Devices {
+ sDev := specs.LinuxDevice{
+ Type: string(d.Type),
+ Path: d.Path,
+ Major: d.Major,
+ Minor: d.Minor,
+ FileMode: &d.FileMode,
+ UID: &d.Uid,
+ GID: &d.Gid,
+ }
+ g.AddDevice(sDev)
+ g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, string(d.Permissions))
+ }
+ }
+
+ setupMaskedPaths(g)
+ setupReadOnlyPaths(g)
+
+ setupTerminal(g, options.Terminal, options.TerminalSize)
+
+ configureNetwork, networkString, err := b.configureNamespaces(g, &options)
+ if err != nil {
+ return err
+ }
+
+ homeDir, err := b.configureUIDGID(g, mountPoint, options)
+ if err != nil {
+ return err
+ }
+
+ g.SetProcessNoNewPrivileges(b.CommonBuildOpts.NoNewPrivileges)
+
+ g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile)
+
+ // Now grab the spec from the generator. Set the generator to nil so that future contributors
+ // will quickly be able to tell that they're supposed to be modifying the spec directly from here.
+ spec := g.Config
+ g = nil
+
+ // Set the seccomp configuration using the specified profile name. Some syscalls are
+ // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot),
+ // so we sorted out the capabilities lists first.
+ if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil {
+ return err
+ }
+
+ uid, gid := spec.Process.User.UID, spec.Process.User.GID
+ if spec.Linux != nil {
+ uid, gid, err = util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, uid, gid)
+ if err != nil {
+ return err
+ }
+ }
+
+ idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
+
+ mode := os.FileMode(0755)
+ coptions := copier.MkdirOptions{
+ ChownNew: idPair,
+ ChmodNew: &mode,
+ }
+ if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil {
+ return err
+ }
+
+ bindFiles := make(map[string]string)
+ volumes := b.Volumes()
+
+ // Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
+ rootUID, rootGID, err := util.GetHostRootIDs(spec)
+ if err != nil {
+ return err
+ }
+ rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
+
+ hostFile := ""
+ if !options.NoHosts && !cutil.StringInSlice(config.DefaultHostsFile, volumes) && options.ConfigureNetwork != define.NetworkDisabled {
+ hostFile, err = b.generateHosts(path, rootIDPair, mountPoint, spec)
+ if err != nil {
+ return err
+ }
+ bindFiles[config.DefaultHostsFile] = hostFile
+ }
+
+ if !options.NoHostname && !(cutil.StringInSlice("/etc/hostname", volumes)) {
+ hostFile, err := b.generateHostname(path, spec.Hostname, rootIDPair)
+ if err != nil {
+ return err
+ }
+ // Bind /etc/hostname
+ bindFiles["/etc/hostname"] = hostFile
+ }
+
+ if !cutil.StringInSlice(resolvconf.DefaultResolvConf, volumes) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") {
+ resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, spec.Linux.Namespaces)
+ if err != nil {
+ return err
+ }
+ bindFiles[resolvconf.DefaultResolvConf] = resolvFile
+ }
+ // Empty file, so no need to recreate if it exists
+ if _, ok := bindFiles["/run/.containerenv"]; !ok {
+ containerenvPath := filepath.Join(path, "/run/.containerenv")
+ if err = os.MkdirAll(filepath.Dir(containerenvPath), 0755); err != nil {
+ return err
+ }
+
+ rootless := 0
+ if unshare.IsRootless() {
+ rootless = 1
+ }
+ // Populate the .containerenv with container information
+ containerenv := fmt.Sprintf(`
+engine="buildah-%s"
+name=%q
+id=%q
+image=%q
+imageid=%q
+rootless=%d
+`, define.Version, b.Container, b.ContainerID, b.FromImage, b.FromImageID, rootless)
+
+ if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0755); err != nil {
+ return err
+ }
+ if err := label.Relabel(containerenvPath, b.MountLabel, false); err != nil {
+ return err
+ }
+
+ bindFiles["/run/.containerenv"] = containerenvPath
+ }
+
+ // Setup OCI hooks
+ _, err = b.setupOCIHooks(spec, (len(options.Mounts) > 0 || len(volumes) > 0))
+ if err != nil {
+ return fmt.Errorf("unable to setup OCI hooks: %w", err)
+ }
+
+ runMountInfo := runMountInfo{
+ WorkDir: workDir,
+ ContextDir: options.ContextDir,
+ Secrets: options.Secrets,
+ SSHSources: options.SSHSources,
+ StageMountPoints: options.StageMountPoints,
+ SystemContext: options.SystemContext,
+ }
+
+ runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
+ if err != nil {
+ return fmt.Errorf("resolving mountpoints for container %q: %w", b.ContainerID, err)
+ }
+ if runArtifacts.SSHAuthSock != "" {
+ sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock
+ spec.Process.Env = append(spec.Process.Env, sshenv)
+ }
+
+ // following run was called from `buildah run`
+ // and some images were mounted for this run
+ // add them to cleanup artifacts
+ if len(options.ExternalImageMounts) > 0 {
+ runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...)
+ }
+
+ defer func() {
+ if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil {
+ options.Logger.Errorf("unable to cleanup run mounts %v", err)
+ }
+ }()
+
+ defer b.cleanupTempVolumes()
+
+ switch isolation {
+ case define.IsolationOCI:
+ var moreCreateArgs []string
+ if options.NoPivot {
+ moreCreateArgs = append(moreCreateArgs, "--no-pivot")
+ }
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec,
+ mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile)
+ case IsolationChroot:
+ err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)
+ case IsolationOCIRootless:
+ moreCreateArgs := []string{"--no-new-keyring"}
+ if options.NoPivot {
+ moreCreateArgs = append(moreCreateArgs, "--no-pivot")
+ }
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec,
+ mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile)
+ default:
+ err = errors.New("don't know how to run this command")
+ }
+ return err
+}
+
// setupOCIHooks collects the OCI hooks to run for this container, grouped by
// hook stage name.  If the builder specifies explicit hook directories
// (--hooks-dir) only those are read; otherwise, and only when running as
// root, the system-wide default and override directories are consulted.
// Any configured "precreate" hooks are executed immediately so they can
// filter/adjust the runtime spec before the container is created.
func (b *Builder) setupOCIHooks(config *specs.Spec, hasVolumes bool) (map[string][]specs.Hook, error) {
	allHooks := make(map[string][]specs.Hook)
	if len(b.CommonBuildOpts.OCIHooksDir) == 0 {
		if unshare.IsRootless() {
			// Rootless: skip the implicit system hook directories entirely.
			return nil, nil
		}
		for _, hDir := range []string{hooks.DefaultDir, hooks.OverrideDir} {
			manager, err := hooks.New(context.Background(), []string{hDir}, []string{})
			if err != nil {
				// A missing directory simply means no hooks are installed there.
				if errors.Is(err, os.ErrNotExist) {
					continue
				}
				return nil, err
			}
			ociHooks, err := manager.Hooks(config, b.ImageAnnotations, hasVolumes)
			if err != nil {
				return nil, err
			}
			// Loading anything from an implicit directory earns a deprecation warning.
			if len(ociHooks) > 0 || config.Hooks != nil {
				logrus.Warnf("Implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
			}
			// Later directories (OverrideDir) replace stages loaded earlier.
			for i, hook := range ociHooks {
				allHooks[i] = hook
			}
		}
	} else {
		manager, err := hooks.New(context.Background(), b.CommonBuildOpts.OCIHooksDir, []string{})
		if err != nil {
			return nil, err
		}

		allHooks, err = manager.Hooks(config, b.ImageAnnotations, hasVolumes)
		if err != nil {
			return nil, err
		}
	}

	// Run precreate hooks now; they may rewrite config in place.
	hookErr, err := hooksExec.RuntimeConfigFilter(context.Background(), allHooks["precreate"], config, hooksExec.DefaultPostKillTimeout) //nolint:staticcheck
	if err != nil {
		logrus.Warnf("Container: precreate hook: %v", err)
		if hookErr != nil && hookErr != err {
			logrus.Debugf("container: precreate hook (hook error): %v", hookErr)
		}
		return nil, err
	}
	return allHooks, nil
}
+
+func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error {
+ // Resources - CPU
+ if commonOpts.CPUPeriod != 0 {
+ g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod)
+ }
+ if commonOpts.CPUQuota != 0 {
+ g.SetLinuxResourcesCPUQuota(commonOpts.CPUQuota)
+ }
+ if commonOpts.CPUShares != 0 {
+ g.SetLinuxResourcesCPUShares(commonOpts.CPUShares)
+ }
+ if commonOpts.CPUSetCPUs != "" {
+ g.SetLinuxResourcesCPUCpus(commonOpts.CPUSetCPUs)
+ }
+ if commonOpts.CPUSetMems != "" {
+ g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems)
+ }
+
+ // Resources - Memory
+ if commonOpts.Memory != 0 {
+ g.SetLinuxResourcesMemoryLimit(commonOpts.Memory)
+ }
+ if commonOpts.MemorySwap != 0 {
+ g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap)
+ }
+
+ // cgroup membership
+ if commonOpts.CgroupParent != "" {
+ g.SetLinuxCgroupsPath(commonOpts.CgroupParent)
+ }
+
+ defaultContainerConfig, err := config.Default()
+ if err != nil {
+ return fmt.Errorf("failed to get container config: %w", err)
+ }
+ // Other process resource limits
+ if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits.Get()); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Resources: %#v", commonOpts)
+ return nil
+}
+
+func setupSlirp4netnsNetwork(config *config.Config, netns, cid string, options []string) (func(), map[string]nettypes.StatusBlock, error) {
+ // we need the TmpDir for the slirp4netns code
+ if err := os.MkdirAll(config.Engine.TmpDir, 0o751); err != nil {
+ return nil, nil, fmt.Errorf("failed to create tempdir: %w", err)
+ }
+ res, err := slirp4netns.Setup(&slirp4netns.SetupOptions{
+ Config: config,
+ ContainerID: cid,
+ Netns: netns,
+ ExtraOptions: options,
+ Pdeathsig: syscall.SIGKILL,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ip, err := slirp4netns.GetIP(res.Subnet)
+ if err != nil {
+ return nil, nil, fmt.Errorf("get slirp4netns ip: %w", err)
+ }
+
+ // create fake status to make sure we get the correct ip in hosts
+ subnet := nettypes.IPNet{IPNet: net.IPNet{
+ IP: *ip,
+ Mask: res.Subnet.Mask,
+ }}
+ netStatus := map[string]nettypes.StatusBlock{
+ slirp4netns.BinaryName: {
+ Interfaces: map[string]nettypes.NetInterface{
+ "tap0": {
+ Subnets: []nettypes.NetAddress{{IPNet: subnet}},
+ },
+ },
+ },
+ }
+
+ return func() {
+ syscall.Kill(res.Pid, syscall.SIGKILL) // nolint:errcheck
+ var status syscall.WaitStatus
+ syscall.Wait4(res.Pid, &status, 0, nil) // nolint:errcheck
+ }, netStatus, nil
+}
+
// setupPasta starts pasta for the given network namespace and returns a
// synthesized network status carrying the first address visible inside the
// namespace.  pasta manages its own lifetime, so the returned teardown
// function is nil.
func setupPasta(config *config.Config, netns string, options []string) (func(), map[string]nettypes.StatusBlock, error) {
	err := pasta.Setup(&pasta.SetupOptions{
		Config:       config,
		Netns:        netns,
		ExtraOptions: options,
	})
	if err != nil {
		return nil, nil, err
	}

	var ip string
	err = ns.WithNetNSPath(netns, func(_ ns.NetNS) error {
		// get the first ip in the netns and use this as our ip for /etc/hosts
		ip = netUtil.GetLocalIP()
		return nil
	})
	if err != nil {
		return nil, nil, err
	}

	// create fake status to make sure we get the correct ip in hosts
	// NOTE(review): the netmask is hard-coded to /24 and the status map is
	// keyed by slirp4netns.BinaryName rather than pasta's own name —
	// presumably only the IP is consumed downstream; confirm before
	// relying on either the key or the mask.
	subnet := nettypes.IPNet{IPNet: net.IPNet{
		IP:   net.ParseIP(ip),
		Mask: net.IPv4Mask(255, 255, 255, 0),
	}}
	netStatus := map[string]nettypes.StatusBlock{
		slirp4netns.BinaryName: {
			Interfaces: map[string]nettypes.NetInterface{
				"tap0": {
					Subnets: []nettypes.NetAddress{{IPNet: subnet}},
				},
			},
		},
	}

	// No teardown needed: pasta exits with the namespace.
	return nil, netStatus, nil
}
+
+func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, network, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
+ netns := fmt.Sprintf("/proc/%d/ns/net", pid)
+ var configureNetworks []string
+ defConfig, err := config.Default()
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to get container config: %w", err)
+ }
+
+ name, networkOpts, hasOpts := strings.Cut(network, ":")
+ var netOpts []string
+ if hasOpts {
+ netOpts = strings.Split(networkOpts, ",")
+ }
+ if isolation == IsolationOCIRootless && name == "" {
+ switch defConfig.Network.DefaultRootlessNetworkCmd {
+ case slirp4netns.BinaryName, "":
+ name = slirp4netns.BinaryName
+ case pasta.BinaryName:
+ name = pasta.BinaryName
+ default:
+ return nil, nil, fmt.Errorf("invalid default_rootless_network_cmd option %q",
+ defConfig.Network.DefaultRootlessNetworkCmd)
+ }
+ }
+
+ switch {
+ case name == slirp4netns.BinaryName:
+ return setupSlirp4netnsNetwork(defConfig, netns, containerName, netOpts)
+ case name == pasta.BinaryName:
+ return setupPasta(defConfig, netns, netOpts)
+
+ // Basically default case except we make sure to not split an empty
+ // name as this would return a slice with one empty string which is
+ // not a valid network name.
+ case len(network) > 0:
+ // old syntax allow comma separated network names
+ configureNetworks = strings.Split(network, ",")
+ }
+
+ if isolation == IsolationOCIRootless {
+ return nil, nil, errors.New("cannot use networks as rootless")
+ }
+
+ if len(configureNetworks) == 0 {
+ configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()}
+ }
+
+ // Make sure we can access the container's network namespace,
+ // even after it exits, to successfully tear down the
+ // interfaces. Ensure this by opening a handle to the network
+ // namespace, and using our copy to both configure and
+ // deconfigure it.
+ netFD, err := unix.Open(netns, unix.O_RDONLY, 0)
+ if err != nil {
+ return nil, nil, fmt.Errorf("opening network namespace: %w", err)
+ }
+ mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD)
+
+ networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks))
+ for i, network := range configureNetworks {
+ networks[network] = nettypes.PerNetworkOptions{
+ InterfaceName: fmt.Sprintf("eth%d", i),
+ }
+ }
+
+ opts := nettypes.NetworkOptions{
+ ContainerID: containerName,
+ ContainerName: containerName,
+ Networks: networks,
+ }
+ netStatus, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ teardown = func() {
+ err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts})
+ if err != nil {
+ options.Logger.Errorf("failed to cleanup network: %v", err)
+ }
+ }
+
+ return teardown, netStatus, nil
+}
+
+// Create pipes to use for relaying stdio.
+func runMakeStdioPipe(uid, gid int) ([][]int, error) {
+ stdioPipe := make([][]int, 3)
+ for i := range stdioPipe {
+ stdioPipe[i] = make([]int, 2)
+ if err := unix.Pipe(stdioPipe[i]); err != nil {
+ return nil, fmt.Errorf("creating pipe for container FD %d: %w", i, err)
+ }
+ }
+ if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil {
+ return nil, fmt.Errorf("setting owner of stdin pipe descriptor: %w", err)
+ }
+ if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil {
+ return nil, fmt.Errorf("setting owner of stdout pipe descriptor: %w", err)
+ }
+ if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil {
+ return nil, fmt.Errorf("setting owner of stderr pipe descriptor: %w", err)
+ }
+ return stdioPipe, nil
+}
+
+func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) {
+ defaultContainerConfig, err := config.Default()
+ if err != nil {
+ return false, "", false, fmt.Errorf("failed to get container config: %w", err)
+ }
+
+ addSysctl := func(prefixes []string) error {
+ for _, sysctl := range defaultContainerConfig.Sysctls() {
+ splitn := strings.SplitN(sysctl, "=", 2)
+ if len(splitn) > 2 {
+ return fmt.Errorf("sysctl %q defined in containers.conf must be formatted name=value", sysctl)
+ }
+ for _, prefix := range prefixes {
+ if strings.HasPrefix(splitn[0], prefix) {
+ g.AddLinuxSysctl(splitn[0], splitn[1])
+ }
+ }
+ }
+ return nil
+ }
+
+ // Set namespace options in the container configuration.
+ configureUserns := false
+ specifiedNetwork := false
+ for _, namespaceOption := range namespaceOptions {
+ switch namespaceOption.Name {
+ case string(specs.IPCNamespace):
+ if !namespaceOption.Host {
+ if err := addSysctl([]string{"fs.mqueue"}); err != nil {
+ return false, "", false, err
+ }
+ }
+ case string(specs.UserNamespace):
+ configureUserns = false
+ if !namespaceOption.Host && namespaceOption.Path == "" {
+ configureUserns = true
+ }
+ case string(specs.NetworkNamespace):
+ specifiedNetwork = true
+ configureNetwork = false
+ if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) {
+ if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) {
+ networkString = namespaceOption.Path
+ namespaceOption.Path = ""
+ }
+ configureNetwork = (policy != define.NetworkDisabled)
+ }
+ case string(specs.UTSNamespace):
+ configureUTS = false
+ if !namespaceOption.Host {
+ if namespaceOption.Path == "" {
+ configureUTS = true
+ }
+ if err := addSysctl([]string{"kernel.hostname", "kernel.domainame"}); err != nil {
+ return false, "", false, err
+ }
+ }
+ }
+ if namespaceOption.Host {
+ if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil {
+ return false, "", false, fmt.Errorf("removing %q namespace for run: %w", namespaceOption.Name, err)
+ }
+ } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil {
+ if namespaceOption.Path == "" {
+ return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", namespaceOption.Name, err)
+ }
+ return false, "", false, fmt.Errorf("adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err)
+ }
+ }
+
+ // If we've got mappings, we're going to have to create a user namespace.
+ if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns {
+ if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil {
+ return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.UserNamespace), err)
+ }
+ hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
+ if err != nil {
+ return false, "", false, err
+ }
+ for _, m := range idmapOptions.UIDMap {
+ g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size)
+ }
+ if len(idmapOptions.UIDMap) == 0 {
+ for _, m := range hostUidmap {
+ g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size)
+ }
+ }
+ for _, m := range idmapOptions.GIDMap {
+ g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size)
+ }
+ if len(idmapOptions.GIDMap) == 0 {
+ for _, m := range hostGidmap {
+ g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size)
+ }
+ }
+ if !specifiedNetwork {
+ if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil {
+ return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.NetworkNamespace), err)
+ }
+ configureNetwork = (policy != define.NetworkDisabled)
+ }
+ } else {
+ if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil {
+ return false, "", false, fmt.Errorf("removing %q namespace for run: %w", string(specs.UserNamespace), err)
+ }
+ if !specifiedNetwork {
+ if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil {
+ return false, "", false, fmt.Errorf("removing %q namespace for run: %w", string(specs.NetworkNamespace), err)
+ }
+ }
+ }
+ if configureNetwork {
+ if err := addSysctl([]string{"net"}); err != nil {
+ return false, "", false, err
+ }
+ }
+ return configureNetwork, networkString, configureUTS, nil
+}
+
+func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, string, error) {
+ defaultNamespaceOptions, err := DefaultNamespaceOptions()
+ if err != nil {
+ return false, "", err
+ }
+
+ namespaceOptions := defaultNamespaceOptions
+ namespaceOptions.AddOrReplace(b.NamespaceOptions...)
+ namespaceOptions.AddOrReplace(options.NamespaceOptions...)
+
+ networkPolicy := options.ConfigureNetwork
+ //Nothing was specified explicitly so network policy should be inherited from builder
+ if networkPolicy == NetworkDefault {
+ networkPolicy = b.ConfigureNetwork
+
+ // If builder policy was NetworkDisabled and
+ // we want to disable network for this run.
+ // reset options.ConfigureNetwork to NetworkDisabled
+ // since it will be treated as source of truth later.
+ if networkPolicy == NetworkDisabled {
+ options.ConfigureNetwork = networkPolicy
+ }
+ }
+ if networkPolicy == NetworkDisabled {
+ namespaceOptions.AddOrReplace(define.NamespaceOptions{{Name: string(specs.NetworkNamespace), Host: false}}...)
+ }
+ configureNetwork, networkString, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
+ if err != nil {
+ return false, "", err
+ }
+
+ if configureUTS {
+ if options.Hostname != "" {
+ g.SetHostname(options.Hostname)
+ } else if b.Hostname() != "" {
+ g.SetHostname(b.Hostname())
+ } else {
+ g.SetHostname(stringid.TruncateID(b.ContainerID))
+ }
+ } else {
+ g.SetHostname("")
+ }
+
+ found := false
+ spec := g.Config
+ for i := range spec.Process.Env {
+ if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") {
+ found = true
+ break
+ }
+ }
+ if !found {
+ spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
+ }
+
+ return configureNetwork, networkString, nil
+}
+
+func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) {
+ for dest, src := range bindFiles {
+ options := []string{"rbind"}
+ if strings.HasPrefix(src, bundlePath) {
+ options = append(options, bind.NoBindOption)
+ }
+ mounts = append(mounts, specs.Mount{
+ Source: src,
+ Destination: dest,
+ Type: "bind",
+ Options: options,
+ })
+ }
+ return mounts
+}
+
+func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error {
+ var (
+ ul *units.Ulimit
+ err error
+ )
+
+ ulimit = append(defaultUlimits, ulimit...)
+ for _, u := range ulimit {
+ if ul, err = butil.ParseUlimit(u); err != nil {
+ return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
+ }
+
+ g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft))
+ }
+ return nil
+}
+
// runSetupVolumeMounts converts the volume specifications for this run —
// both structured option mounts and "host:container[:opts]" volume strings —
// into spec mounts, applying any requested SELinux relabeling ("z"/"Z"),
// ownership changes ("U"), and overlay setup ("O") as side effects.
func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {
	// Make sure the overlay directory is clean before running
	containerDir, err := b.store.ContainerDirectory(b.ContainerID)
	if err != nil {
		return nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err)
	}
	if err := overlay.CleanupContent(containerDir); err != nil {
		return nil, fmt.Errorf("cleaning up overlay content for %s: %w", b.ContainerID, err)
	}

	// parseMount turns a single mount request into a spec mount, performing
	// any relabel/chown/overlay work the options call for.
	parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
		var foundrw, foundro, foundz, foundZ, foundO, foundU bool
		var rootProp, upperDir, workDir string
		for _, opt := range options {
			switch opt {
			case "rw":
				foundrw = true
			case "ro":
				foundro = true
			case "z":
				foundz = true
			case "Z":
				foundZ = true
			case "O":
				foundO = true
			case "U":
				foundU = true
			case "private", "rprivate", "slave", "rslave", "shared", "rshared":
				rootProp = opt
			}

			// "upperdir=..." and "workdir=..." carry values, so they are
			// matched by prefix rather than in the switch above.
			if strings.HasPrefix(opt, "upperdir") {
				splitOpt := strings.SplitN(opt, "=", 2)
				if len(splitOpt) > 1 {
					upperDir = splitOpt[1]
				}
			}
			if strings.HasPrefix(opt, "workdir") {
				splitOpt := strings.SplitN(opt, "=", 2)
				if len(splitOpt) > 1 {
					workDir = splitOpt[1]
				}
			}
		}
		// Default to read-write when neither rw nor ro was requested.
		if !foundrw && !foundro {
			options = append(options, "rw")
		}
		// "z": shared relabel; "Z": private relabel of the host path.
		if foundz {
			if err := label.Relabel(host, mountLabel, true); err != nil {
				return specs.Mount{}, err
			}
		}
		if foundZ {
			if err := label.Relabel(host, mountLabel, false); err != nil {
				return specs.Mount{}, err
			}
		}
		// "U": recursively chown the host path to the container process IDs.
		if foundU {
			if err := chown.ChangeHostPathOwnership(host, true, idMaps.processUID, idMaps.processGID); err != nil {
				return specs.Mount{}, err
			}
		}
		// "O": mount the host path as an overlay so writes don't touch it.
		if foundO {
			if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") {
				return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa")
			}

			containerDir, err := b.store.ContainerDirectory(b.ContainerID)
			if err != nil {
				return specs.Mount{}, err
			}

			contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID)
			if err != nil {
				return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err)
			}

			overlayOpts := overlay.Options{
				RootUID:                idMaps.rootUID,
				RootGID:                idMaps.rootGID,
				UpperDirOptionFragment: upperDir,
				WorkDirOptionFragment:  workDir,
				GraphOpts:              b.store.GraphOptions(),
			}

			overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts)
			if err == nil {
				// Remember the temp dir so it gets cleaned up later.
				b.TempVolumes[contentDir] = true
			}

			// If chown true, add correct ownership to the overlay temp directories.
			if foundU {
				if err := chown.ChangeHostPathOwnership(contentDir, true, idMaps.processUID, idMaps.processGID); err != nil {
					return specs.Mount{}, err
				}
			}

			return overlayMount, err
		}
		// Default the mount propagation to private when none was requested.
		if rootProp == "" {
			options = append(options, "private")
		}
		// Everything that isn't tmpfs becomes a recursive bind mount.
		if mountType != "tmpfs" {
			mountType = "bind"
			options = append(options, "rbind")
		}
		return specs.Mount{
			Destination: container,
			Type:        mountType,
			Source:      host,
			Options:     options,
		}, nil
	}

	// Bind mount volumes specified for this particular Run() invocation
	for _, i := range optionMounts {
		logrus.Debugf("setting up mounted volume at %q", i.Destination)
		mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options)
		if err != nil {
			return nil, err
		}
		mounts = append(mounts, mount)
	}
	// Bind mount volumes given by the user when the container was created
	for _, i := range volumeMounts {
		var options []string
		spliti := parse.SplitStringWithColonEscape(i)
		if len(spliti) > 2 {
			options = strings.Split(spliti[2], ",")
		}
		options = append(options, "rbind")
		mount, err := parseMount("bind", spliti[0], spliti[1], options)
		if err != nil {
			return nil, err
		}
		mounts = append(mounts, mount)
	}
	return mounts, nil
}
+
+func setupMaskedPaths(g *generate.Generator) {
+ for _, mp := range config.DefaultMaskedPaths {
+ g.AddLinuxMaskedPaths(mp)
+ }
+}
+
+func setupReadOnlyPaths(g *generate.Generator) {
+ for _, rp := range config.DefaultReadOnlyPaths {
+ g.AddLinuxReadonlyPaths(rp)
+ }
+}
+
+func setupCapAdd(g *generate.Generator, caps ...string) error {
+ for _, cap := range caps {
+ if err := g.AddProcessCapabilityBounding(cap); err != nil {
+ return fmt.Errorf("adding %q to the bounding capability set: %w", cap, err)
+ }
+ if err := g.AddProcessCapabilityEffective(cap); err != nil {
+ return fmt.Errorf("adding %q to the effective capability set: %w", cap, err)
+ }
+ if err := g.AddProcessCapabilityPermitted(cap); err != nil {
+ return fmt.Errorf("adding %q to the permitted capability set: %w", cap, err)
+ }
+ if err := g.AddProcessCapabilityAmbient(cap); err != nil {
+ return fmt.Errorf("adding %q to the ambient capability set: %w", cap, err)
+ }
+ }
+ return nil
+}
+
+func setupCapDrop(g *generate.Generator, caps ...string) error {
+ for _, cap := range caps {
+ if err := g.DropProcessCapabilityBounding(cap); err != nil {
+ return fmt.Errorf("removing %q from the bounding capability set: %w", cap, err)
+ }
+ if err := g.DropProcessCapabilityEffective(cap); err != nil {
+ return fmt.Errorf("removing %q from the effective capability set: %w", cap, err)
+ }
+ if err := g.DropProcessCapabilityPermitted(cap); err != nil {
+ return fmt.Errorf("removing %q from the permitted capability set: %w", cap, err)
+ }
+ if err := g.DropProcessCapabilityAmbient(cap); err != nil {
+ return fmt.Errorf("removing %q from the ambient capability set: %w", cap, err)
+ }
+ }
+ return nil
+}
+
+func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error {
+ g.ClearProcessCapabilities()
+ if err := setupCapAdd(g, defaultCapabilities...); err != nil {
+ return err
+ }
+ for _, c := range adds {
+ if strings.ToLower(c) == "all" {
+ adds = capabilities.AllCapabilities()
+ break
+ }
+ }
+ for _, c := range drops {
+ if strings.ToLower(c) == "all" {
+ g.ClearProcessCapabilities()
+ return nil
+ }
+ }
+ if err := setupCapAdd(g, adds...); err != nil {
+ return err
+ }
+ return setupCapDrop(g, drops...)
+}
+
+func addOrReplaceMount(mounts []specs.Mount, mount specs.Mount) []specs.Mount {
+ for i := range mounts {
+ if mounts[i].Destination == mount.Destination {
+ mounts[i] = mount
+ return mounts
+ }
+ }
+ return append(mounts, mount)
+}
+
// setupSpecialMountSpecChanges creates special mounts for depending on the namespaces
// logic taken from podman and adapted for buildah
// https://github.com/containers/podman/blob/4ba71f955a944790edda6e007e6d074009d437a7/pkg/specgen/generate/oci.go#L178
func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mount, error) {
	mounts := spec.Mounts
	isRootless := unshare.IsRootless()
	isNewUserns := false
	isNetns := false
	isPidns := false
	isIpcns := false

	// Record which namespaces the spec creates; the mount decisions below
	// all hinge on these flags.
	for _, namespace := range spec.Linux.Namespaces {
		switch namespace.Type {
		case specs.NetworkNamespace:
			isNetns = true
		case specs.UserNamespace:
			isNewUserns = true
		case specs.PIDNamespace:
			isPidns = true
		case specs.IPCNamespace:
			isIpcns = true
		}
	}

	addCgroup := true
	// mount sys when root and no userns or when a new netns is created
	canMountSys := (!isRootless && !isNewUserns) || isNetns
	if !canMountSys {
		// When a fresh sysfs can't be mounted, bind the host's /sys
		// read-only instead and skip the cgroup mount.
		addCgroup = false
		sys := "/sys"
		sysMnt := specs.Mount{
			Destination: sys,
			Type:        "bind",
			Source:      sys,
			Options:     []string{bind.NoBindOption, "rprivate", "nosuid", "noexec", "nodev", "ro", "rbind"},
		}
		mounts = addOrReplaceMount(mounts, sysMnt)
	}

	// Determine whether GID 5 (needed for devpts's default gid=5 option)
	// is actually mapped in the container's user namespace.
	gid5Available := true
	if isRootless {
		_, gids, err := unshare.GetHostIDMappings("")
		if err != nil {
			return nil, err
		}
		gid5Available = checkIdsGreaterThan5(gids)
	}
	if gid5Available && len(spec.Linux.GIDMappings) > 0 {
		gid5Available = checkIdsGreaterThan5(spec.Linux.GIDMappings)
	}
	if !gid5Available {
		// If we have no GID mappings, the gid=5 default option would fail, so drop it.
		devPts := specs.Mount{
			Destination: "/dev/pts",
			Type:        "devpts",
			Source:      "devpts",
			Options:     []string{"rprivate", "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"},
		}
		mounts = addOrReplaceMount(mounts, devPts)
	}

	isUserns := isNewUserns || isRootless

	// In a user namespace that shares the host IPC namespace, a fresh
	// mqueue can't be mounted; bind the host's instead.
	if isUserns && !isIpcns {
		devMqueue := "/dev/mqueue"
		devMqueueMnt := specs.Mount{
			Destination: devMqueue,
			Type:        "bind",
			Source:      devMqueue,
			Options:     []string{bind.NoBindOption, "bind", "nosuid", "noexec", "nodev"},
		}
		mounts = addOrReplaceMount(mounts, devMqueueMnt)
	}
	// Likewise, sharing the host PID namespace means /proc must be bound
	// rather than freshly mounted.
	if isUserns && !isPidns {
		proc := "/proc"
		procMount := specs.Mount{
			Destination: proc,
			Type:        "bind",
			Source:      proc,
			Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"},
		}
		mounts = addOrReplaceMount(mounts, procMount)
	}

	if addCgroup {
		cgroupMnt := specs.Mount{
			Destination: "/sys/fs/cgroup",
			Type:        "cgroup",
			Source:      "cgroup",
			Options:     []string{"rprivate", "nosuid", "noexec", "nodev", "relatime", "rw"},
		}
		mounts = addOrReplaceMount(mounts, cgroupMnt)
	}

	// if userns and host ipc bind mount shm
	if isUserns && !isIpcns {
		// bind mount /dev/shm when it exists
		if _, err := os.Stat("/dev/shm"); err == nil {
			shmMount := specs.Mount{
				Source:      "/dev/shm",
				Type:        "bind",
				Destination: "/dev/shm",
				Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"},
			}
			mounts = addOrReplaceMount(mounts, shmMount)
		}
	} else if shmSize != "" {
		// Otherwise mount a fresh tmpfs at /dev/shm sized per the request.
		shmMount := specs.Mount{
			Source:      "shm",
			Destination: "/dev/shm",
			Type:        "tmpfs",
			Options:     []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=" + shmSize},
		}
		mounts = addOrReplaceMount(mounts, shmMount)
	}

	return mounts, nil
}
+
+func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool {
+ for _, r := range ids {
+ if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
+ return true
+ }
+ }
+ return false
+}
+
+// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
+func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, *lockfile.LockFile, error) {
+ var optionMounts []specs.Mount
+ mount, targetLock, err := volumes.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir)
+ if err != nil {
+ return nil, nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded && targetLock != nil {
+ targetLock.Unlock()
+ }
+ }()
+ optionMounts = append(optionMounts, mount)
+ volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
+ if err != nil {
+ return nil, nil, err
+ }
+ succeeded = true
+ return &volumes[0], targetLock, nil
+}
diff --git a/run_test.go b/run_test.go
new file mode 100644
index 0000000..2d61e24
--- /dev/null
+++ b/run_test.go
@@ -0,0 +1,84 @@
+package buildah
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/opencontainers/runtime-tools/generate"
+)
+
+func TestAddRlimits(t *testing.T) {
+ tt := []struct {
+ name string
+ ulimit []string
+ test func(error, *generate.Generator) error
+ }{
+ {
+ name: "empty ulimit",
+ ulimit: []string{},
+ test: func(e error, g *generate.Generator) error {
+ return e
+ },
+ },
+ {
+ name: "invalid ulimit argument",
+ ulimit: []string{"bla"},
+ test: func(e error, g *generate.Generator) error {
+ if e == nil {
+ return errors.New("expected to receive an error but got nil")
+ }
+ errMsg := "invalid ulimit argument"
+ if !strings.Contains(e.Error(), errMsg) {
+ return fmt.Errorf("expected error message to include %#v in %#v", errMsg, e.Error())
+ }
+ return nil
+ },
+ },
+ {
+ name: "invalid ulimit type",
+ ulimit: []string{"bla=hard"},
+ test: func(e error, g *generate.Generator) error {
+ if e == nil {
+ return errors.New("expected to receive an error but got nil")
+ }
+ errMsg := "invalid ulimit type"
+ if !strings.Contains(e.Error(), errMsg) {
+ return fmt.Errorf("expected error message to include %#v in %#v", errMsg, e.Error())
+ }
+ return nil
+ },
+ },
+ {
+ name: "valid ulimit",
+ ulimit: []string{"fsize=1024:4096"},
+ test: func(e error, g *generate.Generator) error {
+ if e != nil {
+ return e
+ }
+ rlimits := g.Config.Process.Rlimits
+ for _, rlimit := range rlimits {
+ if rlimit.Type == "RLIMIT_FSIZE" {
+ if rlimit.Hard != 4096 {
+ return fmt.Errorf("expected spec to have %#v hard limit set to %v but got %v", rlimit.Type, 4096, rlimit.Hard)
+ }
+ if rlimit.Soft != 1024 {
+ return fmt.Errorf("expected spec to have %#v hard limit set to %v but got %v", rlimit.Type, 1024, rlimit.Soft)
+ }
+ return nil
+ }
+ }
+ return errors.New("expected spec to have RLIMIT_FSIZE")
+ },
+ },
+ }
+
+ for _, tst := range tt {
+ g, _ := generate.New("linux")
+ err := addRlimits(tst.ulimit, &g, []string{})
+ if testErr := tst.test(err, &g); testErr != nil {
+ t.Errorf("test %#v failed: %v", tst.name, testErr)
+ }
+ }
+}
diff --git a/run_unix.go b/run_unix.go
new file mode 100644
index 0000000..68a3dac
--- /dev/null
+++ b/run_unix.go
@@ -0,0 +1,43 @@
+//go:build darwin
+// +build darwin
+
+package buildah
+
+import (
+ "errors"
+
+ "github.com/containers/buildah/define"
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/storage"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
type ContainerDevices define.ContainerDevices

// setChildProcess is a stub on this platform: running containers is only
// supported on Linux.
func setChildProcess() error {
	return errors.New("function not supported on non-linux systems")
}

// runUsingRuntimeMain is a no-op stub on this platform.
func runUsingRuntimeMain() {}

// Run is a stub on this platform: running containers is only supported on
// Linux.
func (b *Builder) Run(command []string, options RunOptions) error {
	return errors.New("function not supported on non-linux systems")
}

// DefaultNamespaceOptions returns the platform's default namespace settings:
// every namespace is listed as private (Host: false).
func DefaultNamespaceOptions() (NamespaceOptions, error) {
	options := NamespaceOptions{
		{Name: string(specs.CgroupNamespace), Host: false},
		{Name: string(specs.IPCNamespace), Host: false},
		{Name: string(specs.MountNamespace), Host: false},
		{Name: string(specs.NetworkNamespace), Host: false},
		{Name: string(specs.PIDNamespace), Host: false},
		{Name: string(specs.UserNamespace), Host: false},
		{Name: string(specs.UTSNamespace), Host: false},
	}
	return options, nil
}

// getNetworkInterface creates the network interface
// (a stub on this platform: always returns nil without error).
func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) {
	return nil, nil
}
diff --git a/run_unsupported.go b/run_unsupported.go
new file mode 100644
index 0000000..b135be7
--- /dev/null
+++ b/run_unsupported.go
@@ -0,0 +1,29 @@
+//go:build !linux && !darwin && !freebsd
+// +build !linux,!darwin,!freebsd
+
+package buildah
+
+import (
+ "errors"
+
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/storage"
+)
+
// setChildProcess is a stub: running containers is only supported on Linux.
func setChildProcess() error {
	return errors.New("function not supported on non-linux systems")
}

// runUsingRuntimeMain is a no-op stub on unsupported platforms.
func runUsingRuntimeMain() {}

// Run is a stub: running containers is only supported on Linux.
func (b *Builder) Run(command []string, options RunOptions) error {
	return errors.New("function not supported on non-linux systems")
}

// DefaultNamespaceOptions is a stub: namespaces are a Linux concept.
func DefaultNamespaceOptions() (NamespaceOptions, error) {
	return NamespaceOptions{}, errors.New("function not supported on non-linux systems")
}

// getNetworkInterface creates the network interface
// (a stub on unsupported platforms: always returns an error).
func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) {
	return nil, errors.New("function not supported on non-linux systems")
}
diff --git a/seccomp.go b/seccomp.go
new file mode 100644
index 0000000..3348a3e
--- /dev/null
+++ b/seccomp.go
@@ -0,0 +1,36 @@
+//go:build seccomp && linux
+// +build seccomp,linux
+
+package buildah
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/containers/common/pkg/seccomp"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
+ switch seccompProfilePath {
+ case "unconfined":
+ spec.Linux.Seccomp = nil
+ case "":
+ seccompConfig, err := seccomp.GetDefaultProfile(spec)
+ if err != nil {
+ return fmt.Errorf("loading default seccomp profile failed: %w", err)
+ }
+ spec.Linux.Seccomp = seccompConfig
+ default:
+ seccompProfile, err := os.ReadFile(seccompProfilePath)
+ if err != nil {
+ return fmt.Errorf("opening seccomp profile failed: %w", err)
+ }
+ seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
+ if err != nil {
+ return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
+ }
+ spec.Linux.Seccomp = seccompConfig
+ }
+ return nil
+}
diff --git a/seccomp_unsupported.go b/seccomp_unsupported.go
new file mode 100644
index 0000000..cba8390
--- /dev/null
+++ b/seccomp_unsupported.go
@@ -0,0 +1,15 @@
+// +build !seccomp !linux
+
+package buildah
+
+import (
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// setupSeccomp is a no-op on builds without seccomp support; it only clears
// any default filter that runtime-tools may have placed into the spec.
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
	if spec.Linux != nil {
		// runtime-tools may have supplied us with a default filter
		spec.Linux.Seccomp = nil
	}
	return nil
}
diff --git a/selinux.go b/selinux.go
new file mode 100644
index 0000000..8cc2bfc
--- /dev/null
+++ b/selinux.go
@@ -0,0 +1,42 @@
+//go:build linux
+// +build linux
+
+package buildah
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/opencontainers/runtime-tools/generate"
+ selinux "github.com/opencontainers/selinux/go-selinux"
+)
+
+func selinuxGetEnabled() bool {
+ return selinux.GetEnabled()
+}
+
+func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
+ if processLabel != "" && selinux.GetEnabled() {
+ g.SetProcessSelinuxLabel(processLabel)
+ g.SetLinuxMountLabel(mountLabel)
+ }
+}
+
+func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error {
+ if !selinuxGetEnabled() || processLabel == "" || mountLabel == "" {
+ // SELinux is completely disabled, or we're not doing anything at all with labeling
+ return nil
+ }
+ pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file")
+ if err != nil {
+ return fmt.Errorf("computing file creation context for pipes: %w", err)
+ }
+ for i := range stdioPipe {
+ pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0])
+ if err := selinux.SetFileLabel(pipeFdName, pipeContext); err != nil && !errors.Is(err, os.ErrNotExist) {
+ return fmt.Errorf("setting file label on %q: %w", pipeFdName, err)
+ }
+ }
+ return nil
+}
diff --git a/selinux_unsupported.go b/selinux_unsupported.go
new file mode 100644
index 0000000..5b19483
--- /dev/null
+++ b/selinux_unsupported.go
@@ -0,0 +1,19 @@
+//go:build !linux
+// +build !linux
+
+package buildah
+
+import (
+ "github.com/opencontainers/runtime-tools/generate"
+)
+
+func selinuxGetEnabled() bool {
+ return false
+}
+
+func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
+}
+
+func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error {
+ return nil
+}
diff --git a/tests/add.bats b/tests/add.bats
new file mode 100644
index 0000000..ac4841b
--- /dev/null
+++ b/tests/add.bats
@@ -0,0 +1,308 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "add-flags-order-verification" {
+ run_buildah 125 add container1 -q /tmp/container1
+ check_options_flag_err "-q"
+
+ run_buildah 125 add container1 --chown /tmp/container1 --quiet
+ check_options_flag_err "--chown"
+
+ run_buildah 125 add container1 /tmp/container1 --quiet
+ check_options_flag_err "--quiet"
+}
+
+@test "add-local-plain" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ mkdir $root/subdir $root/other-subdir
+ # Copy a file to the working directory
+ run_buildah config --workingdir=/ $cid
+ run_buildah add --retry 4 --retry-delay 4s $cid ${TEST_SCRATCH_DIR}/randomfile
+ # Copy a file to a specific subdirectory
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/randomfile /subdir
+ # Copy two files to a specific subdirectory
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/randomfile ${TEST_SCRATCH_DIR}/other-randomfile /other-subdir
+ # Copy two files to a specific location, which succeeds because we can create it as a directory.
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/randomfile ${TEST_SCRATCH_DIR}/other-randomfile /notthereyet-subdir
+ # Copy two files to a specific location, which fails because it's not a directory.
+ run_buildah 125 add $cid ${TEST_SCRATCH_DIR}/randomfile ${TEST_SCRATCH_DIR}/other-randomfile /randomfile
+ # Copy a file to a different working directory
+ run_buildah config --workingdir=/cwd $cid
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah unmount $cid
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+ run_buildah rm $cid
+
+ run_buildah from $WITH_POLICY_JSON new-image
+ newcid=$output
+ run_buildah mount $newcid
+ newroot=$output
+ test -s $newroot/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $newroot/randomfile
+ test -s $newroot/subdir/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $newroot/subdir/randomfile
+ test -s $newroot/other-subdir/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $newroot/other-subdir/randomfile
+ test -s $newroot/other-subdir/other-randomfile
+ cmp ${TEST_SCRATCH_DIR}/other-randomfile $newroot/other-subdir/other-randomfile
+ test -d $newroot/cwd
+ test -s $newroot/cwd/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $newroot/cwd/randomfile
+ run_buildah rm $newcid
+}
+
+@test "add-local-archive" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+
+ dd if=/dev/urandom bs=1024 count=4 of=${TEST_SCRATCH_DIR}/random1
+ dd if=/dev/urandom bs=1024 count=4 of=${TEST_SCRATCH_DIR}/random2
+ tar -c -C ${TEST_SCRATCH_DIR} -f ${TEST_SCRATCH_DIR}/tarball1.tar random1 random2
+ mkdir ${TEST_SCRATCH_DIR}/tarball2
+ dd if=/dev/urandom bs=1024 count=4 of=${TEST_SCRATCH_DIR}/tarball2/tarball2.random1
+ dd if=/dev/urandom bs=1024 count=4 of=${TEST_SCRATCH_DIR}/tarball2/tarball2.random2
+ tar -c -C ${TEST_SCRATCH_DIR} -z -f ${TEST_SCRATCH_DIR}/tarball2.tar.gz tarball2
+ mkdir ${TEST_SCRATCH_DIR}/tarball3
+ dd if=/dev/urandom bs=1024 count=4 of=${TEST_SCRATCH_DIR}/tarball3/tarball3.random1
+ dd if=/dev/urandom bs=1024 count=4 of=${TEST_SCRATCH_DIR}/tarball3/tarball3.random2
+ tar -c -C ${TEST_SCRATCH_DIR} -j -f ${TEST_SCRATCH_DIR}/tarball3.tar.bz2 tarball3
+ mkdir ${TEST_SCRATCH_DIR}/tarball4
+ dd if=/dev/urandom bs=1024 count=4 of=${TEST_SCRATCH_DIR}/tarball4/tarball4.random1
+ dd if=/dev/urandom bs=1024 count=4 of=${TEST_SCRATCH_DIR}/tarball4/tarball4.random2
+ tar -c -C ${TEST_SCRATCH_DIR} -j -f ${TEST_SCRATCH_DIR}/tarball4.tar.bz2 tarball4
+ # Add the files to the working directory, which should extract them all.
+ run_buildah config --workingdir=/ $cid
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/tarball1.tar
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/tarball2.tar.gz
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/tarball3.tar.bz2
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/tarball4.tar.bz2
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+ run_buildah rm $cid
+
+ run_buildah from $WITH_POLICY_JSON new-image
+ newcid=$output
+ run_buildah mount $newcid
+ newroot=$output
+ test -s $newroot/random1
+ cmp ${TEST_SCRATCH_DIR}/random1 $newroot/random1
+ test -s $newroot/random2
+ cmp ${TEST_SCRATCH_DIR}/random2 $newroot/random2
+ test -s $newroot/tarball2/tarball2.random1
+ cmp ${TEST_SCRATCH_DIR}/tarball2/tarball2.random1 $newroot/tarball2/tarball2.random1
+ test -s $newroot/tarball2/tarball2.random2
+ cmp ${TEST_SCRATCH_DIR}/tarball2/tarball2.random2 $newroot/tarball2/tarball2.random2
+ test -s $newroot/tarball3/tarball3.random1
+ cmp ${TEST_SCRATCH_DIR}/tarball3/tarball3.random1 $newroot/tarball3/tarball3.random1
+ test -s $newroot/tarball3/tarball3.random2
+ cmp ${TEST_SCRATCH_DIR}/tarball3/tarball3.random2 $newroot/tarball3/tarball3.random2
+ test -s $newroot/tarball4/tarball4.random1
+ cmp ${TEST_SCRATCH_DIR}/tarball4/tarball4.random1 $newroot/tarball4/tarball4.random1
+ test -s $newroot/tarball4/tarball4.random2
+ cmp ${TEST_SCRATCH_DIR}/tarball4/tarball4.random2 $newroot/tarball4/tarball4.random2
+}
+
+@test "add single file creates absolute path with correct permissions" {
+ _prefetch ubuntu
+ imgName=ubuntu-image
+ createrandom ${TEST_SCRATCH_DIR}/distutils.cfg
+ permission=$(stat -c "%a" ${TEST_SCRATCH_DIR}/distutils.cfg)
+
+ run_buildah from --quiet $WITH_POLICY_JSON ubuntu
+ cid=$output
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/distutils.cfg /usr/lib/python3.7/distutils
+ run_buildah run $cid stat -c "%a" /usr/lib/python3.7/distutils
+ expect_output $permission
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:${imgName}
+ run_buildah rm $cid
+
+ run_buildah from --quiet $WITH_POLICY_JSON ${imgName}
+ newcid=$output
+ run_buildah run $newcid stat -c "%a" /usr/lib/python3.7/distutils
+ expect_output $permission
+}
+
+@test "add single file creates relative path with correct permissions" {
+ _prefetch ubuntu
+ imgName=ubuntu-image
+ createrandom ${TEST_SCRATCH_DIR}/distutils.cfg
+ permission=$(stat -c "%a" ${TEST_SCRATCH_DIR}/distutils.cfg)
+
+ run_buildah from --quiet $WITH_POLICY_JSON ubuntu
+ cid=$output
+ run_buildah add $cid ${TEST_SCRATCH_DIR}/distutils.cfg lib/custom
+ run_buildah run $cid stat -c "%a" lib/custom
+ expect_output $permission
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:${imgName}
+ run_buildah rm $cid
+
+ run_buildah from --quiet $WITH_POLICY_JSON ${imgName}
+ newcid=$output
+ run_buildah run $newcid stat -c "%a" lib/custom
+ expect_output $permission
+}
+
+@test "add with chown" {
+ _prefetch busybox
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah add --chown bin:bin $cid ${TEST_SCRATCH_DIR}/randomfile /tmp/random
+ run_buildah run $cid ls -l /tmp/random
+
+ expect_output --substring bin.*bin
+}
+
+@test "add with chmod" {
+ _prefetch busybox
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah add --chmod 777 $cid ${TEST_SCRATCH_DIR}/randomfile /tmp/random
+ run_buildah run $cid ls -l /tmp/random
+
+ expect_output --substring rwxrwxrwx
+}
+
+@test "add url" {
+ _prefetch busybox
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah add $cid https://github.com/containers/buildah/raw/main/README.md
+ run_buildah run $cid ls /README.md
+
+ run_buildah add $cid https://github.com/containers/buildah/raw/main/README.md /home
+ run_buildah run $cid ls /home/README.md
+}
+
+@test "add relative" {
+ # make sure we don't get thrown by relative source locations
+ _prefetch busybox
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+
+ run_buildah add $cid deny.json /
+ run_buildah run $cid ls /deny.json
+
+ run_buildah add $cid ./docker.json /
+ run_buildah run $cid ls /docker.json
+
+ run_buildah add $cid tools/Makefile /
+ run_buildah run $cid ls /Makefile
+}
+
+@test "add --ignorefile" {
+ mytest=${TEST_SCRATCH_DIR}/mytest
+ mkdir -p ${mytest}
+ touch ${mytest}/mystuff
+ touch ${mytest}/source.go
+ mkdir -p ${mytest}/notmystuff
+ touch ${mytest}/notmystuff/notmystuff
+ cat > ${mytest}/.ignore << _EOF
+*.go
+.ignore
+notmystuff
+_EOF
+
+expect="
+stuff
+stuff/mystuff"
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+
+ run_buildah 125 copy --ignorefile ${mytest}/.ignore $cid ${mytest} /stuff
+ expect_output -- "Error: --ignorefile option requires that you specify a context dir using --contextdir" "container file list"
+
+ run_buildah add --contextdir=${mytest} --ignorefile ${mytest}/.ignore $cid ${mytest} /stuff
+
+ run_buildah mount $cid
+ mnt=$output
+ run find $mnt -printf "%P\n"
+ filelist=$(LC_ALL=C sort <<<"$output")
+ run_buildah umount $cid
+ expect_output --from="$filelist" "$expect" "container file list"
+}
+
+@test "add quietly" {
+ _prefetch busybox
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah add --quiet $cid ${TEST_SCRATCH_DIR}/randomfile /tmp/random
+ expect_output ""
+ run_buildah mount $cid
+ croot=$output
+ cmp ${TEST_SCRATCH_DIR}/randomfile ${croot}/tmp/random
+}
+
+@test "add from container" {
+ _prefetch busybox
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ from=$output
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah add --quiet $from ${TEST_SCRATCH_DIR}/randomfile /tmp/random
+ expect_output ""
+ run_buildah add --quiet $WITH_POLICY_JSON --from $from $cid /tmp/random /tmp/random # absolute path
+ expect_output ""
+ run_buildah add --quiet $WITH_POLICY_JSON --from $from $cid tmp/random /tmp/random2 # relative path
+ expect_output ""
+ run_buildah mount $cid
+ croot=$output
+ cmp ${TEST_SCRATCH_DIR}/randomfile ${croot}/tmp/random
+ cmp ${TEST_SCRATCH_DIR}/randomfile ${croot}/tmp/random2
+}
+
+@test "add from image" {
+ _prefetch busybox
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah add --quiet $WITH_POLICY_JSON --from ubuntu $cid /etc/passwd /tmp/passwd # should pull the image, absolute path
+ expect_output ""
+ run_buildah add --quiet $WITH_POLICY_JSON --from ubuntu $cid etc/passwd /tmp/passwd2 # relative path
+ expect_output ""
+ run_buildah from --quiet $WITH_POLICY_JSON ubuntu
+ ubuntu=$output
+ run_buildah mount $cid
+ croot=$output
+ run_buildah mount $ubuntu
+ ubuntu=$output
+ cmp $ubuntu/etc/passwd ${croot}/tmp/passwd
+ cmp $ubuntu/etc/passwd ${croot}/tmp/passwd2
+}
+
+@test "add url with checksum flag" {
+ _prefetch busybox
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah add --checksum=sha256:4fd3aed66b5488b45fe83dd11842c2324fadcc38e1217bb45fbd28d660afdd39 $cid https://raw.githubusercontent.com/containers/buildah/bf3b55ba74102cc2503eccbaeffe011728d46b20/README.md /
+ run_buildah run $cid ls /README.md
+}
+
+@test "add url with bad checksum" {
+ _prefetch busybox
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah 125 add --checksum=sha256:0000000000000000000000000000000000000000000000000000000000000000 $cid https://raw.githubusercontent.com/containers/buildah/bf3b55ba74102cc2503eccbaeffe011728d46b20/README.md /
+ expect_output --substring "unexpected response digest for \"https://raw.githubusercontent.com/containers/buildah/bf3b55ba74102cc2503eccbaeffe011728d46b20/README.md\": sha256:4fd3aed66b5488b45fe83dd11842c2324fadcc38e1217bb45fbd28d660afdd39, want sha256:0000000000000000000000000000000000000000000000000000000000000000"
+}
+
+@test "add path with checksum flag" {
+ _prefetch busybox
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah 125 add --checksum=sha256:0000000000000000000000000000000000000000000000000000000000000000 $cid ${TEST_SCRATCH_DIR}/randomfile /
+ expect_output --substring "checksum flag is not supported for local sources"
+}
diff --git a/tests/authenticate.bats b/tests/authenticate.bats
new file mode 100644
index 0000000..fe276ec
--- /dev/null
+++ b/tests/authenticate.bats
@@ -0,0 +1,169 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "authenticate: login/logout" {
+ start_registry testuserfoo testpassword
+
+ run_buildah 0 login --cert-dir $REGISTRY_DIR --username testuserfoo --password testpassword localhost:$REGISTRY_PORT
+
+ run_buildah 0 logout localhost:$REGISTRY_PORT
+}
+
+@test "authenticate: with stdin" {
+ start_registry testuserfoo testpassword
+ run_buildah 0 login localhost:$REGISTRY_PORT --cert-dir $REGISTRY_DIR --username testuserfoo --password-stdin <<< testpassword
+ run_buildah 0 logout localhost:$REGISTRY_PORT
+}
+
+@test "authenticate: login/logout should succeed with XDG_RUNTIME_DIR unset" {
+ unset XDG_RUNTIME_DIR
+
+ start_registry testuserfoo testpassword
+
+ run_buildah 0 login --cert-dir $REGISTRY_DIR --username testuserfoo --password testpassword localhost:$REGISTRY_PORT
+
+ run_buildah 0 logout localhost:$REGISTRY_PORT
+}
+
+@test "authenticate: logout should fail with nonexistent authfile" {
+ start_registry testuserfoo testpassword
+
+ run_buildah 0 login --cert-dir $REGISTRY_DIR --username testuserfoo --password testpassword localhost:$REGISTRY_PORT
+
+ run_buildah 125 logout --authfile /tmp/nonexistent localhost:$REGISTRY_PORT
+ expect_output "Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory"
+
+ run_buildah 125 logout --compat-auth-file /tmp/nonexistent localhost:$REGISTRY_PORT
+ expect_output "Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory"
+
+ run_buildah 0 logout localhost:$REGISTRY_PORT
+}
+
+@test "authenticate: logout should fail with inconsistent authfiles" {
+ ambiguous_file=${TEST_SCRATCH_DIR}/ambiguous-auth.json
+ echo '{}' > $ambiguous_file # To make sure we are not hitting the “file not found” path
+
+ # We don’t start a real registry; login should never get that far.
+ run_buildah 125 login --authfile "$ambiguous_file" --compat-auth-file "$ambiguous_file" localhost:5000
+ expect_output "Error: options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously"
+
+ run_buildah 125 logout --authfile "$ambiguous_file" --compat-auth-file "$ambiguous_file" localhost:5000
+ expect_output "Error: options for paths to the credential file and to the Docker-compatible credential file can not be set simultaneously"
+}
+
+@test "authenticate: cert and credentials" {
+ _prefetch alpine
+
+ testuser="testuser$RANDOM"
+ testpassword="testpassword$RANDOM"
+ start_registry "$testuser" "$testpassword"
+
+ # Basic test: should pass
+ run_buildah push --cert-dir $REGISTRY_DIR $WITH_POLICY_JSON --tls-verify=false --creds "$testuser":"$testpassword" alpine localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring "Writing manifest to image destination"
+
+ # With tls-verify=true, should fail due to self-signed cert
+ run_buildah 125 push $WITH_POLICY_JSON --tls-verify=true alpine localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring " x509: certificate signed by unknown authority" \
+ "push with --tls-verify=true"
+
+ # wrong credentials: should fail
+ run_buildah 125 from --cert-dir $REGISTRY_DIR $WITH_POLICY_JSON --creds baduser:badpassword localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring "authentication required"
+ run_buildah 125 from --cert-dir $REGISTRY_DIR $WITH_POLICY_JSON --creds "$testuser":badpassword localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring "authentication required"
+ run_buildah 125 from --cert-dir $REGISTRY_DIR $WITH_POLICY_JSON --creds baduser:"$testpassword" localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring "authentication required"
+
+ # This should work
+ run_buildah from --cert-dir $REGISTRY_DIR --name "my-alpine-work-ctr" $WITH_POLICY_JSON --creds "$testuser":"$testpassword" localhost:$REGISTRY_PORT/my-alpine
+ expect_output --from="${lines[-1]}" "my-alpine-work-ctr"
+
+ # Create Dockerfile for bud tests
+ mkdir -p ${TEST_SCRATCH_DIR}/dockerdir
+ DOCKERFILE=${TEST_SCRATCH_DIR}/dockerdir/Dockerfile
+ /bin/cat <<EOM >$DOCKERFILE
+FROM localhost:$REGISTRY_PORT/my-alpine
+EOM
+
+ # Remove containers and images before bud tests
+ run_buildah rm --all
+ run_buildah rmi -f --all
+
+ # bud test bad password should fail
+ run_buildah 125 bud -f $DOCKERFILE $WITH_POLICY_JSON --tls-verify=false --creds="$testuser":badpassword
+ expect_output --substring "authentication required" \
+ "buildah bud with wrong credentials"
+
+ # bud test this should work
+ run_buildah bud -f $DOCKERFILE $WITH_POLICY_JSON --tls-verify=false --creds="$testuser":"$testpassword" .
+ expect_output --from="${lines[0]}" "STEP 1/1: FROM localhost:$REGISTRY_PORT/my-alpine"
+ expect_output --substring "Writing manifest to image destination"
+}
+
+
+@test "authenticate: with --tls-verify=true" {
+ _prefetch alpine
+
+ start_registry
+
+ # Push with correct credentials: should pass
+ run_buildah push $WITH_POLICY_JSON --tls-verify=true --cert-dir=$REGISTRY_DIR --creds testuser:testpassword alpine localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring "Writing manifest to image destination"
+
+ # Push with wrong credentials: should fail
+ run_buildah 125 push $WITH_POLICY_JSON --tls-verify=true --cert-dir=$REGISTRY_DIR --creds testuser:WRONGPASSWORD alpine localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring "authentication required"
+
+ # Make sure we can fetch it
+ run_buildah from --pull-always --cert-dir=$REGISTRY_DIR --tls-verify=true --creds=testuser:testpassword localhost:$REGISTRY_PORT/my-alpine
+ expect_output --from="${lines[-1]}" "localhost-working-container"
+ cid="${lines[-1]}"
+
+ # Commit with correct credentials
+ run_buildah run $cid touch testfile
+ run_buildah commit $WITH_POLICY_JSON --cert-dir=$REGISTRY_DIR --tls-verify=true --creds=testuser:testpassword $cid docker://localhost:$REGISTRY_PORT/my-alpine
+
+ # Create Dockerfile for bud tests
+ mkdir -p ${TEST_SCRATCH_DIR}/dockerdir
+ DOCKERFILE=${TEST_SCRATCH_DIR}/dockerdir/Dockerfile
+ /bin/cat <<EOM >$DOCKERFILE
+FROM localhost:$REGISTRY_PORT/my-alpine
+RUN rm testfile
+EOM
+
+ # Remove containers and images before bud tests
+ run_buildah rm --all
+ run_buildah rmi -f --all
+
+ # bud with correct credentials
+ run_buildah bud -f $DOCKERFILE $WITH_POLICY_JSON --cert-dir=$REGISTRY_DIR --tls-verify=true --creds=testuser:testpassword .
+ expect_output --from="${lines[0]}" "STEP 1/2: FROM localhost:$REGISTRY_PORT/my-alpine"
+ expect_output --substring "Writing manifest to image destination"
+}
+
+
+@test "authenticate: with cached (not command-line) credentials" {
+ _prefetch alpine
+
+ start_registry
+
+ run_buildah 0 login --tls-verify=false --username testuser --password testpassword localhost:$REGISTRY_PORT
+ expect_output "Login Succeeded!"
+
+ # After login, push should pass
+ run_buildah push $WITH_POLICY_JSON --tls-verify=false alpine localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring "Writing manifest to image destination"
+
+ run_buildah 125 login --tls-verify=false --username testuser --password WRONGPASSWORD localhost:$REGISTRY_PORT
+ expect_output --substring 'logging into "localhost:'"$REGISTRY_PORT"'": invalid username/password' \
+ "buildah login, wrong credentials"
+
+ run_buildah 0 logout localhost:$REGISTRY_PORT
+ expect_output "Removed login credentials for localhost:$REGISTRY_PORT"
+
+ run_buildah 125 push $WITH_POLICY_JSON --tls-verify=false alpine localhost:$REGISTRY_PORT/my-alpine
+ expect_output --substring "authentication required" \
+ "buildah push after buildah logout"
+}
diff --git a/tests/basic.bats b/tests/basic.bats
new file mode 100644
index 0000000..24de5c3
--- /dev/null
+++ b/tests/basic.bats
@@ -0,0 +1,135 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "from" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah rm $cid
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah rm $cid
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON --name i-love-naming-things alpine
+ cid=$output
+ run_buildah rm i-love-naming-things
+}
+
+@test "from-defaultpull" {
+ _prefetch alpine
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah rm $cid
+}
+
+@test "from-scratch" {
+ run_buildah from --pull=false $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah rm $cid
+ run_buildah from --pull=true $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah rm $cid
+}
+
+@test "from-nopull" {
+ run_buildah 125 from --pull-never $WITH_POLICY_JSON alpine
+}
+
+@test "mount" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah unmount $cid
+ run_buildah mount $cid
+ root=$output
+ touch $root/foobar
+ run_buildah unmount $cid
+ run_buildah rm $cid
+}
+
+@test "by-name" {
+ run_buildah from $WITH_POLICY_JSON --name scratch-working-image-for-test scratch
+ cid=$output
+ run_buildah mount scratch-working-image-for-test
+ root=$output
+ run_buildah unmount scratch-working-image-for-test
+ run_buildah rm scratch-working-image-for-test
+}
+
+@test "commit" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ cp ${TEST_SCRATCH_DIR}/randomfile $root/randomfile
+ run_buildah unmount $cid
+ run_buildah commit --iidfile ${TEST_SCRATCH_DIR}/output.iid $WITH_POLICY_JSON $cid containers-storage:new-image
+ iid=$(< ${TEST_SCRATCH_DIR}/output.iid)
+ assert "$iid" =~ "sha256:[0-9a-f]{64}"
+ run_buildah rmi $iid
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+ run_buildah rm $cid
+ run_buildah from --quiet $WITH_POLICY_JSON new-image
+ newcid=$output
+ run_buildah mount $newcid
+ newroot=$output
+ test -s $newroot/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $newroot/randomfile
+ cp ${TEST_SCRATCH_DIR}/other-randomfile $newroot/other-randomfile
+ run_buildah commit $WITH_POLICY_JSON $newcid containers-storage:other-new-image
+ # Not an allowed ordering of arguments and flags. Check that it's rejected.
+ run_buildah 125 commit $newcid $WITH_POLICY_JSON containers-storage:rejected-new-image
+ run_buildah commit $WITH_POLICY_JSON $newcid containers-storage:another-new-image
+ run_buildah commit $WITH_POLICY_JSON $newcid yet-another-new-image
+ run_buildah commit $WITH_POLICY_JSON $newcid containers-storage:gratuitous-new-image
+ run_buildah unmount $newcid
+ run_buildah rm $newcid
+
+ run_buildah from --quiet $WITH_POLICY_JSON other-new-image
+ othernewcid=$output
+ run_buildah mount $othernewcid
+ othernewroot=$output
+ test -s $othernewroot/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $othernewroot/randomfile
+ test -s $othernewroot/other-randomfile
+ cmp ${TEST_SCRATCH_DIR}/other-randomfile $othernewroot/other-randomfile
+ run_buildah rm $othernewcid
+
+ run_buildah from --quiet $WITH_POLICY_JSON another-new-image
+ anothernewcid=$output
+ run_buildah mount $anothernewcid
+ anothernewroot=$output
+ test -s $anothernewroot/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $anothernewroot/randomfile
+ test -s $anothernewroot/other-randomfile
+ cmp ${TEST_SCRATCH_DIR}/other-randomfile $anothernewroot/other-randomfile
+ run_buildah rm $anothernewcid
+
+ run_buildah from --quiet $WITH_POLICY_JSON yet-another-new-image
+ yetanothernewcid=$output
+ run_buildah mount $yetanothernewcid
+ yetanothernewroot=$output
+ test -s $yetanothernewroot/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $yetanothernewroot/randomfile
+ test -s $yetanothernewroot/other-randomfile
+ cmp ${TEST_SCRATCH_DIR}/other-randomfile $yetanothernewroot/other-randomfile
+ run_buildah delete $yetanothernewcid
+
+ run_buildah from --quiet $WITH_POLICY_JSON new-image
+ newcid=$output
+ run_buildah commit --rm $WITH_POLICY_JSON $newcid containers-storage:remove-container-image
+ run_buildah 125 mount $newcid
+
+ run_buildah rmi remove-container-image
+ run_buildah rmi containers-storage:other-new-image
+ run_buildah rmi another-new-image
+ run_buildah images -q
+ assert "$output" != "" "images -q"
+ run_buildah rmi -a
+ run_buildah images -q
+ expect_output ""
+}
diff --git a/tests/blobcache.bats b/tests/blobcache.bats
new file mode 100644
index 0000000..686379d
--- /dev/null
+++ b/tests/blobcache.bats
@@ -0,0 +1,210 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "blobcache-pull" {
+ blobcachedir=${TEST_SCRATCH_DIR}/cache
+ mkdir -p ${blobcachedir}
+ # Pull an image using a fresh directory for the blob cache.
+ run_buildah pull --blob-cache=${blobcachedir} $WITH_POLICY_JSON registry.k8s.io/pause
+ # Check that we dropped some files in there.
+ run find ${blobcachedir} -type f
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "${#lines[@]}" -gt 0 ]
+}
+
+@test "blobcache-from" {
+ blobcachedir=${TEST_SCRATCH_DIR}/cache
+ mkdir -p ${blobcachedir}
+ # Pull an image using a fresh directory for the blob cache.
+ run_buildah from --blob-cache=${blobcachedir} $WITH_POLICY_JSON registry.k8s.io/pause
+ # Check that we dropped some files in there.
+ run find ${blobcachedir} -type f
+ echo "$output"
+ [ "$status" -eq 0 ]
+ [ "${#lines[*]}" -gt 0 ]
+}
+
+
+function _check_matches() {
+ local destdir="$1"
+ local blobcachedir="$2"
+
+ # Look for layer blobs in the destination that match the ones in the cache.
+ local matched=0
+ local unmatched=0
+ for content in ${destdir}/* ; do
+ match=false
+ for blob in ${blobcachedir}/* ; do
+ if cmp -s ${content} ${blob} ; then
+ echo $(file ${blob}) and ${content} have the same contents, was cached
+ match=true
+ break
+ fi
+ done
+ if ${match} ; then
+ matched=$(( ${matched} + 1 ))
+ else
+ unmatched=$(( ${unmatched} + 1 ))
+ echo ${content} was not cached
+ fi
+ done
+
+ expect_output --from="$matched" "$3" "$4 should match"
+ expect_output --from="$unmatched" "$5" "$6 should not match"
+}
+
+# Integration test for https://github.com/containers/image/pull/1645
+@test "blobcache: blobs must be reused when pushing across registry" {
+ start_registry
+ run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+ outputdir=${TEST_SCRATCH_DIR}/outputdir
+ mkdir -p ${outputdir}
+ run podman run --rm --mount type=bind,src=${TEST_SCRATCH_DIR}/test.auth,target=/test.auth,Z --mount type=bind,src=${outputdir},target=/output,Z --net host quay.io/skopeo/stable copy --preserve-digests --authfile=/test.auth --tls-verify=false docker://registry.fedoraproject.org/fedora-minimal dir:/output
+
+ run_buildah rmi --all -f
+ run_buildah pull dir:${outputdir}
+ run_buildah images -a --format '{{.ID}}'
+ cid=$output
+ run_buildah --log-level debug push --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth $cid docker://localhost:${REGISTRY_PORT}/test
+ # must not contain "Skipping blob" since push must happen
+ assert "$output" !~ "Skipping blob"
+
+ # Clear local image and c/image's blob-info-cache
+ run_buildah rmi --all -f
+ if is_rootless;
+ then
+ run rm $HOME/.local/share/containers/cache/blob-info-cache-v1.sqlite
+ assert "$status" -eq 0 "status of 'run rm $HOME/.local/share/containers/cache/blob-info-cache-v1.sqlite' must be 0"
+ else
+ run rm /var/lib/containers/cache/blob-info-cache-v1.sqlite
+ assert "$status" -eq 0 "status of 'run rm /var/lib/containers/cache/blob-info-cache-v1.sqlite' must be 0"
+ fi
+
+ # In first push blob must be skipped after vendoring https://github.com/containers/image/pull/1645
+ run_buildah pull dir:${outputdir}
+ run_buildah images -a --format '{{.ID}}'
+ cid=$output
+ run_buildah --log-level debug push --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth $cid docker://localhost:${REGISTRY_PORT}/test
+ expect_output --substring "Skipping blob"
+}
+
+@test "blobcache-commit" {
+ blobcachedir=${TEST_SCRATCH_DIR}/cache
+ mkdir -p ${blobcachedir}
+ # Pull an image using a fresh directory for the blob cache.
+ run_buildah from --quiet --blob-cache=${blobcachedir} $WITH_POLICY_JSON registry.k8s.io/pause
+ ctr="$output"
+ run_buildah add ${ctr} $BUDFILES/add-file/file /
+ # Commit the image without using the blob cache, using compression so that uncompressed blobs
+ # in the cache which we inherited from our base image won't be matched.
+ doomeddir=${TEST_SCRATCH_DIR}/doomed
+ mkdir -p ${doomeddir}
+ run_buildah commit $WITH_POLICY_JSON --disable-compression=false ${ctr} dir:${doomeddir}
+ _check_matches $doomeddir $blobcachedir \
+ 0 "nothing" \
+ 6 "everything"
+
+ # Commit the image using the blob cache, again using compression. We'll have recorded the
+ # compressed digests that match the uncompressed digests the last time around, so we should
+ # get some matches this time.
+ destdir=${TEST_SCRATCH_DIR}/dest
+ mkdir -p ${destdir}
+ ls -l ${blobcachedir}
+ run_buildah commit $WITH_POLICY_JSON --blob-cache=${blobcachedir} --disable-compression=false ${ctr} dir:${destdir}
+ _check_matches $destdir $blobcachedir \
+ 5 "base layers, new layer, config, and manifest" \
+ 1 "version"
+}
+
+@test "blobcache-push" {
+ target=targetimage
+ blobcachedir=${TEST_SCRATCH_DIR}/cache
+ mkdir -p ${blobcachedir}
+ # Pull an image using a fresh directory for the blob cache.
+ run_buildah from --quiet --blob-cache=${blobcachedir} $WITH_POLICY_JSON registry.k8s.io/pause
+ ctr="$output"
+ run_buildah add ${ctr} $BUDFILES/add-file/file /
+ # Commit the image using the blob cache.
+ ls -l ${blobcachedir}
+ run_buildah commit $WITH_POLICY_JSON --blob-cache=${blobcachedir} --disable-compression=false ${ctr} ${target}
+ # Try to push the image without the blob cache.
+ doomeddir=${TEST_SCRATCH_DIR}/doomed
+ mkdir -p ${doomeddir}
+ ls -l ${blobcachedir}
+ run_buildah push $WITH_POLICY_JSON ${target} dir:${doomeddir}
+ _check_matches $doomeddir $blobcachedir \
+ 2 "only config and new layer" \
+ 4 "version, manifest, base layers"
+
+ # Now try to push the image using the blob cache.
+ destdir=${TEST_SCRATCH_DIR}/dest
+ mkdir -p ${destdir}
+ ls -l ${blobcachedir}
+
+ run_buildah push $WITH_POLICY_JSON --blob-cache=${blobcachedir} ${target} dir:${destdir}
+ _check_matches $destdir $blobcachedir \
+ 5 "base image layers, new layer, config, and manifest" \
+ 1 "version"
+}
+
+@test "blobcache-build-compressed-using-dockerfile-explicit-push" {
+ blobcachedir=${TEST_SCRATCH_DIR}/cache
+ mkdir -p ${blobcachedir}
+ target=new-image
+ # Build an image while pulling the base image. Compress the layers so that they get added
+ # to the blob cache in their compressed forms.
+ run_buildah build-using-dockerfile -t ${target} --pull-always $WITH_POLICY_JSON --blob-cache=${blobcachedir} --disable-compression=false $BUDFILES/add-file
+ # Now try to push the image using the blob cache. The blob cache will only suggest the
+ # compressed version of a blob if it's been told that we want to compress things, so
+ # we also request compression here to avoid having the copy logic just compress the
+ # uncompressed copy again.
+ destdir=${TEST_SCRATCH_DIR}/dest
+ mkdir -p ${destdir}
+ run_buildah push $WITH_POLICY_JSON --blob-cache=${blobcachedir} --disable-compression=false ${target} dir:${destdir}
+ _check_matches $destdir $blobcachedir \
+ 4 "config, base layer, new layer, and manifest" \
+ 1 "version"
+}
+
+@test "blobcache-build-uncompressed-using-dockerfile-explicit-push" {
+ blobcachedir=${TEST_SCRATCH_DIR}/cache
+ mkdir -p ${blobcachedir}
+ target=new-image
+ # Build an image while pulling the base image.
+ run_buildah build-using-dockerfile -t ${target} -D --pull-always --blob-cache=${blobcachedir} $WITH_POLICY_JSON $BUDFILES/add-file
+ # Now try to push the image using the blob cache.
+ destdir=${TEST_SCRATCH_DIR}/dest
+ mkdir -p ${destdir}
+ run_buildah push $WITH_POLICY_JSON --blob-cache=${blobcachedir} ${target} dir:${destdir}
+ _check_matches $destdir $blobcachedir \
+ 2 "config and previously-compressed base layer" \
+ 3 "version, new layer, and manifest"
+}
+
+@test "blobcache-build-compressed-using-dockerfile-implicit-push" {
+ blobcachedir=${TEST_SCRATCH_DIR}/cache
+ mkdir -p ${blobcachedir}
+ target=new-image
+ destdir=${TEST_SCRATCH_DIR}/dest
+ mkdir -p ${destdir}
+ # Build an image while pulling the base image, implicitly pushing while writing.
+ run_buildah build-using-dockerfile -t dir:${destdir} --pull-always --blob-cache=${blobcachedir} $WITH_POLICY_JSON $BUDFILES/add-file
+ _check_matches $destdir $blobcachedir \
+ 4 "base image, layer, config, and manifest" \
+ 1 "version"
+}
+
+@test "blobcache-build-uncompressed-using-dockerfile-implicit-push" {
+ blobcachedir=${TEST_SCRATCH_DIR}/cache
+ mkdir -p ${blobcachedir}
+ target=new-image
+ destdir=${TEST_SCRATCH_DIR}/dest
+ mkdir -p ${destdir}
+ # Build an image while pulling the base image, implicitly pushing while writing.
+ run_buildah build-using-dockerfile -t dir:${destdir} -D --pull-always --blob-cache=${blobcachedir} $WITH_POLICY_JSON $BUDFILES/add-file
+ _check_matches $destdir $blobcachedir \
+ 4 "base image, our layer, config, and manifest" \
+ 1 "version"
+}
diff --git a/tests/bud.bats b/tests/bud.bats
new file mode 100644
index 0000000..878a159
--- /dev/null
+++ b/tests/bud.bats
@@ -0,0 +1,6529 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "bud with a path to a Dockerfile (-f) containing a non-directory entry" {
+ run_buildah 125 build -f $BUDFILES/non-directory-in-path/non-directory/Dockerfile
+ expect_output --substring "non-directory/Dockerfile: not a directory"
+}
+
+@test "bud stdio is usable pipes" {
+ run_buildah build $BUDFILES/stdio
+}
+
+@test "bud: build manifest list and --add-compression zstd" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+_EOF
+
+ start_registry
+ run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+ run_buildah build $WITH_POLICY_JSON -t image1 --platform linux/amd64 -f $contextdir/Dockerfile1
+ run_buildah build $WITH_POLICY_JSON -t image2 --platform linux/arm64 -f $contextdir/Dockerfile1
+
+ run_buildah manifest create foo
+ run_buildah manifest add foo image1
+ run_buildah manifest add foo image2
+
+ run_buildah manifest push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --all --add-compression zstd --tls-verify=false foo docker://localhost:${REGISTRY_PORT}/list
+
+ run_buildah manifest inspect --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false localhost:${REGISTRY_PORT}/list
+ list="$output"
+
+ validate_instance_compression "0" "$list" "amd64" "gzip"
+ validate_instance_compression "1" "$list" "arm64" "gzip"
+ validate_instance_compression "2" "$list" "amd64" "zstd"
+ validate_instance_compression "3" "$list" "arm64" "zstd"
+}
+
+@test "bud: build manifest list with --add-compression zstd, --compression and --force-compression" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+_EOF
+
+ start_registry
+ run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+ run_buildah build $WITH_POLICY_JSON -t image1 --platform linux/amd64 -f $contextdir/Dockerfile1
+ run_buildah build $WITH_POLICY_JSON -t image2 --platform linux/arm64 -f $contextdir/Dockerfile1
+
+ run_buildah manifest create foo
+ run_buildah manifest add foo image1
+ run_buildah manifest add foo image2
+
+ run_buildah manifest push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --all --add-compression zstd --tls-verify=false foo docker://localhost:${REGISTRY_PORT}/list
+
+ run_buildah manifest inspect --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false localhost:${REGISTRY_PORT}/list
+ list="$output"
+
+ validate_instance_compression "0" "$list" "amd64" "gzip"
+ validate_instance_compression "1" "$list" "arm64" "gzip"
+ validate_instance_compression "2" "$list" "amd64" "zstd"
+ validate_instance_compression "3" "$list" "arm64" "zstd"
+
+ # Pushing again should keep everything intact if original compression is `gzip` and `--force-compression` is specified
+ run_buildah manifest push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --all --add-compression zstd --compression-format gzip --force-compression --tls-verify=false foo docker://localhost:${REGISTRY_PORT}/list
+
+ run_buildah manifest inspect --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false localhost:${REGISTRY_PORT}/list
+ list="$output"
+
+ validate_instance_compression "0" "$list" "amd64" "gzip"
+ validate_instance_compression "1" "$list" "arm64" "gzip"
+ validate_instance_compression "2" "$list" "amd64" "zstd"
+ validate_instance_compression "3" "$list" "arm64" "zstd"
+
+ # Pushing again without --force-compression but with --compression-format should do the same thing
+ run_buildah manifest push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --all --add-compression zstd --compression-format gzip --tls-verify=false foo docker://localhost:${REGISTRY_PORT}/list
+
+ run_buildah manifest inspect --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false localhost:${REGISTRY_PORT}/list
+ list="$output"
+
+ validate_instance_compression "0" "$list" "amd64" "gzip"
+ validate_instance_compression "1" "$list" "arm64" "gzip"
+ validate_instance_compression "2" "$list" "amd64" "zstd"
+ validate_instance_compression "3" "$list" "arm64" "zstd"
+}
+
+@test "Multi-stage should not remove used base-image without --layers" {
+ run_buildah build -t parent-one -f $BUDFILES/multi-stage-only-base/Containerfile1
+ run_buildah build -t parent-two -f $BUDFILES/multi-stage-only-base/Containerfile2
+ run_buildah build -t multi-stage -f $BUDFILES/multi-stage-only-base/Containerfile3
+ run_buildah images -a
+ expect_output --substring "parent-one" "parent one must not be removed"
+}
+
+@test "no layer should be created on scratch" {
+ run_buildah build --layers --label "label1=value1" -t test -f $BUDFILES/from-scratch/Containerfile
+ run_buildah inspect -f '{{len .Docker.RootFS.DiffIDs}}' test
+ expect_output "0" "layer should not exist"
+ run_buildah build --layers -t test -f $BUDFILES/from-scratch/Containerfile
+ run_buildah inspect -f '{{len .Docker.RootFS.DiffIDs}}' test
+ expect_output "0" "layer should not exist"
+}
+
+@test "bud: build push with --force-compression" {
+ skip_if_no_podman
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+_EOF
+
+ start_registry
+ run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+ run_buildah build $WITH_POLICY_JSON -t image1 --platform linux/amd64 -f $contextdir/Dockerfile1
+
+ run_buildah push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false --compression-format gzip image1 docker://localhost:${REGISTRY_PORT}/image
+ run podman run --rm --mount type=bind,src=${TEST_SCRATCH_DIR}/test.auth,target=/test.auth,Z --net host quay.io/skopeo/stable inspect --authfile=/test.auth --tls-verify=false --raw docker://localhost:${REGISTRY_PORT}/image
+ # layers should have no trace of zstd since push was with --compression-format gzip
+ assert "$output" !~ "zstd" "zstd found in layers where push was with --compression-format gzip"
+ run_buildah push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false --compression-format zstd --force-compression=false image1 docker://localhost:${REGISTRY_PORT}/image
+ run podman run --rm --mount type=bind,src=${TEST_SCRATCH_DIR}/test.auth,target=/test.auth,Z --net host quay.io/skopeo/stable inspect --authfile=/test.auth --tls-verify=false --raw docker://localhost:${REGISTRY_PORT}/image
+ # layers should have no trace of zstd since push is --force-compression=false
+ assert "$output" !~ "zstd" "zstd found even though push was without --force-compression"
+ run_buildah push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false --compression-format zstd image1 docker://localhost:${REGISTRY_PORT}/image
+ run podman run --rm --mount type=bind,src=${TEST_SCRATCH_DIR}/test.auth,target=/test.auth,Z --net host quay.io/skopeo/stable inspect --authfile=/test.auth --tls-verify=false --raw docker://localhost:${REGISTRY_PORT}/image
+ # layers should contain `zstd`
+ expect_output --substring "zstd" "layers must contain zstd compression"
+ run_buildah push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false --compression-format zstd --force-compression image1 docker://localhost:${REGISTRY_PORT}/image
+ run podman run --rm --mount type=bind,src=${TEST_SCRATCH_DIR}/test.auth,target=/test.auth,Z --net host quay.io/skopeo/stable inspect --authfile=/test.auth --tls-verify=false --raw docker://localhost:${REGISTRY_PORT}/image
+ # layers should contain `zstd`
+ expect_output --substring "zstd" "layers must contain zstd compression"
+}
+
+@test "bud with --dns* flags" {
+ _prefetch alpine
+
+ for dnsopt in --dns --dns-option --dns-search; do
+ run_buildah 125 build $dnsopt=example.com --network=none $WITH_POLICY_JSON -f $BUDFILES/dns/Dockerfile $BUDFILES/dns
+ expect_output "Error: the $dnsopt option cannot be used with --network=none" "dns options should not be allowed with --network=none"
+ done
+
+ run_buildah build --dns-search=example.com --dns=223.5.5.5 --dns-option=use-vc $WITH_POLICY_JSON -f $BUDFILES/dns/Dockerfile $BUDFILES/dns
+ expect_output --substring "search example.com"
+ expect_output --substring "nameserver 223.5.5.5"
+ expect_output --substring "options use-vc"
+}
+
+@test "build with inline RUN --network=host" {
+ #hostns=$(readlink /proc/self/ns/net)
+ run readlink /proc/self/ns/net
+ hostns="$output"
+ run_buildah build $WITH_POLICY_JSON -t source -f $BUDFILES/inline-network/Dockerfile1
+ expect_output --from="${lines[8]}" "${hostns}"
+}
+
+@test "build with inline RUN --network=none" {
+ run_buildah 1 build $WITH_POLICY_JSON -t source -f $BUDFILES/inline-network/Dockerfile2
+ expect_output --substring "wget: bad address"
+}
+
+@test "build with inline RUN --network=fake" {
+ run_buildah 125 build $WITH_POLICY_JSON -t source -f $BUDFILES/inline-network/Dockerfile3
+ expect_output --substring "unsupported value"
+}
+
+@test "build with inline default RUN --network=default" {
+ skip_if_chroot
+ _prefetch alpine
+ run readlink /proc/self/ns/net
+ hostns=$output
+ run_buildah build --network=host $WITH_POLICY_JSON -t source -f $BUDFILES/inline-network/Dockerfile4
+ firstns=${lines[2]}
+ assert "${hostns}" == "$firstns"
+ run_buildah build --network=private $WITH_POLICY_JSON -t source -f $BUDFILES/inline-network/Dockerfile4
+ secondns=${lines[2]}
+ assert "$secondns" != "$firstns"
+}
+
+
+@test "bud with ignoresymlink on default file" {
+ cat > /tmp/private_file << _EOF
+hello
+_EOF
+
+run_buildah build $WITH_POLICY_JSON -t test -f Dockerfile $BUDFILES/container-ignoresymlink
+# Default file must not point to symlink so hello should not be ignored from build context
+expect_output --substring "hello"
+
+}
+
+#Verify https://github.com/containers/buildah/issues/4342
+@test "buildkit-mount type=cache should not hang if cache is wiped in between" {
+ containerfile=$BUDFILES/cache-mount-locked/Containerfile
+ run_buildah build $WITH_POLICY_JSON --build-arg WIPE_CACHE=1 -t source -f $containerfile $BUDFILES/cache-mount-locked
+ # build should be success and must contain `hello` from `file` in last step
+ expect_output --substring "hello"
+}
+
+# Test for https://github.com/containers/buildah/pull/4295
+@test "build test warning for preconfigured TARGETARCH, TARGETOS, TARGETPLATFORM or TARGETVARIANT" {
+ _prefetch alpine
+ containerfile=$BUDFILES/platform-sets-args/Containerfile
+
+ # Containerfile must contain one or more (four, as of 2022-10) lines
+ # of the form 'ARG TARGETxxx' for each of the variables of interest.
+ local -a checkvars=($(sed -ne 's/^ARG //p' <$containerfile))
+ assert "${checkvars[*]}" != "" \
+ "INTERNAL ERROR! No 'ARG xxx' lines in $containerfile!"
+
+ # With explicit and full --platform, buildah should not warn.
+ run_buildah build $WITH_POLICY_JSON --platform linux/amd64/v2 \
+ -t source -f $containerfile
+ assert "$output" !~ "missing .* build argument" \
+ "With explicit --platform, buildah should not warn"
+
+ # Likewise with individual args
+ run_buildah build $WITH_POLICY_JSON --os linux --arch amd64 --variant v2 \
+ -t source -f $containerfile
+ assert "$output" !~ "missing .* build argument" \
+ "With explicit --os + --arch + --variant, buildah should not warn"
+
+ # FIXME FIXME FIXME: #4319: with --os only, buildah should not warn about OS
+ if false; then
+ run_buildah build $WITH_POLICY_JSON --os linux \
+ -t source -f $containerfile
+ assert "$output" !~ "missing.*TARGETOS" \
+ "With explicit --os (but no arch/variant), buildah should not warn about TARGETOS"
+ # FIXME: add --arch test too, and maybe make this cleaner
+ fi
+
+}
+
+@test "build-conflicting-isolation-chroot-and-network" {
+ _prefetch alpine
+ cat > ${TEST_SCRATCH_DIR}/Containerfile << _EOF
+FROM alpine
+RUN ping -c 1 4.2.2.2
+_EOF
+
+ run_buildah 125 build --network=none --isolation=chroot $WITH_POLICY_JSON ${TEST_SCRATCH_DIR}
+ expect_output --substring "cannot set --network other than host with --isolation chroot"
+}
+
+@test "bud with .dockerignore #1" {
+ _prefetch alpine busybox
+ run_buildah 125 build -t testbud $WITH_POLICY_JSON -f $BUDFILES/dockerignore/Dockerfile $BUDFILES/dockerignore
+ expect_output --substring 'building.*"COPY subdir \./".*no such file or directory'
+
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $BUDFILES/dockerignore/Dockerfile.succeed $BUDFILES/dockerignore
+
+ run_buildah from --name myctr testbud
+
+ run_buildah 1 run myctr ls -l test1.txt
+
+ run_buildah run myctr ls -l test2.txt
+
+ run_buildah 1 run myctr ls -l sub1.txt
+
+ run_buildah 1 run myctr ls -l sub2.txt
+
+ run_buildah 1 run myctr ls -l subdir/
+}
+
+@test "bud build with heredoc content" {
+ run_buildah build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile .
+ expect_output --substring "print first line from heredoc"
+ expect_output --substring "print second line from heredoc"
+ expect_output --substring "Heredoc writing first file"
+ expect_output --substring "some text of first file"
+ expect_output --substring "file2 from python"
+ expect_output --substring "(your index page goes here)"
+ expect_output --substring "(robots content)"
+ expect_output --substring "(humans content)"
+ expect_output --substring "this is the output of test6 part1"
+ expect_output --substring "this is the output of test6 part2"
+ expect_output --substring "this is the output of test7 part1"
+ expect_output --substring "this is the output of test7 part2"
+ expect_output --substring "this is the output of test7 part3"
+ expect_output --substring "this is the output of test8 part1"
+ expect_output --substring "this is the output of test8 part2"
+}
+
+@test "bud build with heredoc content which is a bash file" {
+ skip_if_in_container
+ _prefetch busybox
+ run_buildah build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile.bash_file .
+ expect_output --substring "this is the output of test9"
+ expect_output --substring "this is the output of test10"
+}
+
+@test "bud build with heredoc verify mount leak" {
+ skip_if_in_container
+ _prefetch alpine
+ run_buildah 1 build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile.verify_mount_leak .
+ expect_output --substring "this is the output of test"
+ expect_output --substring "ls: /dev/pipes: No such file or directory"
+}
+
+@test "bud with .containerignore" {
+ _prefetch alpine busybox
+ run_buildah 125 build -t testbud $WITH_POLICY_JSON -f $BUDFILES/containerignore/Dockerfile $BUDFILES/containerignore
+ expect_output --substring 'building.*"COPY subdir \./".*no such file or directory'
+
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $BUDFILES/containerignore/Dockerfile.succeed $BUDFILES/containerignore
+
+ run_buildah from --name myctr testbud
+
+ run_buildah 1 run myctr ls -l test1.txt
+
+ run_buildah run myctr ls -l test2.txt
+
+ run_buildah 1 run myctr ls -l sub1.txt
+
+ run_buildah 1 run myctr ls -l sub2.txt
+
+ run_buildah 1 run myctr ls -l subdir/
+}
+
+@test "bud with .dockerignore - unmatched" {
+ # Here .dockerignore contains 'unmatched', which will not match anything.
+ # Therefore everything in the subdirectory should be copied into the image.
+ #
+ # We need to do this from a tmpdir, not the original or distributed
+ # bud subdir, because of rpm: as of 2020-04-01 rpmbuild 4.16 alpha
+ # on rawhide no longer packages circular symlinks (rpm issue #1159).
+ # We used to include these symlinks in git and the rpm; now we need to
+ # set them up manually as part of test setup to be able to package tests.
+ local contextdir=${TEST_SCRATCH_DIR}/dockerignore2
+ cp -a $BUDFILES/dockerignore2 $contextdir
+
+ # Create symlinks, including bad ones
+ ln -sf subdir $contextdir/symlink
+ ln -sf circular-link $contextdir/subdir/circular-link
+ ln -sf no-such-file $contextdir/subdir/dangling-link
+
+ # Build, create a container, mount it, and list all files therein
+ run_buildah build -t testbud2 $WITH_POLICY_JSON $contextdir
+
+ run_buildah from --pull=false testbud2
+ cid=$output
+
+ run_buildah mount $cid
+ mnt=$output
+ run find $mnt -printf "%P(%l)\n"
+ filelist=$(LC_ALL=C sort <<<"$output")
+ run_buildah umount $cid
+
+ # Format is: filename, and, in parentheses, symlink target (usually empty)
+ # The list below has been painstakingly crafted; please be careful if
+ # you need to touch it (e.g. if you add new files/symlinks)
+ expect="()
+.dockerignore()
+Dockerfile()
+subdir()
+subdir/circular-link(circular-link)
+subdir/dangling-link(no-such-file)
+subdir/sub1.txt()
+subdir/subsubdir()
+subdir/subsubdir/subsub1.txt()
+symlink(subdir)"
+
+ # If this test ever fails, the 'expect' message will be almost impossible
+ # for humans to read -- sorry, I never implemented multi-line comparisons.
+ # Should this ever happen, uncomment these two lines and run tests in
+ # your own vm; then diff the two files.
+ #echo "$filelist" >${TMPDIR}/filelist.actual
+ #echo "$expect" >${TMPDIR}/filelist.expect
+
+ expect_output --from="$filelist" "$expect" "container file list"
+}
+
+@test "bud with .dockerignore #2" {
+ run_buildah 125 build -t testbud3 $WITH_POLICY_JSON $BUDFILES/dockerignore3
+ expect_output --substring 'building.*"COPY test1.txt /upload/test1.txt".*no such file or directory'
+ expect_output --substring $(realpath "$BUDFILES/dockerignore3/.dockerignore")
+}
+
+@test "bud with .dockerignore #4" {
+ run_buildah 125 build -t testbud3 $WITH_POLICY_JSON -f Dockerfile.test $BUDFILES/dockerignore4
+ expect_output --substring 'building.*"COPY test1.txt /upload/test1.txt".*no such file or directory'
+ expect_output --substring '1 filtered out using /[^ ]*/Dockerfile.test.dockerignore'
+}
+
+@test "bud with .dockerignore #6" {
+ _prefetch alpine busybox
+ run_buildah 125 build -t testbud $WITH_POLICY_JSON -f $BUDFILES/dockerignore6/Dockerfile $BUDFILES/dockerignore6
+ expect_output --substring 'building.*"COPY subdir \./".*no such file or directory'
+
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $BUDFILES/dockerignore6/Dockerfile.succeed $BUDFILES/dockerignore6
+
+ run_buildah from --name myctr testbud
+
+ run_buildah 1 run myctr ls -l test1.txt
+
+ run_buildah run myctr ls -l test2.txt
+
+ run_buildah 1 run myctr ls -l sub1.txt
+
+ run_buildah 1 run myctr ls -l sub2.txt
+
+ run_buildah 1 run myctr ls -l subdir/
+}
+
+@test "build with --platform without OS" {
+ run_buildah info --format '{{.host.arch}}'
+ myarch="$output"
+
+ run_buildah build --platform $myarch $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile
+ expect_output --substring "This is built for $myarch"
+
+ ## podman-remote binding has a bug where it sends `--platform` as `/`
+ run_buildah build --platform "/" $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile
+ expect_output --substring "This is built for $myarch"
+}
+
+@test "build with basename resolving default arg" {
+ run_buildah info --format '{{.host.arch}}'
+ myarch="$output"
+ run_buildah info --format '{{.host.variant}}'
+ myvariant="$output"
+
+ run_buildah build --platform linux/$myarch/$myvariant $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile
+ expect_output --substring "This is built for $myarch"
+}
+
+@test "build with basename resolving user arg" {
+ run_buildah build --build-arg CUSTOM_TARGET=first $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile2
+ expect_output --substring "This is built for first"
+ run_buildah build --build-arg CUSTOM_TARGET=second $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile2
+ expect_output --substring "This is built for second"
+}
+
+@test "build with basename resolving user arg from file" {
+ run_buildah build \
+ --build-arg-file $BUDFILES/base-with-arg/first.args \
+ $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile2
+ expect_output --substring "This is built for first"
+
+ run_buildah build \
+ --build-arg-file $BUDFILES/base-with-arg/second.args \
+ $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile2
+ expect_output --substring "This is built for second"
+}
+
+@test "build with basename resolving user arg from latest file in arg list" {
+ run_buildah build \
+ --build-arg-file $BUDFILES/base-with-arg/second.args \
+ --build-arg-file $BUDFILES/base-with-arg/first.args \
+ $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile2
+ expect_output --substring "This is built for first"
+}
+
+@test "build with basename resolving user arg from in arg list" {
+ run_buildah build \
+ --build-arg-file $BUDFILES/base-with-arg/second.args \
+ --build-arg CUSTOM_TARGET=first \
+ $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfile2
+ expect_output --substring "This is built for first"
+}
+
+# Following test should fail since we are trying to use build-arg which
+# was not declared. Honors discussion here: https://github.com/containers/buildah/pull/4061/commits/1237c04d6ae0ee1f027a1f02bf3ab5c57ac7d9b6#r906188374
+@test "build with basename resolving user arg - should fail" {
+ run_buildah 125 build --build-arg CUSTOM_TARGET=first $WITH_POLICY_JSON -t test -f $BUDFILES/base-with-arg/Containerfilebad
+ expect_output --substring "invalid reference format"
+}
+
+# Try building with arch and variant
+# Issue: https://github.com/containers/buildah/issues/4276
+@test "build-with-inline-platform-and-variant" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ cat > $contextdir/Dockerfile << _EOF
+FROM --platform=freebsd/arm64/v8 scratch
+COPY . .
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t test $contextdir
+ run_buildah inspect --format '{{ .OCIv1.Architecture }}' test
+ expect_output --substring "arm64"
+ run_buildah inspect --format '{{ .OCIv1.Variant }}' test
+ expect_output --substring "v8"
+}
+
+# Following test must fail since we are trying to run linux/arm64 on linux/amd64
+# Issue: https://github.com/containers/buildah/issues/3712
+@test "build-with-inline-platform" {
+ # Host arch
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ run_buildah info --format '{{.host.arch}}'
+ myarch="$output"
+ otherarch="arm64"
+
+ # just make sure that other arch is not equivalent to host arch
+ if [[ "$otherarch" == "$myarch" ]]; then
+ otherarch="amd64"
+ fi
+ # ...create a Containerfile with --platform=linux/$otherarch
+ cat > $contextdir/Dockerfile << _EOF
+FROM --platform=linux/${otherarch} alpine
+RUN uname -m
+_EOF
+
+ run_buildah '?' build $WITH_POLICY_JSON -t test $contextdir
+ if [[ $status -eq 0 ]]; then
+ run_buildah inspect --format '{{ .OCIv1.Architecture }}' test
+ expect_output --substring "$otherarch"
+ else
+ # Build failed: we DO NOT have qemu-user-static installed.
+ expect_output --substring "format error"
+ fi
+}
+
+@test "build-with-inline-platform-and-rely-on-defaultbuiltinargs" {
+ # Get host arch
+ run_buildah info --format '{{.host.arch}}'
+ myarch="$output"
+ otherarch="arm64"
+ # just make sure that other arch is not equivalent to host arch
+ if [[ "$otherarch" == "$myarch" ]]; then
+ otherarch="amd64"
+ fi
+
+ run_buildah build --platform linux/$otherarch $WITH_POLICY_JSON -t test -f $BUDFILES/multiarch/Dockerfile.built-in-args
+ expect_output --substring "I'm compiling for linux/$otherarch"
+ expect_output --substring "and tagging for linux/$otherarch"
+ expect_output --substring "and OS linux"
+ expect_output --substring "and ARCH $otherarch"
+ run_buildah inspect --format '{{ .OCIv1.Architecture }}' test
+ expect_output --substring "$otherarch"
+}
+
+# Buildkit parity: this verifies if we honor custom overrides of TARGETOS, TARGETVARIANT, TARGETARCH and TARGETPLATFORM if user wants
+@test "build-with-inline-platform-and-rely-on-defaultbuiltinargs-check-custom-override" {
+ run_buildah build --platform linux/arm64 $WITH_POLICY_JSON --build-arg TARGETOS=android -t test -f $BUDFILES/multiarch/Dockerfile.built-in-args
+ expect_output --substring "I'm compiling for linux/arm64"
+ expect_output --substring "and tagging for linux/arm64"
+ ## Note since we used --build-arg and overrode OS, OS must be android
+ expect_output --substring "and OS android"
+ expect_output --substring "and ARCH arm64"
+ run_buildah inspect --format '{{ .OCIv1.Architecture }}' test
+ expect_output --substring "arm64"
+}
+
+# Following test must pass since we want to tag image as host arch
+# Test for use-case described here: https://github.com/containers/buildah/issues/3261
+@test "build-with-inline-platform-amd-but-tag-as-arm" {
+ # Host arch
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ run_buildah info --format '{{.host.arch}}'
+ myarch="$output"
+ targetarch="arm64"
+
+ if [[ "$targetarch" == "$myarch" ]]; then
+ targetarch="amd64"
+ fi
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM --platform=linux/${myarch} alpine
+RUN uname -m
+_EOF
+
+ # Tries building image where baseImage has --platform=linux/HostArch
+ run_buildah build --platform linux/${targetarch} $WITH_POLICY_JSON -t test $contextdir
+ run_buildah inspect --format '{{ .OCIv1.Architecture }}' test
+ # base image is pulled as HostArch but tagged as non host arch
+ expect_output --substring $targetarch
+}
+
+# Test build with --add-history=false
+@test "build-with-omit-history-to-true should not add history" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+RUN echo hello
+RUN echo world
+_EOF
+
+ # Built image must not contain history for the layers which we have just built.
+ run_buildah build $WITH_POLICY_JSON --omit-history -t source -f $contextdir/Dockerfile1
+ run_buildah inspect --format "{{index .Docker.History}}" source
+ expect_output "[]"
+ run_buildah inspect --format "{{index .OCIv1.History}}" source
+ expect_output "[]"
+ run_buildah inspect --format "{{index .History}}" source
+ expect_output "[]"
+}
+
+# Test building with --userns=auto
+@test "build with --userns=auto also with size" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ user=$USER
+
+ if [[ "$user" == "root" ]]; then
+ user="containers"
+ fi
+
+ if ! grep -q $user "/etc/subuid"; then
+ skip "cannot find mappings for the current user"
+ fi
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN cat /proc/self/uid_map
+RUN echo hello
+
+FROM alpine
+COPY --from=0 /tmp /tmp
+RUN cat /proc/self/uid_map
+RUN ls -a
+_EOF
+
+ run_buildah build --userns=auto $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring "1024"
+ run_buildah build --userns=auto:size=500 $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring "500"
+}
+
+# Test building with --userns=auto with uidmapping
+@test "build with --userns=auto with uidmapping" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ user=$USER
+
+ if [[ "$user" == "root" ]]; then
+ user="containers"
+ fi
+
+ if ! grep -q $user "/etc/subuid"; then
+ skip "cannot find mappings for the current user"
+ fi
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN cat /proc/self/uid_map
+_EOF
+
+ run_buildah build --userns=auto:size=8192,uidmapping=0:0:1 $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring "8191"
+ run_buildah build --userns=auto:uidmapping=0:0:1 $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring " 0 0 1"
+}
+
+# Test building with --userns=auto with gidmapping
+@test "build with --userns=auto with gidmapping" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ user=$USER
+
+ if [[ "$user" == "root" ]]; then
+ user="containers"
+ fi
+
+ if ! grep -q $user "/etc/subuid"; then
+ skip "cannot find mappings for the current user"
+ fi
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN cat /proc/self/gid_map
+_EOF
+
+ run_buildah build --userns=auto:size=8192,gidmapping=0:0:1 $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring "8191"
+ run_buildah build --userns=auto:gidmapping=0:0:1 $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring " 0 0 1"
+}
+
+# Test bud with prestart hook
+@test "build-test with OCI prestart hook" {
+ skip_if_in_container # This works in privileged container setup but does not work in CI setup
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir/hooks
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo hello
+_EOF
+
+ cat > $contextdir/hooks/test.json << _EOF
+{
+ "version": "1.0.0",
+ "hook": {
+ "path": "$contextdir/hooks/test"
+ },
+ "when": {
+ "always": true
+ },
+ "stages": ["prestart"]
+}
+_EOF
+
+ cat > $contextdir/hooks/test << _EOF
+#!/bin/sh
+echo from-hook > $contextdir/hooks/hook-output
+_EOF
+
+ # make actual hook executable
+ chmod +x $contextdir/hooks/test
+ run_buildah build $WITH_POLICY_JSON -t source --hooks-dir=$contextdir/hooks -f $contextdir/Dockerfile
+ run cat $contextdir/hooks/hook-output
+ expect_output --substring "from-hook"
+}
+
+@test "build with add resolving to invalid HTTP status code" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+ADD https://google.com/test /
+_EOF
+
+ run_buildah 125 build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring "invalid response status"
+}
+
+@test "build test has gid in supplemental groups" {
+ _prefetch alpine
+ run_buildah build $WITH_POLICY_JSON -t source -f $BUDFILES/supplemental-groups/Dockerfile
+ # gid 1000 must be in supplemental groups
+ expect_output --substring "Groups: 1000"
+}
+
+@test "build test if supplemental groups has gid with --isolation chroot" {
+ test -z "${BUILDAH_ISOLATION}" || skip "BUILDAH_ISOLATION=${BUILDAH_ISOLATION} overrides --isolation"
+
+ _prefetch alpine
+ run_buildah build --isolation chroot $WITH_POLICY_JSON -t source -f $BUDFILES/supplemental-groups/Dockerfile
+ # gid 1000 must be in supplemental groups
+ expect_output --substring "Groups: 1000"
+}
+
+@test "build-test --mount=type=secret test relative to workdir mount" {
+ local contextdir=$BUDFILES/secret-relative
+ run_buildah build $WITH_POLICY_JSON --no-cache --secret id=secret-foo,src=$contextdir/secret1.txt --secret id=secret-bar,src=$contextdir/secret2.txt -t test -f $contextdir/Dockerfile
+ expect_output --substring "secret:foo"
+ expect_output --substring "secret:bar"
+}
+
+@test "build-test --mount=type=cache test relative to workdir mount" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ ## write-cache
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN mkdir test
+WORKDIR test
+RUN --mount=type=cache,id=YfHI60aApFM-target,target=target echo world > /test/target/hello
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN mkdir test
+WORKDIR test
+RUN --mount=type=cache,id=YfHI60aApFM-target,target=target cat /test/target/hello
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring "world"
+}
+
+@test "build-test do not use mount stage from cache if it was rebuilt" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine as dependencies
+
+RUN mkdir /build && echo v1 > /build/version
+
+FROM alpine
+
+RUN --mount=type=bind,source=/build,target=/build,from=dependencies \
+ cp /build/version /version
+
+RUN cat /version
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile
+ run_buildah build $WITH_POLICY_JSON --layers -t source2 -f $contextdir/Dockerfile
+ expect_output --substring "Using cache"
+
+ # First stage i.e dependencies is changed so it should not use the steps in second stage from
+ # cache
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine as dependencies
+
+RUN mkdir /build && echo v2 > /build/version
+
+FROM alpine
+
+RUN --mount=type=bind,source=/build,target=/build,from=dependencies \
+ cp /build/version /version
+
+RUN cat /version
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON --layers -t source3 -f $contextdir/Dockerfile
+ assert "$output" !~ "Using cache"
+
+}
+
+# Verify: https://github.com/containers/buildah/issues/4572
+@test "build-test verify no dangling containers are left" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine AS alpine_builder
+FROM busybox AS busybox_builder
+FROM scratch
+COPY --from=alpine_builder /etc/alpine* .
+COPY --from=busybox_builder /bin/busybox /bin/busybox
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ # No leftover containers, just the header line.
+ run_buildah containers
+ expect_line_count 1
+}
+
+# Verify: https://github.com/containers/buildah/issues/4485
+# Verify: https://github.com/containers/buildah/issues/4319
+@test "No default warning for TARGETARCH, TARGETOS, TARGETPLATFORM " {
+ local contextdir=$BUDFILES/targetarch
+
+ run_buildah build $WITH_POLICY_JSON --platform=linux/amd64,linux/arm64 -f $contextdir/Dockerfile
+ assert "$output" !~ "one or more build args were not consumed" \
+ "No warning for default args should be there"
+
+ run_buildah build $WITH_POLICY_JSON --os linux -f $contextdir/Dockerfile
+ assert "$output" !~ "Try adding" \
+ "No Warning for default args should be there"
+}
+
+
+@test "build-test skipping unwanted stages with --skip-unused-stages=false and --skip-unused-stages=true" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo "first unwanted stage"
+
+FROM alpine as one
+RUN echo "needed stage"
+
+FROM alpine
+RUN echo "another unwanted stage"
+
+FROM one
+RUN echo "target stage"
+_EOF
+
+ # with --skip-unused-stages=false
+ run_buildah build $WITH_POLICY_JSON --skip-unused-stages=false -t source -f $contextdir/Dockerfile
+ expect_output --substring "needed stage"
+ expect_output --substring "target stage"
+ # this is expected since user specified `--skip-unused-stages=false`
+ expect_output --substring "first unwanted stage"
+ expect_output --substring "another unwanted stage"
+
+ # with --skip-unused-stages=true
+ run_buildah build $WITH_POLICY_JSON --skip-unused-stages=true -t source -f $contextdir/Dockerfile
+ expect_output --substring "needed stage"
+ expect_output --substring "target stage"
+ assert "$output" !~ "unwanted stage"
+}
+
+@test "build-test: do not warn for instructions declared in unused stages" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo "first unwanted stage"
+
+FROM alpine as one
+RUN echo "needed stage"
+
+FROM alpine
+ARG FOO_BAR
+RUN echo "another unwanted stage"
+
+FROM one
+RUN echo "target stage"
+_EOF
+
+ # with --skip-unused-stages=true no warning should be printed since ARG is decalred in stage which is not used
+ run_buildah build $WITH_POLICY_JSON --skip-unused-stages=true -t source -f $contextdir/Dockerfile
+ expect_output --substring "needed stage"
+ expect_output --substring "target stage"
+ assert "$output" !~ "unwanted stage"
+ # must not contain warning "missing FOO_BAR"
+ assert "$output" !~ "missing"
+
+ # with --skip-unused-stages=false should print unwanted stage as well as warning for unused arg
+ run_buildah build $WITH_POLICY_JSON --skip-unused-stages=false -t source -f $contextdir/Dockerfile
+ expect_output --substring "needed stage"
+ expect_output --substring "target stage"
+ expect_output --substring "unwanted stage"
+ expect_output --substring "missing"
+}
+
+# Test skipping images with FROM
+@test "build-test skipping unwanted stages with FROM" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo "unwanted stage"
+
+FROM alpine as one
+RUN echo "needed stage"
+
+FROM alpine
+RUN echo "another unwanted stage"
+
+FROM one
+RUN echo "target stage"
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile
+ expect_output --substring "needed stage"
+ expect_output --substring "target stage"
+ assert "$output" !~ "unwanted stage"
+}
+
+# Note: Please skip this tests in case of podman-remote build
+@test "build: test race in updating image name while performing parallel commits" {
+ _prefetch alpine
+ # Run 25 parallel builds using the same Containerfile
+ local count=25
+ for i in $(seq --format '%02g' 1 $count); do
+ timeout --foreground -v --kill=10 300 \
+ ${BUILDAH_BINARY} ${BUILDAH_REGISTRY_OPTS} ${ROOTDIR_OPTS} $WITH_POLICY_JSON build --quiet --squash --iidfile ${TEST_SCRATCH_DIR}/id.$i --timestamp 0 -f $BUDFILES/check-race/Containerfile >/dev/null &
+ done
+ # Wait for all background builds to complete. Note that this succeeds
+ # even if some of the individual builds fail! Our actual test is below.
+ wait
+ # Number of output bytes must be always same, which confirms that there is no race.
+ assert "$(cat ${TEST_SCRATCH_DIR}/id.* | wc -c)" = 1775 "Total chars in all id.* files"
+ # clean all images built for this test
+ run_buildah rmi --all -f
+}
+
+# Test skipping images with FROM but stage name also conflicts with additional build context
+# so selected stage should be still skipped since it is not being actually used by additional build
+# context is being used.
+@test "build-test skipping unwanted stages with FROM and conflict with additional build context" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ # add file on original context
+ echo something > $contextdir/somefile
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo "unwanted stage"
+
+FROM alpine as one
+RUN echo "unwanted stage"
+RUN echo "from stage unwanted stage"
+
+FROM alpine
+RUN echo "another unwanted stage"
+
+FROM alpine
+COPY --from=one somefile .
+RUN cat somefile
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON --build-context one=$contextdir -t source -f $contextdir/Dockerfile
+ expect_output --substring "something"
+ assert "$output" !~ "unwanted stage"
+}
+
+# Test skipping unwanted stage with COPY from stage name
+@test "build-test skipping unwanted stages with COPY from stage name" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ echo something > $contextdir/somefile
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo "unwanted stage"
+
+FROM alpine as one
+RUN echo "needed stage"
+COPY somefile file
+
+FROM alpine
+COPY --from=one file .
+RUN cat file
+RUN echo "target stage"
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile $contextdir
+ expect_output --substring "needed stage"
+ expect_output --substring "something"
+ expect_output --substring "target stage"
+ assert "$output" !~ "unwanted stage"
+}
+
+@test "build test --retry and --retry-delay" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ echo something > $contextdir/somefile
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo hello
+_EOF
+
+ run_buildah --log-level debug build --retry 4 --retry-delay 5s $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile $contextdir
+ expect_output --substring "Setting MaxPullPushRetries to 4 and PullPushRetryDelay to 5s"
+}
+
+# Test skipping unwanted stage with COPY from stage index
+@test "build-test skipping unwanted stages with COPY from stage index" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ echo something > $contextdir/somefile
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo "unwanted stage"
+
+FROM alpine
+RUN echo "needed stage"
+COPY somefile file
+
+FROM alpine
+RUN echo "another unwanted stage"
+
+FROM alpine
+COPY --from=1 file .
+RUN cat file
+RUN echo "target stage"
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile $contextdir
+ expect_output --substring "needed stage"
+ expect_output --substring "something"
+ expect_output --substring "target stage"
+ assert "$output" !~ "unwanted stage"
+}
+
+# Test if our cache is working in optimal way for COPY use case
+@test "build test optimal cache working for COPY instruction" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ echo something > $contextdir/somefile
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+COPY somefile .
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile $contextdir
+ # Run again and verify if we hit cache in first pass
+ run_buildah --log-level debug build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile $contextdir
+ expect_output --substring "Found a cache hit in the first iteration"
+}
+
+# Test if our cache is working in optimal way for ADD use case
+@test "build test optimal cache working for ADD instruction" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ echo something > $contextdir/somefile
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+ADD somefile .
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile $contextdir
+ # Run again and verify if we hit cache in first pass
+ run_buildah --log-level debug build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile $contextdir
+ expect_output --substring "Found a cache hit in the first iteration"
+}
+
+# Test skipping unwanted stage with --mount from another stage
+@test "build-test skipping unwanted stages with --mount from stagename" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ echo something > $contextdir/somefile
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo "unwanted stage"
+
+FROM alpine as one
+RUN echo "needed stage"
+COPY somefile file
+
+FROM alpine
+RUN echo "another unwanted stage"
+
+FROM alpine
+RUN --mount=type=bind,from=one,target=/test cat /test/file
+RUN echo "target stage"
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile $contextdir
+ expect_output --substring "needed stage"
+ expect_output --substring "something"
+ expect_output --substring "target stage"
+ assert "$output" !~ "unwanted stage"
+}
+
+# Test skipping unwanted stage with --mount from another stage
+@test "build-test skipping unwanted stages with --mount from stagename with flag order changed" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ echo something > $contextdir/somefile
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN echo "unwanted stage"
+
+FROM alpine as one
+RUN echo "needed stage"
+COPY somefile file
+
+FROM alpine
+RUN echo "another unwanted stage"
+
+FROM alpine
+RUN --mount=from=one,target=/test,type=bind cat /test/file
+RUN echo "target stage"
+_EOF
+
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile $contextdir
+ expect_output --substring "needed stage"
+ expect_output --substring "something"
+ expect_output --substring "target stage"
+ assert "$output" !~ "unwanted stage"
+}
+
+# Test pinning image using additional build context
+@test "build-with-additional-build-context and COPY, test pinning image" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+RUN touch hello
+RUN echo world > hello
+_EOF
+
+ cat > $contextdir/Dockerfile2 << _EOF
+FROM alpine
+COPY --from=busybox hello .
+RUN cat hello
+_EOF
+
+ # Build a first image which we can use as source
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile1
+ # Pin upstream busybox to local image source
+ run_buildah build $WITH_POLICY_JSON --build-context busybox=docker://source -t test -f $contextdir/Dockerfile2
+ expect_output --substring "world"
+}
+
+# Test conflict between stage short name and additional-context conflict
+# Buildkit parity give priority to additional-context over stage names.
+@test "build-with-additional-build-context and COPY, stagename and additional-context conflict" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+RUN touch hello
+RUN echo world > hello
+_EOF
+
+ cat > $contextdir/Dockerfile2 << _EOF
+FROM alpine as some-stage
+RUN echo world
+
+# hello should get copied since we are giving priority to additional context
+COPY --from=some-stage hello .
+RUN cat hello
+_EOF
+
+ # Build a first image which we can use as source
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile1
+ # Pin upstream busybox to local image source
+ run_buildah build $WITH_POLICY_JSON --build-context some-stage=docker://source -t test -f $contextdir/Dockerfile2
+ expect_output --substring "world"
+}
+
+# When numeric index of stage is used and stage exists but additional context also exist with name
+# same as stage in such situations always use additional context.
+@test "build-with-additional-build-context and COPY, additionalContext and numeric value of stage" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+RUN touch hello
+RUN echo override-numeric > hello
+_EOF
+
+ cat > $contextdir/Dockerfile2 << _EOF
+FROM alpine as some-stage
+RUN echo world > hello
+
+# hello should get copied since we are accessing stage from its numeric value and not
+# additional build context where some-stage is docker://alpine
+FROM alpine
+COPY --from=0 hello .
+RUN cat hello
+_EOF
+
+ # Build a first image which we can use as source
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile1
+ run_buildah build $WITH_POLICY_JSON --build-context some-stage=docker://source -t test -f $contextdir/Dockerfile2
+ expect_output --substring "override-numeric"
+}
+
+# Test conflict between stage short name and additional-context conflict on FROM
+# Buildkit parity give priority to additional-context over stage names.
+@test "build-with-additional-build-context and FROM, stagename and additional-context conflict" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+RUN touch hello
+RUN echo world > hello
+_EOF
+
+ cat > $contextdir/Dockerfile2 << _EOF
+FROM alpine as some-stage
+RUN echo world
+
+# hello should be there since we are giving priority to additional context
+FROM some-stage
+RUN cat hello
+_EOF
+
+ # Build a first image which we can use as source
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile1
+ # Second FROM should choose base as `source` instead of local-stage named `some-stage`.
+ run_buildah build $WITH_POLICY_JSON --build-context some-stage=docker://source -t test -f $contextdir/Dockerfile2
+ expect_output --substring "world"
+}
+
+# Test adding additional build context
+@test "build-with-additional-build-context and COPY, additional context from host" {
+ local contextdir1=${TEST_SCRATCH_DIR}/bud/platform
+ local contextdir2=${TEST_SCRATCH_DIR}/bud/platform2
+ mkdir -p $contextdir1 $contextdir2
+
+ # add file on original context
+ echo something > $contextdir1/somefile
+ # add file on additional context
+ echo hello_world > $contextdir2/hello
+
+ cat > $contextdir1/Dockerfile << _EOF
+FROM alpine
+COPY somefile .
+RUN cat somefile
+COPY --from=context2 hello .
+RUN cat hello
+_EOF
+
+ # Test additional context
+ run_buildah build $WITH_POLICY_JSON -t source --build-context context2=$contextdir2 $contextdir1
+ expect_output --substring "something"
+ expect_output --substring "hello_world"
+}
+
+# Test adding additional build context but download tar
+@test "build-with-additional-build-context and COPY, additional context from external URL" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+COPY --from=crun-context . .
+RUN ls crun-1.4.5
+_EOF
+
+ # Test additional context but download from tar
+ run_buildah build $WITH_POLICY_JSON -t source --build-context crun-context=https://github.com/containers/crun/releases/download/1.4.5/crun-1.4.5.tar.xz $contextdir
+ # additional context from tar must show crun binary inside container
+ expect_output --substring "libcrun"
+}
+
+# Test pinning image
+@test "build-with-additional-build-context and FROM, pin busybox to alpine" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM busybox
+RUN ls /etc/*release
+_EOF
+
+ # Test additional context but download from tar
+ # We are pinning busybox to alpine so we must always pull alpine and use that
+ run_buildah build $WITH_POLICY_JSON -t source --build-context busybox=docker://alpine $contextdir
+ # We successfully pinned binary cause otherwise busybox should not contain alpine-release binary
+ expect_output --substring "alpine-release"
+}
+
+# Test usage of RUN --mount=from=<name> with additional context and also test conflict with stage-name
+@test "build-with-additional-build-context and RUN --mount=from=, additional-context and also test conflict with stagename" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+RUN touch hello
+RUN echo world > hello
+_EOF
+
+ cat > $contextdir/Dockerfile2 << _EOF
+FROM alpine as some-stage
+RUN echo something_random
+
+# hello should get copied since we are giving priority to additional context
+FROM alpine
+RUN --mount=type=bind,from=some-stage,target=/test cat /test/hello
+_EOF
+
+ # Build a first image which we can use as source
+ run_buildah build $WITH_POLICY_JSON -t source -f $contextdir/Dockerfile1
+ # Additional Context for RUN --mount is additional image and it should not conflict with stage
+ run_buildah build $WITH_POLICY_JSON --build-context some-stage=docker://source -t test -f $contextdir/Dockerfile2
+ expect_output --substring "world"
+}
+
+# Test usage of RUN --mount=from=<name> with additional context and also test conflict with stage-name, when additionalContext is on host
+@test "build-with-additional-build-context and RUN --mount=from=, additional-context not image and also test conflict with stagename" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+ echo world > $contextdir/hello
+
+ cat > $contextdir/Dockerfile2 << _EOF
+FROM alpine as some-stage
+RUN echo some_text
+
+# hello should get copied since we are giving priority to additional context
+FROM alpine
+RUN --mount=type=bind,from=some-stage,target=/test,z cat /test/hello
+_EOF
+
+ # Additional context for RUN --mount is file on host
+ run_buildah build $WITH_POLICY_JSON --build-context some-stage=$contextdir -t test -f $contextdir/Dockerfile2
+ expect_output --substring "world"
+}
+
+# Test usage of RUN --mount=from=<name> with additional context is URL and mount source is relative using src
+@test "build-with-additional-build-context and RUN --mount=from=, additional-context is URL and mounted from subdir" {
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile2 << _EOF
+FROM alpine as some-stage
+RUN echo world
+
+# hello should get copied since we are giving priority to additional context
+FROM alpine
+RUN --mount=type=bind,src=crun-1.4.5/src,from=some-stage,target=/test,z ls /test
+_EOF
+
+ # Additional context for RUN --mount is file on host
+ run_buildah build $WITH_POLICY_JSON --build-context some-stage=https://github.com/containers/crun/releases/download/1.4.5/crun-1.4.5.tar.xz -t test -f $contextdir/Dockerfile2
+ expect_output --substring "crun.c"
+}
+
+@test "build-with-additional-build-context and COPY, ensure .containerignore is being respected" {
+ local additionalcontextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $additionalcontextdir
+ touch $additionalcontextdir/hello
+ cat > $additionalcontextdir/.containerignore << _EOF
+hello
+_EOF
+
+ cat > $additionalcontextdir/Containerfile << _EOF
+FROM alpine
+RUN echo world
+
+# hello should not be available since
+# it's excluded as per the additional
+# build context's .containerignore file
+COPY --from=project hello .
+RUN cat hello
+_EOF
+
+ run_buildah 125 build $WITH_POLICY_JSON --build-context project=$additionalcontextdir -t test -f $additionalcontextdir/Containerfile
+ expect_output --substring "COPY --from=project hello .\": no items matching glob"
+}
+
+@test "bud with --layers and --no-cache flags" {
+ local contextdir=${TEST_SCRATCH_DIR}/use-layers
+ cp -a $BUDFILES/use-layers $contextdir
+
+ # Run with --pull-always to have a regression test for
+ # containers/podman/issues/10307.
+ run_buildah build --pull-always $WITH_POLICY_JSON --layers -t test1 $contextdir
+ run_buildah images -a
+ expect_line_count 8
+
+ run_buildah build --pull-never $WITH_POLICY_JSON --layers -t test2 $contextdir
+ run_buildah images -a
+ expect_line_count 10
+ run_buildah inspect --format "{{index .Docker.ContainerConfig.Env 1}}" test1
+ expect_output "foo=bar"
+ run_buildah inspect --format "{{index .Docker.ContainerConfig.Env 1}}" test2
+ expect_output "foo=bar"
+ run_buildah inspect --format "{{.Docker.ContainerConfig.ExposedPorts}}" test1
+ expect_output "map[8080/tcp:{}]"
+ run_buildah inspect --format "{{.Docker.ContainerConfig.ExposedPorts}}" test2
+ expect_output "map[8080/tcp:{}]"
+ run_buildah inspect --format "{{index .Docker.History 2}}" test1
+ expect_output --substring "FROM docker.io/library/alpine:latest"
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test3 -f Dockerfile.2 $contextdir
+ run_buildah images -a
+ expect_line_count 12
+
+ mkdir -p $contextdir/mount/subdir
+ run_buildah build $WITH_POLICY_JSON --layers -t test4 -f Dockerfile.3 $contextdir
+ run_buildah images -a
+ expect_line_count 14
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test5 -f Dockerfile.3 $contextdir
+ run_buildah images -a
+ expect_line_count 15
+
+ touch $contextdir/mount/subdir/file.txt
+ run_buildah build $WITH_POLICY_JSON --layers -t test6 -f Dockerfile.3 $contextdir
+ run_buildah images -a
+ expect_line_count 17
+
+ run_buildah build $WITH_POLICY_JSON --no-cache -t test7 -f Dockerfile.2 $contextdir
+ run_buildah images -a
+ expect_line_count 18
+}
+
+@test "bud with --layers and single and two line Dockerfiles" {
+ _prefetch alpine
+ run_buildah inspect --format "{{.FromImageDigest}}" alpine
+ fromDigest="$output"
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test -f Dockerfile.5 $BUDFILES/use-layers
+ run_buildah images -a
+ expect_line_count 3
+
+ # Also check for base-image annotations.
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' test
+ expect_output "$fromDigest" "base digest from alpine"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' test
+ expect_output "docker.io/library/alpine:latest" "base name from alpine"
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test1 -f Dockerfile.6 $BUDFILES/use-layers
+ run_buildah images -a
+ expect_line_count 4
+
+ # Note that the base-image annotations are empty here since a Container with
+ # a single FROM line is effectively just a tag and it does not create a new
+ # image.
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' test1
+ expect_output "" "base digest from alpine"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' test1
+ expect_output "" "base name from alpine"
+}
+
+@test "bud with --layers, multistage, and COPY with --from" {
+ _prefetch alpine
+ local contextdir=${TEST_SCRATCH_DIR}/use-layers
+ cp -a $BUDFILES/use-layers $contextdir
+
+ mkdir -p $contextdir/uuid
+ uuidgen > $contextdir/uuid/data
+ mkdir -p $contextdir/date
+ date > $contextdir/date/data
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test1 -f Dockerfile.multistage-copy $contextdir
+ run_buildah images -a
+ expect_line_count 6
+ # The second time through, the layers should all get reused.
+ run_buildah build $WITH_POLICY_JSON --layers -t test1 -f Dockerfile.multistage-copy $contextdir
+ run_buildah images -a
+ expect_line_count 6
+ # The third time through, the layers should all get reused, but we'll have a new line of output for the new name.
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test2 -f Dockerfile.multistage-copy $contextdir
+ run_buildah images -a
+ expect_line_count 7
+
+ # Both interim images will be different, and all of the layers in the final image will be different.
+ uuidgen > $contextdir/uuid/data
+ date > $contextdir/date/data
+ run_buildah build $WITH_POLICY_JSON --layers -t test3 -f Dockerfile.multistage-copy $contextdir
+ run_buildah images -a
+ expect_line_count 11
+ # No leftover containers, just the header line.
+ run_buildah containers
+ expect_line_count 1
+
+ run_buildah from --quiet $WITH_POLICY_JSON test3
+ ctr=$output
+ run_buildah mount ${ctr}
+ mnt=$output
+ test -e $mnt/uuid
+ test -e $mnt/date
+
+ # Layers won't get reused because this build won't use caching.
+ run_buildah build $WITH_POLICY_JSON -t test4 -f Dockerfile.multistage-copy $contextdir
+ run_buildah images -a
+ expect_line_count 12
+}
+
+@test "bud-multistage-partial-cache" {
+ _prefetch alpine
+ target=foo
+ # build the first stage
+ run_buildah build $WITH_POLICY_JSON --layers -f $BUDFILES/cache-stages/Dockerfile.1 $BUDFILES/cache-stages
+ # expect alpine + 1 image record for the first stage
+ run_buildah images -a
+ expect_line_count 3
+ # build the second stage, itself not cached, when the first stage is found in the cache
+ run_buildah build $WITH_POLICY_JSON --layers -f $BUDFILES/cache-stages/Dockerfile.2 -t ${target} $BUDFILES/cache-stages
+ # expect alpine + 1 image record for the first stage, then two more image records for the second stage
+ run_buildah images -a
+ expect_line_count 5
+}
+
+@test "bud-multistage-copy-final-slash" {
+ _prefetch busybox
+ target=foo
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/dest-final-slash
+ run_buildah from --pull=false $WITH_POLICY_JSON ${target}
+ cid="$output"
+ run_buildah run ${cid} /test/ls -lR /test/ls
+}
+
+@test "bud-multistage-reused" {
+ _prefetch alpine busybox
+ run_buildah inspect --format "{{.FromImageDigest}}" busybox
+ fromDigest="$output"
+
+ target=foo
+
+ # Check the base-image annotations in a single-layer build where the last stage is just an earlier stage.
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds/Dockerfile.reused $BUDFILES/multi-stage-builds
+
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' ${target}
+ expect_output "$fromDigest" "base digest from busybox"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' ${target}
+ expect_output "docker.io/library/busybox:latest" "base name from busybox"
+
+ run_buildah from $WITH_POLICY_JSON ${target}
+ run_buildah rmi -f ${target}
+
+ # Check the base-image annotations in a multi-layer build where the last stage is just an earlier stage.
+ run_buildah build $WITH_POLICY_JSON -t ${target} --layers -f $BUDFILES/multi-stage-builds/Dockerfile.reused $BUDFILES/multi-stage-builds
+
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' ${target}
+ expect_output "$fromDigest" "base digest from busybox"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' ${target}
+ expect_output "docker.io/library/busybox:latest" "base name from busybox"
+
+ run_buildah from $WITH_POLICY_JSON ${target}
+ run_buildah rmi -f ${target}
+
+ # Check the base-image annotations in a single-layer build where the last stage is based on an earlier stage.
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds/Dockerfile.reused2 $BUDFILES/multi-stage-builds
+
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' ${target}
+ expect_output "$fromDigest" "base digest from busybox"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' ${target}
+ expect_output "docker.io/library/busybox:latest" "base name from busybox"
+
+ run_buildah from $WITH_POLICY_JSON ${target}
+ run_buildah rmi -f ${target}
+
+ # Check the base-image annotations in a multi-layer build where the last stage is based on an earlier stage.
+ run_buildah build $WITH_POLICY_JSON -t ${target} --layers -f $BUDFILES/multi-stage-builds/Dockerfile.reused2 $BUDFILES/multi-stage-builds
+
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' ${target}
+ expect_output "$fromDigest" "base digest from busybox"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' ${target}
+ expect_output "docker.io/library/busybox:latest" "base name from busybox"
+
+ run_buildah from $WITH_POLICY_JSON ${target}
+ run_buildah rmi -f ${target}
+}
+
+@test "bud-multistage-cache" {
+ _prefetch alpine busybox
+ target=foo
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds/Dockerfile.extended $BUDFILES/multi-stage-builds
+ run_buildah from $WITH_POLICY_JSON ${target}
+ cid="$output"
+ run_buildah mount "$cid"
+ root="$output"
+ # cache should have used this one
+ test -r "$root"/tmp/preCommit
+ # cache should not have used this one
+ ! test -r "$root"/tmp/postCommit
+}
+
+@test "bud-multistage-pull-always" {
+ _prefetch busybox
+ run_buildah build --pull-always $WITH_POLICY_JSON -f $BUDFILES/multi-stage-builds/Dockerfile.extended $BUDFILES/multi-stage-builds
+}
+
+@test "bud with --layers and symlink file" {
+ _prefetch alpine
+ local contextdir=${TEST_SCRATCH_DIR}/use-layers
+ cp -a $BUDFILES/use-layers $contextdir
+ echo 'echo "Hello World!"' > $contextdir/hello.sh
+ ln -s hello.sh $contextdir/hello_world.sh
+ run_buildah build $WITH_POLICY_JSON --layers -t test -f Dockerfile.4 $contextdir
+ run_buildah images -a
+ expect_line_count 4
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test1 -f Dockerfile.4 $contextdir
+ run_buildah images -a
+ expect_line_count 5
+
+ echo 'echo "Hello Cache!"' > $contextdir/hello.sh
+ run_buildah build $WITH_POLICY_JSON --layers -t test2 -f Dockerfile.4 $contextdir
+ run_buildah images -a
+ expect_line_count 7
+}
+
+@test "bud with --layers and dangling symlink" {
+ _prefetch alpine
+ local contextdir=${TEST_SCRATCH_DIR}/use-layers
+ cp -a $BUDFILES/use-layers $contextdir
+ mkdir $contextdir/blah
+ ln -s ${TEST_SOURCES}/policy.json $contextdir/blah/policy.json
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test -f Dockerfile.dangling-symlink $contextdir
+ run_buildah images -a
+ expect_line_count 3
+
+ run_buildah build $WITH_POLICY_JSON --layers -t test1 -f Dockerfile.dangling-symlink $contextdir
+ run_buildah images -a
+ expect_line_count 4
+
+ run_buildah from --quiet $WITH_POLICY_JSON test
+ cid=$output
+ run_buildah run $cid ls /tmp
+ expect_output "policy.json"
+}
+
+@test "bud with --layers and --build-args" {
+ _prefetch alpine
+ # base plus 3, plus the header line
+ run_buildah build $WITH_POLICY_JSON --build-arg=user=0 --layers -t test -f Dockerfile.build-args $BUDFILES/use-layers
+ run_buildah images -a
+ expect_line_count 5
+
+ # running the same build again does not run the commands again
+ run_buildah build $WITH_POLICY_JSON --build-arg=user=0 --layers -t test -f Dockerfile.build-args $BUDFILES/use-layers
+ if [[ "$output" =~ "MAo=" ]]; then
+ # MAo= is the base64 of "0\n" (i.e. `echo 0`)
+ printf "Expected command not to run again if layer is cached\n" >&2
+ false
+ fi
+
+ # two more, starting at the "echo $user | base64" instruction
+ run_buildah build $WITH_POLICY_JSON --build-arg=user=1 --layers -t test1 -f Dockerfile.build-args $BUDFILES/use-layers
+ run_buildah images -a
+ expect_line_count 7
+
+ # one more, because we added a new name to the same image
+ run_buildah build $WITH_POLICY_JSON --build-arg=user=1 --layers -t test2 -f Dockerfile.build-args $BUDFILES/use-layers
+ run_buildah images -a
+ expect_line_count 8
+
+ # two more, starting at the "echo $user | base64" instruction
+ # (no --build-arg at all is a distinct cache key from user=1)
+ run_buildah build $WITH_POLICY_JSON --layers -t test3 -f Dockerfile.build-args $BUDFILES/use-layers
+ run_buildah images -a
+ expect_line_count 11
+}
+
+
+@test "bud with --layers and --build-args: override ARG with ENV and image must be cached" {
+ _prefetch alpine
+ # when the ARG is overridden by the config (ENV), changing the build-arg
+ # value must NOT invalidate the layer cache: both builds yield the same ID
+ run_buildah build $WITH_POLICY_JSON --build-arg=FOO=1 --layers -t args-cache -f $BUDFILES/with-arg/Dockerfile
+ run_buildah inspect -f '{{.FromImageID}}' args-cache
+ idbefore="$output"
+ run_buildah build $WITH_POLICY_JSON --build-arg=FOO=12 --layers -t args-cache -f $BUDFILES/with-arg/Dockerfile
+ run_buildah inspect -f '{{.FromImageID}}' args-cache
+ expect_output --substring ${idbefore}
+ run_buildah rmi args-cache
+}
+
+@test "bud with --layers and --build-args: use raw ARG and cache should not be used" {
+ # when ARG is used as a raw value, a different build-arg value must
+ # produce a different image (cache miss)
+ run_buildah build $WITH_POLICY_JSON --build-arg=FOO=1 --layers -t args-cache -f $BUDFILES/with-arg/Dockerfile2
+ run_buildah inspect -f '{{.FromImageID}}' args-cache
+ idbefore="$output"
+ run_buildah build $WITH_POLICY_JSON --build-arg=FOO=12 --layers -t args-cache -f $BUDFILES/with-arg/Dockerfile2
+ run_buildah inspect -f '{{.FromImageID}}' args-cache
+ idafter="$output"
+ run_buildah rmi args-cache
+
+ assert "$idbefore" != "$idafter" \
+ ".Args changed so final image id should be different"
+}
+
+@test "bud with --rm flag" {
+ _prefetch alpine
+ # default (--rm=true): intermediate containers are removed after the build,
+ # leaving only the header line in `buildah containers`
+ run_buildah build $WITH_POLICY_JSON --layers -t test1 $BUDFILES/use-layers
+ run_buildah containers
+ expect_line_count 1
+
+ # --rm=false: intermediate containers are left behind
+ run_buildah build $WITH_POLICY_JSON --rm=false --layers -t test2 $BUDFILES/use-layers
+ run_buildah containers
+ expect_line_count 7
+}
+
+@test "bud with --force-rm flag" {
+ _prefetch alpine
+ # a failing build (exit 125) with --force-rm must still clean up its
+ # working container
+ run_buildah 125 build $WITH_POLICY_JSON --force-rm --layers -t test1 -f Dockerfile.fail-case $BUDFILES/use-layers
+ run_buildah containers
+ expect_line_count 1
+
+ # without --force-rm, the failed build's container is left behind
+ run_buildah 125 build $WITH_POLICY_JSON --layers -t test2 -f Dockerfile.fail-case $BUDFILES/use-layers
+ run_buildah containers
+ expect_line_count 2
+}
+
+@test "bud --layers with non-existent/down registry" {
+ _prefetch alpine
+ run_buildah 125 build $WITH_POLICY_JSON --force-rm --layers -t test1 -f Dockerfile.non-existent-registry $BUDFILES/use-layers
+ expect_output --substring "no such host"
+}
+
+@test "bud from base image should have base image ENV also" {
+ _prefetch alpine
+ run_buildah build $WITH_POLICY_JSON -t test -f Dockerfile.check-env $BUDFILES/env
+ run_buildah from --quiet $WITH_POLICY_JSON test
+ cid=$output
+ run_buildah config --env random=hello,goodbye ${cid}
+ run_buildah commit $WITH_POLICY_JSON ${cid} test1
+ # Env index 1 must be foo=bar inherited from the image we just built
+ # (presumably set by Dockerfile.check-env — kept across the commit);
+ # index 2 is the value we just added via `buildah config`
+ run_buildah inspect --format '{{index .Docker.ContainerConfig.Env 1}}' test1
+ expect_output "foo=bar"
+ run_buildah inspect --format '{{index .Docker.ContainerConfig.Env 2}}' test1
+ expect_output "random=hello,goodbye"
+}
+
+@test "bud-from-scratch" {
+ # A FROM-scratch build must succeed and yield an image usable as a base
+ # for `buildah from`.
+ local img=scratch-image
+ run_buildah build $WITH_POLICY_JSON -t "$img" $BUDFILES/from-scratch
+ run_buildah from "$img"
+ expect_output "$img-working-container"
+}
+
+@test "bud-with-unlimited-memory-swap" {
+ # --memory-swap -1 ("unlimited") must be accepted by `build`.
+ local img=scratch-image
+ run_buildah build $WITH_POLICY_JSON --memory-swap -1 -t "$img" $BUDFILES/from-scratch
+ run_buildah rmi -f "$img"
+}
+
+@test "build with --no-cache and --layer" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN echo hello
+RUN echo world
+_EOF
+
+ # This should do a fresh build and just populate build cache
+ run_buildah build --layers $WITH_POLICY_JSON -t test -f $mytmpdir/Containerfile .
+ # This should also do a fresh build and just populate build cache
+ # (--no-cache skips cache lookup but still records the new layers)
+ run_buildah build --no-cache --layers $WITH_POLICY_JSON -t test -f $mytmpdir/Containerfile .
+ # This should use everything from build cache
+ run_buildah build --layers $WITH_POLICY_JSON -t test -f $mytmpdir/Containerfile .
+ expect_output --substring "Using cache"
+
+}
+
+@test "build --unsetenv PATH" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+ENV date="today"
+ENV foo="bar"
+ENV container="buildah"
+_EOF
+ target=unsetenv-image
+ # --unsetenv PATH must strip PATH from both the OCI and Docker configs
+ run_buildah build --unsetenv PATH $WITH_POLICY_JSON -t oci-${target} -f $mytmpdir/Containerfile .
+ run_buildah inspect --type=image --format '{{.OCIv1.Config.Env}}' oci-${target}
+ expect_output "[date=today foo=bar container=buildah]" "No Path should be defined"
+ run_buildah inspect --type=image --format '{{.Docker.Config.Env}}' oci-${target}
+ expect_output "[date=today foo=bar container=buildah]" "No Path should be defined"
+ cat > $mytmpdir/Containerfile << _EOF
+FROM oci-${target}
+ENV date="tomorrow"
+_EOF
+ # rebuild from the stripped base, additionally unsetting foo; date is
+ # overridden by the new ENV instruction
+ run_buildah build --format docker --unsetenv PATH --unsetenv foo $WITH_POLICY_JSON -t docker-${target} -f $mytmpdir/Containerfile .
+ run_buildah inspect --type=image --format '{{.OCIv1.Config.Env}}' docker-${target}
+ expect_output "[container=buildah date=tomorrow]" "No Path should be defined"
+ run_buildah inspect --type=image --format '{{.Docker.Config.Env}}' docker-${target}
+ expect_output "[container=buildah date=tomorrow]" "No Path should be defined"
+ cat > $mytmpdir/Containerfile << _EOF
+FROM oci-${target}
+_EOF
+ # with no ENV instruction at all: foo stays unset, date keeps the base's value
+ run_buildah build --format docker --unsetenv PATH --unsetenv foo $WITH_POLICY_JSON -t docker-${target} -f $mytmpdir/Containerfile .
+ run_buildah inspect --type=image --format '{{.OCIv1.Config.Env}}' docker-${target}
+ expect_output "[date=today container=buildah]" "No Path should be defined"
+ run_buildah inspect --type=image --format '{{.Docker.Config.Env}}' docker-${target}
+ expect_output "[date=today container=buildah]" "No Path should be defined"
+}
+
+@test "bud with --env" {
+ target=scratch-image
+ # --env NAME (no '=') copies the value from the build's own environment
+ run_buildah build --quiet=false --iidfile ${TEST_SCRATCH_DIR}/output.iid --env PATH $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ iid=$(cat ${TEST_SCRATCH_DIR}/output.iid)
+ run_buildah inspect --format '{{.Docker.Config.Env}}' $iid
+ expect_output "[PATH=$PATH]"
+
+ # --env NAME=value sets an explicit value
+ run_buildah build --quiet=false --iidfile ${TEST_SCRATCH_DIR}/output.iid --env PATH=foo $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ iid=$(cat ${TEST_SCRATCH_DIR}/output.iid)
+ run_buildah inspect --format '{{.Docker.Config.Env}}' $iid
+ expect_output "[PATH=foo]"
+
+ # --unsetenv takes precedence over --env, since we don't know the relative order of the two
+ run_buildah build --quiet=false --iidfile ${TEST_SCRATCH_DIR}/output.iid --unsetenv PATH --env PATH=foo --env PATH= $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ iid=$(cat ${TEST_SCRATCH_DIR}/output.iid)
+ run_buildah inspect --format '{{.Docker.Config.Env}}' $iid
+ expect_output "[]"
+
+ # Reference foo=baz from process environment
+ foo=baz run_buildah build --quiet=false --iidfile ${TEST_SCRATCH_DIR}/output.iid --env foo $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ iid=$(cat ${TEST_SCRATCH_DIR}/output.iid)
+ run_buildah inspect --format '{{.Docker.Config.Env}}' $iid
+ expect_output --substring "foo=baz"
+}
+
+@test "build with custom build output and output rootfs to directory" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN echo 'hello'> hello
+_EOF
+ run_buildah build --output type=local,dest=$mytmpdir/rootfs $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ # Use `run` so $output holds the directory listing; without it $output is
+ # still the build log, which also contains "hello" (from the RUN step),
+ # making the assertion below vacuous.
+ run ls $mytmpdir/rootfs
+ # exported rootfs must contain `hello` file which we created inside the image
+ expect_output --substring 'hello'
+}
+
+@test "build with custom build output for multi-stage and output rootfs to directory" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine as builder
+RUN touch rogue
+
+FROM builder as intermediate
+RUN touch artifact
+
+FROM scratch as outputs
+COPY --from=intermediate artifact target
+_EOF
+ run_buildah build --output type=local,dest=$mytmpdir/rootfs $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ # Use `run` so $output is the rootfs listing rather than the build log
+ # (the log's STEP lines mention "rogue" and "artifact", which would defeat
+ # the negative checks below).
+ run ls $mytmpdir/rootfs
+ # exported rootfs must contain only 'target' from last/final stage and not contain file `rogue` from first stage
+ expect_output --substring 'target'
+ # must not contain rogue from first stage
+ assert "$output" !~ "rogue"
+ # must not contain artifact from second stage
+ assert "$output" !~ "artifact"
+}
+
+@test "build with custom build output for multi-stage-cached and output rootfs to directory" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine as builder
+RUN touch rogue
+
+FROM builder as intermediate
+RUN touch artifact
+
+FROM scratch as outputs
+COPY --from=intermediate artifact target
+_EOF
+ # Populate layers but don't generate --output
+ run_buildah build --layers $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ # Reuse cached layers and check if --output still works as expected
+ run_buildah build --output type=local,dest=$mytmpdir/rootfs $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ # Use `run` so $output is the rootfs listing rather than the build log
+ # (the log mentions "rogue" and "artifact" in its step/cache lines).
+ run ls $mytmpdir/rootfs
+ # exported rootfs must contain only 'target' from last/final stage and not contain file `rogue` from first stage
+ expect_output --substring 'target'
+ # must not contain rogue from first stage
+ assert "$output" !~ "rogue"
+ # must not contain artifact from second stage
+ assert "$output" !~ "artifact"
+}
+
+@test "build with custom build output for single-stage-cached and output rootfs to directory" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine as builder
+RUN touch rogue
+_EOF
+ # Populate layers but don't generate --output
+ run_buildah build --layers $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ # Reuse cached layers and check if --output still works as expected
+ run_buildah build --output type=local,dest=$mytmpdir/rootfs $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ # Use `run` so the assertion checks the directory listing, not the build log.
+ run ls $mytmpdir/rootfs
+ # exported rootfs must contain 'rogue' even if built from cache.
+ expect_output --substring 'rogue'
+}
+
+@test "build with custom build output and output rootfs to tar" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN echo 'hello'> hello
+_EOF
+ run_buildah build --output type=tar,dest=$mytmpdir/rootfs.tar $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ # explode tar
+ mkdir $mytmpdir/rootfs
+ tar -C $mytmpdir/rootfs -xvf $mytmpdir/rootfs.tar
+ # Use `run` so $output is the listing of the exploded tar; without it the
+ # assertion would match "hello" in the build log left over from run_buildah.
+ run ls $mytmpdir/rootfs
+ # exported rootfs must contain `hello` file which we created inside the image
+ expect_output --substring 'hello'
+}
+
+@test "build with custom build output and output rootfs to tar by pipe" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN echo 'hello'> hello
+_EOF
+ # Using buildah() defined in helpers.bash since run_buildah adds unwanted chars to tar created by pipe.
+ buildah build $WITH_POLICY_JSON -o - -t test-bud -f $mytmpdir/Containerfile . > $mytmpdir/rootfs.tar
+ # explode tar
+ mkdir $mytmpdir/rootfs
+ tar -C $mytmpdir/rootfs -xvf $mytmpdir/rootfs.tar
+ # asserts existence via the exit status of ls (bats fails on nonzero)
+ ls $mytmpdir/rootfs/hello
+}
+
+@test "build with custom build output and output rootfs to tar with no additional step" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ # We only want content of alpine nothing else
+ # so just `FROM alpine` should work.
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+_EOF
+ run_buildah build --output type=tar,dest=$mytmpdir/rootfs.tar $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ # explode tar
+ mkdir $mytmpdir/rootfs
+ tar -C $mytmpdir/rootfs -xvf $mytmpdir/rootfs.tar
+ run ls $mytmpdir/rootfs
+ # exported rootfs must contain `var`,`bin` directory which exists in alpine
+ # so output of `ls $mytmpdir/rootfs` must contain following strings
+ expect_output --substring 'var'
+ expect_output --substring 'bin'
+}
+
+@test "build with custom build output must fail for bad input" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN echo 'hello'> hello
+_EOF
+ # missing dest value must be rejected
+ run_buildah 125 build --output type=tar, $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ expect_output --substring 'invalid'
+ # unknown output type must be rejected
+ # (use $WITH_POLICY_JSON like the rest of this file; the previous
+ # `--signature-policy ${TESTSDIR}/policy.json` referenced a variable not
+ # used anywhere else here)
+ run_buildah 125 build --output type=wrong,dest=hello $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile .
+ expect_output --substring 'invalid'
+}
+
+@test "bud-from-scratch-untagged" {
+ # Build with no -t: the image is identified only by its ID (iidfile).
+ run_buildah build --iidfile ${TEST_SCRATCH_DIR}/output.iid $WITH_POLICY_JSON $BUDFILES/from-scratch
+ iid=$(cat ${TEST_SCRATCH_DIR}/output.iid)
+ expect_output --substring --from="$iid" '^sha256:[0-9a-f]{64}$'
+ run_buildah from ${iid}
+ buildctr="$output"
+ run_buildah commit $buildctr new-image
+
+ # A commit based on an untagged image records the base's digest but no
+ # base name annotation.
+ run_buildah inspect --format "{{.FromImageDigest}}" $iid
+ fromDigest="$output"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' new-image
+ expect_output "$fromDigest" "digest for untagged base image"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' new-image
+ expect_output "" "no base name for untagged base image"
+}
+
+@test "bud with --tag" {
+ target=scratch-image
+ run_buildah build --quiet=false --tag test1 $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ expect_output --substring "Successfully tagged localhost/test1:latest"
+
+ # multiple --tag flags must all be applied
+ run_buildah build --quiet=false --tag test1 --tag test2 $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ expect_output --substring "Successfully tagged localhost/test1:latest"
+ expect_output --substring "Successfully tagged localhost/test2:latest"
+}
+
+@test "bud with bad --tag" {
+ # uppercase repository names are invalid references
+ target=scratch-image
+ run_buildah 125 build --quiet=false --tag TEST1 $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ expect_output --substring "tag TEST1: invalid reference format: repository name must be lowercase"
+
+ run_buildah 125 build --quiet=false --tag test1 --tag TEST2 $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ expect_output --substring "tag TEST2: invalid reference format: repository name must be lowercase"
+}
+
+@test "bud-from-scratch-iid" {
+ # -t plus --iidfile: the recorded iid must be a usable image reference
+ target=scratch-image
+ run_buildah build --iidfile ${TEST_SCRATCH_DIR}/output.iid $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ iid=$(cat ${TEST_SCRATCH_DIR}/output.iid)
+ expect_output --substring --from="$iid" '^sha256:[0-9a-f]{64}$'
+ run_buildah from ${iid}
+ expect_output "${target}-working-container"
+}
+
+@test "bud-from-scratch-label" {
+ run_buildah --version
+ local -a output_fields=($output)
+ # third token of "buildah version <n> ..." is the version number
+ buildah_version=${output_fields[2]}
+ want_output='map["io.buildah.version":"'$buildah_version'" "test":"label"]'
+
+ target=scratch-image
+ run_buildah build --label "test=label" $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ run_buildah inspect --format '{{printf "%q" .Docker.Config.Labels}}' ${target}
+ expect_output "$want_output"
+
+ # --label with no '=' yields an empty-string value
+ want_output='map["io.buildah.version":"'$buildah_version'" "test":""]'
+ run_buildah build --label test $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ run_buildah inspect --format '{{printf "%q" .Docker.Config.Labels}}' ${target}
+ expect_output "$want_output"
+}
+
+@test "bud-from-scratch-remove-identity-label" {
+ # --identity-label=false suppresses the io.buildah.version label
+ target=scratch-image
+ run_buildah build --identity-label=false $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ run_buildah inspect --format '{{printf "%q" .Docker.Config.Labels}}' ${target}
+ expect_output "map[]"
+}
+
+@test "bud-from-scratch-annotation" {
+ # the whole string after the first '=' is the annotation value, commas and all
+ target=scratch-image
+ run_buildah build --annotation "test=annotation1,annotation2=z" $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ run_buildah inspect --format '{{index .ImageAnnotations "test"}}' ${target}
+ expect_output "annotation1,annotation2=z"
+}
+
+@test "bud-from-scratch-layers" {
+ target=scratch-image
+ # build twice to exercise layer reuse, then verify cleanup leaves one line
+ run_buildah build $WITH_POLICY_JSON -f $BUDFILES/from-scratch/Containerfile2 -t ${target} $BUDFILES/from-scratch
+ run_buildah build $WITH_POLICY_JSON -f $BUDFILES/from-scratch/Containerfile2 -t ${target} $BUDFILES/from-scratch
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah images
+ expect_line_count 3
+ run_buildah rm ${cid}
+ expect_line_count 1
+}
+
+@test "bud-from-multiple-files-one-from" {
+ # Two -f files, only the first has a FROM: both are treated as one build.
+ target=scratch-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/from-multiple-files/Dockerfile1.scratch -f $BUDFILES/from-multiple-files/Dockerfile2.nofrom $BUDFILES/from-multiple-files
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ cmp $root/Dockerfile1 $BUDFILES/from-multiple-files/Dockerfile1.scratch
+ cmp $root/Dockerfile2.nofrom $BUDFILES/from-multiple-files/Dockerfile2.nofrom
+ # scratch base: /etc/passwd must be absent/empty
+ test ! -s $root/etc/passwd
+ run_buildah rm ${cid}
+ run_buildah rmi -a
+
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile1.alpine -f Dockerfile2.nofrom $BUDFILES/from-multiple-files
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ cmp $root/Dockerfile1 $BUDFILES/from-multiple-files/Dockerfile1.alpine
+ cmp $root/Dockerfile2.nofrom $BUDFILES/from-multiple-files/Dockerfile2.nofrom
+ # alpine base: /etc/passwd must exist and be non-empty
+ test -s $root/etc/passwd
+}
+
+@test "bud-from-multiple-files-two-froms" {
+ # Two -f files, each with its own FROM: the second file wins (multi-stage
+ # semantics), so Dockerfile1's content must NOT be in the final image.
+ _prefetch alpine
+ target=scratch-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile1.scratch -f Dockerfile2.withfrom $BUDFILES/from-multiple-files
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ test ! -s $root/Dockerfile1
+ cmp $root/Dockerfile2.withfrom $BUDFILES/from-multiple-files/Dockerfile2.withfrom
+ test -s $root/etc/passwd
+ run_buildah rm ${cid}
+ run_buildah rmi -a
+
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile1.alpine -f Dockerfile2.withfrom $BUDFILES/from-multiple-files
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ test ! -s $root/Dockerfile1
+ cmp $root/Dockerfile2.withfrom $BUDFILES/from-multiple-files/Dockerfile2.withfrom
+ test -s $root/etc/passwd
+}
+
+@test "build using --layer-label and test labels on intermediate images" {
+ # Remove all images so no intermediate images are present
+ run_buildah rmi --all -f
+ _prefetch alpine
+ label="l_$(random_string)"
+ labelvalue="v_$(random_string)"
+
+ run_buildah build --no-cache --layers --layer-label $label=$labelvalue --layer-label emptylabel $WITH_POLICY_JSON -t exp -f $BUDFILES/simple-multi-step/Containerfile
+
+ # Final image must not contain the layer-label
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "'$label'"}}' exp
+ expect_output "" "label on actual image"
+
+ # Find all intermediate images...
+ run_buildah images -a --format '{{.ID}}' --filter intermediate=true
+ # ...and confirm that they have both $label and emptylabel
+ for image in "${lines[@]}";do
+ run_buildah inspect $image
+ inspect="$output"
+
+ # note: lowercase .config here — this is the raw docker image JSON
+ run jq -r ".Docker.config.Labels.$label" <<<"$inspect"
+ assert "$output" = "$labelvalue" "label in intermediate layer $image"
+
+ run jq -r ".Docker.config.Labels.emptylabel" <<<"$inspect"
+ assert "$output" = "" "emptylabel in intermediate layer $image"
+ done
+}
+
+@test "bud and test --unsetlabel" {
+ _prefetch registry.fedoraproject.org/fedora-minimal
+ run_buildah --version
+ local -a output_fields=($output)
+ # third token of "buildah version <n> ..." is the version number
+ buildah_version=${output_fields[2]}
+ run_buildah build $WITH_POLICY_JSON -t exp -f $BUDFILES/base-with-labels/Containerfile
+
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "license"}}' exp
+ expect_output "MIT" "license must be MIT from fedora base image"
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "name"}}' exp
+ expect_output "fedora" "name must be fedora from base image"
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "vendor"}}' exp
+ expect_output "Fedora Project" "vendor must be fedora from base image"
+
+ run_buildah build $WITH_POLICY_JSON --unsetlabel license --unsetlabel name --unsetlabel vendor --unsetlabel version --label hello=world -t exp -f $BUDFILES/base-with-labels/Containerfile
+ # no labels should be inherited from base image only the, buildah version label
+ # and `hello=world` which we just added using cli flag
+ want_output='map["hello":"world" "io.buildah.version":"'$buildah_version'"]'
+ run_buildah inspect --format '{{printf "%q" .Docker.Config.Labels}}' exp
+ expect_output "$want_output"
+}
+
+@test "build using intermediate images should not inherit label" {
+ _prefetch alpine
+
+ # Build imageone, with a label
+ run_buildah build --no-cache --layers --label somefancylabel=true $WITH_POLICY_JSON -t imageone -f Dockerfile.name $BUDFILES/multi-stage-builds
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "somefancylabel"}}' imageone
+ expect_output "true" "imageone: somefancylabel"
+
+ # Build imagetwo. Must use all steps from cache but should not contain label
+ run_buildah build --layers $WITH_POLICY_JSON -t imagetwo -f Dockerfile.name $BUDFILES/multi-stage-builds
+ for i in 2 6;do
+ expect_output --substring --from="${lines[$i]}" "Using cache" \
+ "build imagetwo (no label), line $i"
+ done
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "somefancylabel"}}' imagetwo
+ expect_output "" "imagetwo: somefancylabel"
+
+ # build another multi-stage image with different label, it should use stages from cache from previous build
+ run_buildah build --layers $WITH_POLICY_JSON --label anotherfancylabel=true -t imagethree -f Dockerfile.name $BUDFILES/multi-stage-builds
+ for i in 2 6;do
+ expect_output --substring --from="${lines[$i]}" "Using cache" \
+ "build imagethree ('anotherfancylabel'), line $i"
+ done
+
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "somefancylabel"}}' imagethree
+ expect_output "" "imagethree: somefancylabel"
+
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "anotherfancylabel"}}' imagethree
+ expect_output "true" "imagethree: anotherfancylabel"
+}
+
+@test "bud-multi-stage-builds" {
+ # Selecting a stage by index: only that stage's content lands in the image.
+ _prefetch alpine
+ target=multi-stage-index
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds/Dockerfile.index $BUDFILES/multi-stage-builds
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ cmp $root/Dockerfile.index $BUDFILES/multi-stage-builds/Dockerfile.index
+ test -s $root/etc/passwd
+ run_buildah rm ${cid}
+ run_buildah rmi -a
+
+ # Selecting a stage by name.
+ _prefetch alpine
+ target=multi-stage-name
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.name $BUDFILES/multi-stage-builds
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ cmp $root/Dockerfile.name $BUDFILES/multi-stage-builds/Dockerfile.name
+ test ! -s $root/etc/passwd
+ run_buildah rm ${cid}
+ run_buildah rmi -a
+
+ # Mixed index and name references.
+ target=multi-stage-mixed
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds/Dockerfile.mixed $BUDFILES/multi-stage-builds
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ cmp $root/Dockerfile.name $BUDFILES/multi-stage-builds/Dockerfile.name
+ cmp $root/Dockerfile.index $BUDFILES/multi-stage-builds/Dockerfile.index
+ cmp $root/Dockerfile.mixed $BUDFILES/multi-stage-builds/Dockerfile.mixed
+}
+
+@test "bud-multi-stage-builds-small-as" {
+ # Same as above, but the Dockerfiles use lowercase "as" for stage names.
+ _prefetch alpine
+ target=multi-stage-index
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds-small-as/Dockerfile.index $BUDFILES/multi-stage-builds-small-as
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ cmp $root/Dockerfile.index $BUDFILES/multi-stage-builds-small-as/Dockerfile.index
+ test -s $root/etc/passwd
+ run_buildah rm ${cid}
+ run_buildah rmi -a
+
+ _prefetch alpine
+ target=multi-stage-name
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.name $BUDFILES/multi-stage-builds-small-as
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ cmp $root/Dockerfile.name $BUDFILES/multi-stage-builds-small-as/Dockerfile.name
+ test ! -s $root/etc/passwd
+ run_buildah rm ${cid}
+ run_buildah rmi -a
+
+ target=multi-stage-mixed
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds-small-as/Dockerfile.mixed $BUDFILES/multi-stage-builds-small-as
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ cmp $root/Dockerfile.name $BUDFILES/multi-stage-builds-small-as/Dockerfile.name
+ cmp $root/Dockerfile.index $BUDFILES/multi-stage-builds-small-as/Dockerfile.index
+ cmp $root/Dockerfile.mixed $BUDFILES/multi-stage-builds-small-as/Dockerfile.mixed
+}
+
+@test "bud-preserve-subvolumes" {
+ # This Dockerfile needs us to be able to handle a working RUN instruction.
+ skip_if_no_runtime
+
+ _prefetch alpine
+ target=volume-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/preserve-volumes
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ # Files written before a VOLUME declaration must survive; files written
+ # after it (presumably anothervolfile/subvolfile — confirm against the
+ # Dockerfile in bud/preserve-volumes) must be discarded.
+ test -s $root/vol/subvol/subsubvol/subsubvolfile
+ test ! -s $root/vol/subvol/subvolfile
+ test -s $root/vol/volfile
+ test -s $root/vol/Dockerfile
+ test -s $root/vol/Dockerfile2
+ test ! -s $root/vol/anothervolfile
+}
+
+# Helper function for several of the tests which pull from http.
+#
+# Usage: _test_http SUBDIRECTORY URL_PATH [EXTRA ARGS]
+#
+# SUBDIRECTORY is a subdirectory path under the 'buds' subdirectory.
+# This will be the argument to starthttpd(), i.e. where
+# the httpd will serve files.
+#
+# URL_PATH is the path requested by buildah from the http server,
+# probably 'Dockerfile' or 'context.tar'
+#
+# [EXTRA ARGS] if present, will be passed to buildah on the 'bud'
+# command line; it is intended for '-f subdir/Dockerfile'.
+#
+function _test_http() {
+ local testdir=$1; shift; # in: subdirectory under bud/
+ local urlpath=$1; shift; # in: path to request from localhost
+
+ starthttpd "$BUDFILES/$testdir"
+ target=scratch-image
+ # remaining "$@" args are passed through to build (e.g. -f subdir/Dockerfile)
+ # NOTE(review): HTTP_SERVER_PORT is presumably exported by starthttpd —
+ # confirm in helpers.bash
+ run_buildah build $WITH_POLICY_JSON \
+ -t ${target} \
+ "$@" \
+ http://0.0.0.0:${HTTP_SERVER_PORT}/$urlpath
+ stophttpd
+ # the built image must be usable as a base
+ run_buildah from ${target}
+}
+
+# Helper function for several of the tests which verifies compression.
+#
+# Usage: validate_instance_compression INDEX MANIFEST ARCH COMPRESSION
+#
+# INDEX instance which needs to be verified in
+# provided manifest list.
+#
+# MANIFEST OCI manifest specification in json format
+#
+# ARCH instance architecture
+#
+# COMPRESSION compression algorithm name; e.g "zstd".
+#
+function validate_instance_compression {
+ case $4 in
+
+ gzip)
+ run jq -r '.manifests['$1'].annotations' <<< $2
+ # annotation is `null` for gzip compression
+ assert "$output" = "null" ".manifests[$1].annotations (null means gzip)"
+ ;;
+
+ zstd)
+ # annotation `'"io.github.containers.compression.zstd": "true"'` must be there for zstd compression
+ run jq -r '.manifests['$1'].annotations."io.github.containers.compression.zstd"' <<< $2
+ assert "$output" = "true" ".manifests[$1].annotations.'io.github.containers.compression.zstd' (io.github.containers.compression.zstd must be set)"
+ ;;
+ esac
+
+ # regardless of compression, the instance's architecture must match
+ # ($3 is unquoted; architecture strings contain no whitespace)
+ run jq -r '.manifests['$1'].platform.architecture' <<< $2
+ assert "$output" = $3 ".manifests[$1].platform.architecture"
+}
+
+@test "bud-http-Dockerfile" {
+ # Containerfile fetched directly over http, no context archive.
+ _test_http from-scratch Containerfile
+}
+
+@test "bud-http-context-with-Dockerfile" {
+ # context.tar served over http; the Dockerfile is expected inside it.
+ _test_http http-context context.tar
+}
+
+@test "bud-http-context-dir-with-Dockerfile" {
+ # Dockerfile lives in a subdirectory of the served context archive.
+ _test_http http-context-subdir context.tar -f context/Dockerfile
+}
+
+@test "bud-git-context" {
+ # We need git to be around to handle cloning a repository.
+ if ! which git ; then
+ skip "no git in PATH"
+ fi
+ target=giturl-image
+ # Any repo would do, but this one is small, is FROM: scratch, and local.
+ if ! start_git_daemon ; then
+ skip "error running git daemon"
+ fi
+ gitrepo=git://localhost:${GITPORT}/repo
+ run_buildah build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+ run_buildah from ${target}
+}
+
+@test "bud-git-context-subdirectory" {
+ # We need git to be around to handle cloning a repository.
+ if ! which git ; then
+ skip "no git in PATH"
+ fi
+ target=giturl-image
+ # Any repo would do, but this one is small, is FROM: scratch, local, and has
+ # its entire build context in a subdirectory of the repository.
+ if ! start_git_daemon ${TEST_SOURCES}/git-daemon/subdirectory.tar.gz ; then
+ skip "error running git daemon"
+ fi
+ # "#branch:subdir" syntax selects a branch and a context subdirectory
+ gitrepo=git://localhost:${GITPORT}/repo#main:nested/subdirectory
+ tmpdir="${TEST_SCRATCH_DIR}/build"
+ mkdir -p "${tmpdir}"
+ TMPDIR="${tmpdir}" run_buildah build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+ run_buildah from "${target}"
+ # the clone's temporary directory must have been cleaned up afterwards
+ run find "${tmpdir}" -type d -print
+ echo "$output"
+ test "${#lines[*]}" -le 2
+}
+
+@test "bud-git-context-failure" {
+ # We need git to be around to try cloning a repository, even though it'll fail
+ # and exit with return code 128.
+ if ! which git ; then
+ skip "no git in PATH"
+ fi
+ target=giturl-image
+ gitrepo=git:///tmp/no-such-repository
+ run_buildah 128 build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+ # Expect part of what git would have told us... before things went horribly wrong
+ expect_output --substring "failed while performing"
+ expect_output --substring "git fetch"
+}
+
+@test "bud-github-context" {
+ # Note: this test requires network access to github.com.
+ target=github-image
+ # Any repo should do, but this one is small and is FROM: scratch.
+ gitrepo=github.com/projectatomic/nulecule-library
+ run_buildah build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+ run_buildah from ${target}
+}
+
+# Containerfile in this repo should only exist on older commit and
+# not on HEAD or the default branch.
+@test "bud-github-context-from-commit" {
+ if ! which git ; then
+ skip "no git in PATH"
+ fi
+ target=giturl-image
+ # Any repo would do, but this one is small, is FROM: scratch, local, and has
+ # its entire build context in a subdirectory of the repository.
+ if ! start_git_daemon ${TEST_SOURCES}/git-daemon/repo-with-containerfile-on-old-commit.tar.gz ; then
+ skip "error running git daemon"
+ fi
+ # Containerfile in this repo should only exist on older commit and
+ # not on HEAD or the default branch.
+ gitrepo=git://localhost:${GITPORT}/repo#f94193d34548eb58650a10a5183936d32c2d3280
+ run_buildah build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+ expect_output --substring "FROM scratch"
+ expect_output --substring "COMMIT giturl-image"
+ # Verify that build must fail on default `main` branch since we
+ # don't have a `Containerfile` on main branch.
+ gitrepo=git://localhost:${GITPORT}/repo#main
+ run_buildah 125 build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+ expect_output --substring "cannot find Containerfile or Dockerfile"
+}
+
+@test "bud-github-context-with-branch-subdir-commit" {
+ # Note: this test requires network access to github.com.
+ subdir=tests/bud/from-scratch
+ target=github-image
+ gitrepo=https://github.com/containers/buildah.git#main:$subdir
+ run_buildah build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+ # check syntax only for subdirectory
+ gitrepo=https://github.com/containers/buildah.git#:$subdir
+ run_buildah build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+ # Try pulling repo with specific commit
+ # This commit is the initial commit, which used Dockerfile rather then Containerfile
+ gitrepo=https://github.com/containers/buildah.git#761597056c8dc2bb1efd67e937a196ddff1fa7a6:$subdir
+ run_buildah build $WITH_POLICY_JSON -t ${target} "${gitrepo}"
+}
+
+@test "bud-additional-tags" {
+  target=scratch-image
+  target2=another-scratch-image
+  target3=so-many-scratch-images
+  # One build, three -t tags; each tag must be independently usable.
+  run_buildah build $WITH_POLICY_JSON -t ${target} -t docker.io/${target2} -t ${target3} $BUDFILES/from-scratch
+  run_buildah images
+  run_buildah from --quiet ${target}
+  cid=$output
+  run_buildah rm ${cid}
+  run_buildah from --quiet $WITH_POLICY_JSON library/${target2}
+  cid=$output
+  run_buildah rm ${cid}
+  run_buildah from --quiet $WITH_POLICY_JSON ${target3}:latest
+  run_buildah rm $output
+
+  # Removing all three names: expect one "untagged:" line per tag, then a
+  # final line that is the bare 64-hex image ID of the deleted image.
+  run_buildah rmi $target3 $target2 $target
+  expect_line_count 4
+  for i in 0 1 2;do
+    expect_output --substring --from="${lines[$i]}" "untagged: "
+  done
+  expect_output --substring --from="${lines[3]}" '^[0-9a-f]{64}$'
+}
+
+@test "bud-additional-tags-cached" {
+  _prefetch busybox
+  target=tagged-image
+  target2=another-tagged-image
+  target3=yet-another-tagged-image
+  target4=still-another-tagged-image
+  # With --layers the second build should hit the cache, so all four tags
+  # must resolve to the same image ID -- and not to the base busybox image.
+  run_buildah build --layers $WITH_POLICY_JSON -t ${target} $BUDFILES/addtl-tags
+  run_buildah build --layers $WITH_POLICY_JSON -t ${target2} -t ${target3} -t ${target4} $BUDFILES/addtl-tags
+  run_buildah inspect -f '{{.FromImageID}}' busybox
+  busyboxid="$output"
+  run_buildah inspect -f '{{.FromImageID}}' ${target}
+  targetid="$output"
+  assert "$targetid" != "$busyboxid" "FromImageID(target) != busybox"
+  run_buildah inspect -f '{{.FromImageID}}' ${target2}
+  expect_output "$targetid" "target2 -> .FromImageID"
+  run_buildah inspect -f '{{.FromImageID}}' ${target3}
+  expect_output "$targetid" "target3 -> .FromImageID"
+  run_buildah inspect -f '{{.FromImageID}}' ${target4}
+  expect_output "$targetid" "target4 -> .FromImageID"
+}
+
+@test "bud-volume-perms" {
+  # This Dockerfile needs us to be able to handle a working RUN instruction.
+  skip_if_no_runtime
+
+  _prefetch alpine
+  target=volume-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/volume-perms
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah mount ${cid}
+  root=$output
+  # File written under the VOLUME during the build must not end up with
+  # content in the committed image.
+  test ! -s $root/vol/subvol/subvolfile
+  # stat -c %f prints the raw file mode in hex: directory bit plus 0755.
+  run stat -c %f $root/vol/subvol
+  assert "$status" -eq 0 "status code from stat $root/vol/subvol"
+  expect_output "41ed" "stat($root/vol/subvol) [0x41ed = 040755]"
+}
+
+@test "bud-volume-ownership" {
+  # This Dockerfile needs us to be able to handle a working RUN instruction.
+  skip_if_no_runtime
+
+  _prefetch alpine
+  target=volume-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/volume-ownership
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  # Ownership assigned to the volume path during the build must survive
+  # into containers created from the image.
+  run_buildah run $cid stat -c "%U %G" /vol/subvol
+  expect_output "testuser testgroup"
+}
+
+@test "bud-builtin-volume-symlink" {
+  # This Dockerfile needs us to be able to handle a working RUN instruction.
+  skip_if_no_runtime
+
+  _prefetch alpine
+  # First flavor: a VOLUME path that is a symlink.
+  target=volume-symlink
+  run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/volume-symlink
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah run $cid echo hello
+  expect_output "hello"
+
+  # Second flavor: same context, Dockerfile without the symlink.
+  target=volume-no-symlink
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/volume-symlink/Dockerfile.no-symlink $BUDFILES/volume-symlink
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah run $cid echo hello
+  expect_output "hello"
+}
+
+@test "bud-from-glob" {
+  _prefetch alpine
+  target=alpine-image
+  # Dockerfile2.glob presumably copies via a glob pattern; both copied
+  # files must be byte-identical to the originals in the context dir.
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile2.glob $BUDFILES/from-multiple-files
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah mount ${cid}
+  root=$output
+  cmp $root/Dockerfile1.alpine $BUDFILES/from-multiple-files/Dockerfile1.alpine
+  cmp $root/Dockerfile2.withfrom $BUDFILES/from-multiple-files/Dockerfile2.withfrom
+}
+
+@test "bud-maintainer" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/maintainer
+ run_buildah inspect --type=image --format '{{.Docker.Author}}' ${target}
+ expect_output "kilroy"
+ run_buildah inspect --type=image --format '{{.OCIv1.Author}}' ${target}
+ expect_output "kilroy"
+}
+
+@test "bud-unrecognized-instruction" {
+  _prefetch alpine
+  # An unknown instruction must abort the build (exit 125) and the
+  # offending keyword must be named in the error output.
+  run_buildah 125 build $WITH_POLICY_JSON -t alpine-image $BUDFILES/unrecognized
+  expect_output --substring "BOGUS"
+}
+
+@test "bud-shell" {
+  _prefetch alpine
+  target=alpine-image
+  # SHELL is only representable in docker-format image configs.
+  run_buildah build --format docker $WITH_POLICY_JSON -t ${target} $BUDFILES/shell
+  run_buildah inspect --type=image --format '{{printf "%q" .Docker.Config.Shell}}' ${target}
+  expect_output '["/bin/sh" "-c"]' ".Docker.Config.Shell (original)"
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  ctr=$output
+  # "buildah config --shell" must override the shell in the working container.
+  run_buildah config --shell "/bin/bash -c" ${ctr}
+  run_buildah inspect --type=container --format '{{printf "%q" .Docker.Config.Shell}}' ${ctr}
+  expect_output '["/bin/bash" "-c"]' ".Docker.Config.Shell (changed)"
+}
+
+@test "bud-shell during build in Docker format" {
+  _prefetch alpine
+  target=alpine-image
+  # The default shell should be reported as /bin/sh while building.
+  run_buildah build --format docker $WITH_POLICY_JSON -t ${target} -f $BUDFILES/shell/Dockerfile.build-shell-default $BUDFILES/shell
+  expect_output --substring "SHELL=/bin/sh"
+}
+
+@test "bud-shell during build in OCI format" {
+  _prefetch alpine
+  target=alpine-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/shell/Dockerfile.build-shell-default $BUDFILES/shell
+  expect_output --substring "SHELL=/bin/sh"
+}
+
+@test "bud-shell changed during build in Docker format" {
+  _prefetch ubuntu
+  target=ubuntu-image
+  # In docker format a SHELL instruction takes effect mid-build.
+  run_buildah build --format docker $WITH_POLICY_JSON -t ${target} -f $BUDFILES/shell/Dockerfile.build-shell-custom $BUDFILES/shell
+  expect_output --substring "SHELL=/bin/bash"
+}
+
+@test "bud-shell changed during build in OCI format" {
+  _prefetch ubuntu
+  target=ubuntu-image
+  # OCI configs cannot store SHELL; buildah must warn and ignore it.
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/shell/Dockerfile.build-shell-custom $BUDFILES/shell
+  expect_output --substring "SHELL is not supported for OCI image format, \[/bin/bash -c\] will be ignored."
+}
+
+@test "bud with symlinks" {
+  _prefetch alpine
+  target=alpine-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/symlink
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah mount ${cid}
+  root=$output
+  # Content copied through symlinked paths must land in the link target.
+  run ls $root/data/log
+  assert "$status" -eq 0 "status from ls $root/data/log"
+  expect_output --substring "test" "ls \$root/data/log"
+  expect_output --substring "blah.txt" "ls \$root/data/log"
+
+  # The symlinks themselves must be preserved in the image.
+  run ls -al $root
+  assert "$status" -eq 0 "status from ls -al $root"
+  expect_output --substring "test-log -> /data/log" "ls -l \$root/data/log"
+  expect_output --substring "blah -> /test-log" "ls -l \$root/data/log"
+}
+
+@test "bud with symlinks to relative path" {
+  _prefetch alpine
+  target=alpine-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.relative-symlink $BUDFILES/symlink
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah mount ${cid}
+  root=$output
+  run ls $root/log
+  assert "$status" -eq 0 "status from ls $root/log"
+  expect_output --substring "test" "ls \$root/log"
+
+  # A relative symlink target ("../log") must be kept relative.
+  run ls -al $root
+  assert "$status" -eq 0 "status from ls -al $root"
+  expect_output --substring "test-log -> ../log" "ls -l \$root/log"
+  test -r $root/var/data/empty
+}
+
+@test "bud with multiple symlinks in a path" {
+  _prefetch alpine
+  target=alpine-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/symlink/Dockerfile.multiple-symlinks $BUDFILES/symlink
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah mount ${cid}
+  root=$output
+  # Final resolved destination of the symlink chain.
+  run ls $root/data/log
+  assert "$status" -eq 0 "status from ls $root/data/log"
+  expect_output --substring "bin" "ls \$root/data/log"
+  expect_output --substring "blah.txt" "ls \$root/data/log"
+
+  # Each hop in the chain must still be a symlink in the image.
+  run ls -al $root/myuser
+  assert "$status" -eq 0 "status from ls -al $root/myuser"
+  expect_output --substring "log -> /test" "ls -al \$root/myuser"
+
+  run ls -al $root/test
+  assert "$status" -eq 0 "status from ls -al $root/test"
+  expect_output --substring "bar -> /test-log" "ls -al \$root/test"
+
+  run ls -al $root/test-log
+  assert "$status" -eq 0 "status from ls -al $root/test-log"
+  expect_output --substring "foo -> /data/log" "ls -al \$root/test-log"
+}
+
+@test "bud with multiple symlink pointing to itself" {
+  _prefetch alpine
+  target=alpine-image
+  # A self-referencing symlink chain must fail cleanly (ELOOP), not hang.
+  run_buildah 125 build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/symlink/Dockerfile.symlink-points-to-itself $BUDFILES/symlink
+  assert "$output" =~ "building .* open /test-log/test: too many levels of symbolic links"
+}
+
+@test "bud multi-stage with symlink to absolute path" {
+  _prefetch ubuntu
+  target=ubuntu-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.absolute-symlink $BUDFILES/symlink
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah mount ${cid}
+  root=$output
+  run ls $root/bin
+  assert "$status" -eq 0 "status from ls $root/bin"
+  expect_output --substring "myexe" "ls \$root/bin"
+
+  # The copied file's content, not just its name, must be correct.
+  run cat $root/bin/myexe
+  assert "$status" -eq 0 "status from cat $root/bin/myexe"
+  expect_output "symlink-test" "cat \$root/bin/myexe"
+}
+
+@test "bud multi-stage with dir symlink to absolute path" {
+  _prefetch ubuntu
+  target=ubuntu-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.absolute-dir-symlink $BUDFILES/symlink
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+  cid=$output
+  run_buildah mount ${cid}
+  root=$output
+  run ls $root/data
+  assert "$status" -eq 0 "status from ls $root/data"
+  expect_output --substring "myexe" "ls \$root/data"
+}
+
+@test "bud with ENTRYPOINT and RUN" {
+  _prefetch alpine
+  target=alpine-image
+  # The RUN instruction must execute and emit its marker string.
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.entrypoint-run $BUDFILES/run-scenarios
+  expect_output --substring "unique.test.string"
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+}
+
+@test "bud with ENTRYPOINT and empty RUN" {
+  _prefetch alpine
+  target=alpine-image
+  # Use the canonical "build" subcommand; this file otherwise never uses
+  # the legacy "bud" alias.  Exit code 2 is the shell's own error status.
+  run_buildah 2 build $WITH_POLICY_JSON -t ${target} -f Dockerfile.entrypoint-empty-run $BUDFILES/run-scenarios
+  expect_output --substring " -c requires an argument"
+  expect_output --substring "building at STEP.*: exit status 2"
+}
+
+@test "bud with CMD and RUN" {
+  _prefetch alpine
+  target=alpine-image
+  # RUN must execute and emit its marker string even when CMD is present.
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/run-scenarios/Dockerfile.cmd-run $BUDFILES/run-scenarios
+  expect_output --substring "unique.test.string"
+  run_buildah from --quiet $WITH_POLICY_JSON ${target}
+}
+
+@test "bud with CMD and empty RUN" {
+  _prefetch alpine
+  target=alpine-image
+  # Use the canonical "build" subcommand; this file otherwise never uses
+  # the legacy "bud" alias.  Exit code 2 is the shell's own error status.
+  run_buildah 2 build $WITH_POLICY_JSON -t ${target} -f Dockerfile.cmd-empty-run $BUDFILES/run-scenarios
+  expect_output --substring " -c requires an argument"
+  expect_output --substring "building at STEP.*: exit status 2"
+}
+
+@test "bud with ENTRYPOINT, CMD and RUN" {
+  _prefetch alpine
+  target=alpine-image
+  # RUN must execute and emit its marker string with both ENTRYPOINT and CMD set.
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/run-scenarios/Dockerfile.entrypoint-cmd-run $BUDFILES/run-scenarios
+  expect_output --substring "unique.test.string"
+  run_buildah from $WITH_POLICY_JSON ${target}
+}
+
+@test "bud with ENTRYPOINT, CMD and empty RUN" {
+  _prefetch alpine
+  target=alpine-image
+  # Use the canonical "build" subcommand; this file otherwise never uses
+  # the legacy "bud" alias.  Exit code 2 is the shell's own error status.
+  run_buildah 2 build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/run-scenarios/Dockerfile.entrypoint-cmd-empty-run $BUDFILES/run-scenarios
+  expect_output --substring " -c requires an argument"
+  expect_output --substring "building at STEP.*: exit status 2"
+}
+
+# Determines if a variable set with ENV is available to following commands in the Dockerfile
+@test "bud access ENV variable defined in same source file" {
+  _prefetch alpine
+  target=env-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/env/Dockerfile.env-same-file $BUDFILES/env
+  expect_output --substring ":unique.test.string:"
+  run_buildah from $WITH_POLICY_JSON ${target}
+}
+
+# Determines if a variable set with ENV in an image is available to commands in downstream Dockerfile
+@test "bud access ENV variable defined in FROM image" {
+  _prefetch alpine
+  from_target=env-from-image
+  target=env-image
+  # First build the base image carrying the ENV, then build on top of it.
+  run_buildah build $WITH_POLICY_JSON -t ${from_target} -f $BUDFILES/env/Dockerfile.env-same-file $BUDFILES/env
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/env/Dockerfile.env-from-image $BUDFILES/env
+  expect_output --substring "@unique.test.string@"
+  run_buildah from --quiet ${from_target}
+  from_cid=$output
+  run_buildah from ${target}
+}
+
+@test "bud ENV preserves special characters after commit" {
+  _prefetch ubuntu
+  from_target=special-chars
+  run_buildah build $WITH_POLICY_JSON -t ${from_target} -f $BUDFILES/env/Dockerfile.special-chars $BUDFILES/env
+  run_buildah from --quiet ${from_target}
+  cid=$output
+  run_buildah run ${cid} env
+  # The literal "$(PREFIX)" must survive the commit unexpanded.
+  expect_output --substring "LIB=\\$\(PREFIX\)/lib"
+}
+
+@test "bud with Dockerfile from valid URL" {
+  target=url-image
+  url=https://raw.githubusercontent.com/containers/buildah/main/tests/bud/from-scratch/Dockerfile
+  run_buildah build $WITH_POLICY_JSON -t ${target} ${url}
+  run_buildah from ${target}
+}
+
+@test "bud with Dockerfile from invalid URL" {
+  target=url-image
+  url=https://raw.githubusercontent.com/containers/buildah/main/tests/bud/from-scratch/Dockerfile.bogus
+  # A 404 while fetching the Dockerfile must fail the build with 125.
+  run_buildah 125 build $WITH_POLICY_JSON -t ${target} ${url}
+  expect_output --substring "invalid response status 404"
+}
+
+# When provided with a -f flag and directory, buildah will look for the alternate Dockerfile name in the supplied directory
+@test "bud with -f flag, alternate Dockerfile name" {
+  target=fileflag-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.noop-flags $BUDFILES/run-scenarios
+  run_buildah from ${target}
+}
+
+# Following flags are configured to result in noop but should not affect buildah bud behavior
+@test "bud with --cache-from noop flag" {
+  target=noop-image
+  # A nonexistent cache source must be ignored, not fail the build.
+  run_buildah build --cache-from=invalidimage $WITH_POLICY_JSON -t ${target} -f Dockerfile.noop-flags $BUDFILES/run-scenarios
+  run_buildah from ${target}
+}
+
+@test "bud with --compress noop flag" {
+  target=noop-image
+  run_buildah build --compress $WITH_POLICY_JSON -t ${target} -f Dockerfile.noop-flags $BUDFILES/run-scenarios
+  run_buildah from ${target}
+}
+
+@test "bud with --cpu-shares flag, no argument" {
+  target=bud-flag
+  # With no value, --cpu-shares consumes the next token ($WITH_POLICY_JSON),
+  # which does not parse as an integer.
+  run_buildah 125 build --cpu-shares $WITH_POLICY_JSON -t ${target} -f $BUDFILES/from-scratch/Containerfile $BUDFILES/from-scratch
+  expect_output --substring "invalid argument .* invalid syntax"
+}
+
+@test "bud with --cpu-shares flag, invalid argument" {
+  target=bud-flag
+  run_buildah 125 build --cpu-shares bogus $WITH_POLICY_JSON -t ${target} -f $BUDFILES/from-scratch/Containerfile $BUDFILES/from-scratch
+  expect_output --substring "invalid argument \"bogus\" for "
+}
+
+@test "bud with --cpu-shares flag, valid argument" {
+  target=bud-flag
+  run_buildah build --cpu-shares 2 $WITH_POLICY_JSON -t ${target} -f $BUDFILES/from-scratch/Containerfile $BUDFILES/from-scratch
+  run_buildah from ${target}
+}
+
+# Same three cases via the short -c spelling of --cpu-shares.
+@test "bud with --cpu-shares short flag (-c), no argument" {
+  target=bud-flag
+  run_buildah 125 build -c $WITH_POLICY_JSON -t ${target} -f $BUDFILES/from-scratch/Containerfile $BUDFILES/from-scratch
+  expect_output --substring "invalid argument .* invalid syntax"
+}
+
+@test "bud with --cpu-shares short flag (-c), invalid argument" {
+  target=bud-flag
+  run_buildah 125 build -c bogus $WITH_POLICY_JSON -t ${target} -f $BUDFILES/from-scratch/Containerfile $BUDFILES/from-scratch
+  expect_output --substring "invalid argument \"bogus\" for "
+}
+
+@test "bud with --cpu-shares short flag (-c), valid argument" {
+  target=bud-flag
+  run_buildah build -c 2 $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+  run_buildah from ${target}
+}
+
+@test "bud-onbuild" {
+  _prefetch alpine
+  target=onbuild
+  # ONBUILD is docker-format-only; the instructions are recorded verbatim.
+  run_buildah build --format docker $WITH_POLICY_JSON -t ${target} $BUDFILES/onbuild
+  run_buildah inspect --format '{{printf "%q" .Docker.Config.OnBuild}}' ${target}
+  expect_output '["RUN touch /onbuild1" "RUN touch /onbuild2"]'
+  run_buildah from --quiet ${target}
+  cid=${lines[0]}
+  run_buildah mount ${cid}
+  root=$output
+
+  # Creating a container from the image must fire the recorded triggers.
+  test -e ${root}/onbuild1
+  test -e ${root}/onbuild2
+
+  run_buildah umount ${cid}
+  run_buildah rm ${cid}
+
+  # A second build FROM the first image: old triggers fire, and only the
+  # new image's own ONBUILD instruction is recorded.
+  target=onbuild-image2
+  run_buildah build --format docker $WITH_POLICY_JSON -t ${target} -f Dockerfile1 $BUDFILES/onbuild
+  run_buildah inspect --format '{{printf "%q" .Docker.Config.OnBuild}}' ${target}
+  expect_output '["RUN touch /onbuild3"]'
+  run_buildah from --quiet ${target}
+  cid=${lines[0]}
+  run_buildah mount ${cid}
+  root=$output
+
+  test -e ${root}/onbuild1
+  test -e ${root}/onbuild2
+  test -e ${root}/onbuild3
+  run_buildah umount ${cid}
+
+  # "buildah config --onbuild" replaces the trigger list on the container.
+  run_buildah config --onbuild "RUN touch /onbuild4" ${cid}
+
+  target=onbuild-image3
+  run_buildah commit $WITH_POLICY_JSON --format docker ${cid} ${target}
+  run_buildah inspect --format '{{printf "%q" .Docker.Config.OnBuild}}' ${target}
+  expect_output '["RUN touch /onbuild4"]'
+}
+
+@test "bud-onbuild-layers" {
+  _prefetch alpine
+  target=onbuild
+  # ONBUILD recording must also work with layer caching enabled.
+  run_buildah build --format docker $WITH_POLICY_JSON --layers -t ${target} -f Dockerfile2 $BUDFILES/onbuild
+  run_buildah inspect --format '{{printf "%q" .Docker.Config.OnBuild}}' ${target}
+  expect_output '["RUN touch /onbuild1" "RUN touch /onbuild2"]'
+}
+
+@test "bud-logfile" {
+  _prefetch alpine
+  rm -f ${TEST_SCRATCH_DIR}/logfile
+  run_buildah build --logfile ${TEST_SCRATCH_DIR}/logfile $WITH_POLICY_JSON $BUDFILES/preserve-volumes
+  # --logfile must produce a non-empty log.
+  test -s ${TEST_SCRATCH_DIR}/logfile
+}
+
+@test "bud-logfile-with-split-logfile-by-platform" {
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p $mytmpdir
+
+  cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+COPY . .
+_EOF
+
+  rm -f ${TEST_SCRATCH_DIR}/logfile
+  # --logsplit must write one log per platform, suffixed _os_arch.
+  run_buildah build --logfile ${TEST_SCRATCH_DIR}/logfile --logsplit --platform linux/arm64,linux/amd64 $WITH_POLICY_JSON ${mytmpdir}
+  run cat ${TEST_SCRATCH_DIR}/logfile_linux_arm64
+  expect_output --substring "FROM alpine"
+  # expect_output --substring matches with bash's =~ (ERE); unescaped
+  # "[linux/arm64]" is a bracket expression matching any one of those
+  # characters, making the check vacuous.  Escape to match the real tag.
+  expect_output --substring "\[linux/arm64\]"
+  run cat ${TEST_SCRATCH_DIR}/logfile_linux_amd64
+  expect_output --substring "FROM alpine"
+  expect_output --substring "\[linux/amd64\]"
+}
+
+@test "bud with ARGS" {
+  _prefetch alpine
+  target=alpine-image
+  # The ARG's default value must be visible to build-time commands.
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.args $BUDFILES/run-scenarios
+  expect_output --substring "arg_value"
+}
+
+@test "bud with unused ARGS" {
+  _prefetch alpine
+  target=alpine-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.multi-args --build-arg USED_ARG=USED_VALUE $BUDFILES/run-scenarios
+  expect_output --substring "USED_VALUE"
+  # assert ... !~ matches with bash's =~ (ERE), so the brackets must be
+  # escaped: an unescaped "[UNUSED_ARG]" is a character class, and the
+  # negative assertion would pass even when the warning IS present.
+  assert "$output" !~ "one or more build args were not consumed: \[UNUSED_ARG\]"
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.multi-args --build-arg USED_ARG=USED_VALUE --build-arg UNUSED_ARG=whaaaat $BUDFILES/run-scenarios
+  expect_output --substring "USED_VALUE"
+  expect_output --substring "one or more build args were not consumed: \[UNUSED_ARG\]"
+}
+
+@test "bud with multi-value ARGS" {
+  _prefetch alpine
+  target=alpine-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile.multi-args --build-arg USED_ARG=plugin1,plugin2,plugin3 $BUDFILES/run-scenarios
+  expect_output --substring "plugin1,plugin2,plugin3"
+  # A comma-separated value is a single value for a single arg: no
+  # "unconsumed build args" warning may appear.  Use the assert helper
+  # directly (as elsewhere in this file) instead of an if-then-fail hack.
+  assert "$output" !~ "one or more build args were not consumed"
+}
+
+@test "bud-from-stdin" {
+  target=scratch-image
+  # "-f -" must read the Dockerfile from standard input.
+  cat $BUDFILES/from-multiple-files/Dockerfile1.scratch | run_buildah build $WITH_POLICY_JSON -t ${target} -f - $BUDFILES/from-multiple-files
+  run_buildah from --quiet ${target}
+  cid=$output
+  run_buildah mount ${cid}
+  root=$output
+  test -s $root/Dockerfile1
+}
+
+@test "bud with preprocessor" {
+  _prefetch alpine
+  target=alpine-image
+  # The .in suffix triggers preprocessing of the Containerfile.
+  run_buildah build -q $WITH_POLICY_JSON -t ${target} -f Decomposed.in $BUDFILES/preprocess
+}
+
+@test "bud with preprocessor error" {
+  target=alpine-image
+  # Use the canonical "build" subcommand; this file otherwise never uses
+  # the legacy "bud" alias.  The build itself is expected to succeed:
+  # the preprocessor's #error is reported as "Ignoring ..." only.
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f Error.in $BUDFILES/preprocess
+  expect_output --substring "Ignoring <stdin>:5:2: error: #error"
+}
+
+@test "bud-with-rejected-name" {
+  # Image names with uppercase letters must be rejected.
+  target=ThisNameShouldBeRejected
+  run_buildah 125 build -q $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+  expect_output --substring "must be lower"
+}
+
+@test "bud with chown copy" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chown
+  # COPY --chown must apply the requested numeric uid/gid.
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} $BUDFILES/copy-chown
+  expect_output --substring "user:2367 group:3267"
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run alpine-chown -- stat -c '%u' /tmp/copychown.txt
+  # Validate that output starts with "2367"
+  expect_output --substring "2367"
+
+  run_buildah run alpine-chown -- stat -c '%g' /tmp/copychown.txt
+  # Validate that output starts with "3267"
+  expect_output --substring "3267"
+}
+
+@test "bud with combined chown and chmod copy" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chmod
+  # --chown and --chmod on the same COPY must both take effect.
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} -f $BUDFILES/copy-chmod/Dockerfile.combined $BUDFILES/copy-chmod
+  expect_output --substring "chmod:777 user:2367 group:3267"
+}
+
+@test "bud with combined chown and chmod add" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chmod
+  # Same combined-flag check for ADD.
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} -f $BUDFILES/add-chmod/Dockerfile.combined $BUDFILES/add-chmod
+  expect_output --substring "chmod:777 user:2367 group:3267"
+}
+
+@test "bud with chown copy with bad chown flag in Dockerfile with --layers" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chown
+  # A malformed COPY flag must fail with the canonical usage message.
+  run_buildah 125 build $WITH_POLICY_JSON --layers -t ${imgName} -f $BUDFILES/copy-chown/Dockerfile.bad $BUDFILES/copy-chown
+  expect_output --substring "COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image\|stage> flags"
+}
+
+@test "bud with chown copy with unknown substitutions in Dockerfile" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chown
+  # A --chown value that expands to ":" cannot be resolved to a user.
+  run_buildah 125 build $WITH_POLICY_JSON -t ${imgName} -f $BUDFILES/copy-chown/Dockerfile.bad2 $BUDFILES/copy-chown
+  expect_output --substring "looking up UID/GID for \":\": can't find uid for user"
+}
+
+@test "bud with chmod copy" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chmod
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} $BUDFILES/copy-chmod
+  expect_output --substring "rwxrwxrwx"
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run alpine-chmod ls -l /tmp/copychmod.txt
+  # Validate that output starts with 777 == "rwxrwxrwx"
+  expect_output --substring "rwxrwxrwx"
+}
+
+@test "bud with chmod copy with bad chmod flag in Dockerfile with --layers" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chmod
+  run_buildah 125 build $WITH_POLICY_JSON --layers -t ${imgName} -f $BUDFILES/copy-chmod/Dockerfile.bad $BUDFILES/copy-chmod
+  expect_output --substring "COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image\|stage> flags"
+}
+
+@test "bud with chmod add" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chmod
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} $BUDFILES/add-chmod
+  expect_output --substring "rwxrwxrwx"
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run alpine-chmod ls -l /tmp/addchmod.txt
+  # Validate that rights equal 777 == "rwxrwxrwx"
+  expect_output --substring "rwxrwxrwx"
+}
+
+@test "bud with chown add" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chown
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} $BUDFILES/add-chown
+  expect_output --substring "user:2367 group:3267"
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run alpine-chown -- stat -c '%u' /tmp/addchown.txt
+  # Validate that output starts with "2367"
+  expect_output --substring "2367"
+
+  run_buildah run alpine-chown -- stat -c '%g' /tmp/addchown.txt
+  # Validate that output starts with "3267"
+  expect_output --substring "3267"
+}
+
+@test "bud with chown add with bad chown flag in Dockerfile with --layers" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chown
+  run_buildah 125 build $WITH_POLICY_JSON --layers -t ${imgName} -f $BUDFILES/add-chown/Dockerfile.bad $BUDFILES/add-chown
+  expect_output --substring "ADD only supports the --chmod=<permissions>, --chown=<uid:gid>, and --checksum=<checksum> flags"
+}
+
+@test "bud with chmod add with bad chmod flag in Dockerfile with --layers" {
+  _prefetch alpine
+  imgName=alpine-image
+  ctrName=alpine-chmod
+  run_buildah 125 build $WITH_POLICY_JSON --layers -t ${imgName} -f $BUDFILES/add-chmod/Dockerfile.bad $BUDFILES/add-chmod
+  expect_output --substring "ADD only supports the --chmod=<permissions>, --chown=<uid:gid>, and --checksum=<checksum> flags"
+}
+
+@test "bud with ADD with checksum flag" {
+  _prefetch alpine
+  target=alpine-image
+  # Use ${target} consistently; it was previously assigned but unused in
+  # favor of the repeated literal "alpine-image".
+  run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/add-checksum/Containerfile $BUDFILES/add-checksum
+  run_buildah from --quiet $WITH_POLICY_JSON --name alpine-ctr ${target}
+  run_buildah run alpine-ctr -- ls -l /README.md
+  expect_output --substring "README.md"
+}
+
+@test "bud with ADD with bad checksum" {
+  _prefetch alpine
+  target=alpine-image
+  # A --checksum mismatch must abort the build, reporting got vs. want.
+  run_buildah 125 build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/add-checksum/Containerfile.bad-checksum $BUDFILES/add-checksum
+  expect_output --substring "unexpected response digest for \"https://raw.githubusercontent.com/containers/buildah/bf3b55ba74102cc2503eccbaeffe011728d46b20/README.md\": sha256:4fd3aed66b5488b45fe83dd11842c2324fadcc38e1217bb45fbd28d660afdd39, want sha256:0000000000000000000000000000000000000000000000000000000000000000"
+}
+
+@test "bud with ADD with bad checksum flag" {
+  _prefetch alpine
+  target=alpine-image
+  # A malformed ADD flag must fail with the canonical usage message.
+  run_buildah 125 build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/add-checksum/Containerfile.bad $BUDFILES/add-checksum
+  expect_output --substring "ADD only supports the --chmod=<permissions>, --chown=<uid:gid>, and --checksum=<checksum> flags"
+}
+
+@test "bud with ADD file construct" {
+  _prefetch busybox
+  run_buildah build $WITH_POLICY_JSON -t test1 $BUDFILES/add-file
+  run_buildah images -a
+  expect_output --substring "test1"
+
+  run_buildah from --quiet $WITH_POLICY_JSON test1
+  ctr=$output
+  run_buildah containers -a
+  expect_output --substring "test1"
+
+  # The ADDed file must exist at its destination in the container.
+  run_buildah run $ctr ls /var/file2
+  expect_output --substring "/var/file2"
+}
+
+# The next four tests check that COPY/ADD of a single file creates any
+# missing destination directories with mode 755, absolute or relative.
+@test "bud with COPY of single file creates absolute path with correct permissions" {
+  _prefetch ubuntu
+  imgName=ubuntu-image
+  ctrName=ubuntu-copy
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} $BUDFILES/copy-create-absolute-path
+  expect_output --substring "permissions=755"
+
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run ${ctrName} -- stat -c "%a" /usr/lib/python3.7/distutils
+  expect_output "755"
+}
+
+@test "bud with COPY of single file creates relative path with correct permissions" {
+  _prefetch ubuntu
+  imgName=ubuntu-image
+  ctrName=ubuntu-copy
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} $BUDFILES/copy-create-relative-path
+  expect_output --substring "permissions=755"
+
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run ${ctrName} -- stat -c "%a" lib/custom
+  expect_output "755"
+}
+
+@test "bud with ADD of single file creates absolute path with correct permissions" {
+  _prefetch ubuntu
+  imgName=ubuntu-image
+  ctrName=ubuntu-copy
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} $BUDFILES/add-create-absolute-path
+  expect_output --substring "permissions=755"
+
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run ${ctrName} -- stat -c "%a" /usr/lib/python3.7/distutils
+  expect_output "755"
+}
+
+@test "bud with ADD of single file creates relative path with correct permissions" {
+  _prefetch ubuntu
+  imgName=ubuntu-image
+  ctrName=ubuntu-copy
+  run_buildah build $WITH_POLICY_JSON -t ${imgName} $BUDFILES/add-create-relative-path
+  expect_output --substring "permissions=755"
+
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run ${ctrName} -- stat -c "%a" lib/custom
+  expect_output "755"
+}
+
+@test "bud multi-stage COPY creates absolute path with correct permissions" {
+  _prefetch ubuntu
+  imgName=ubuntu-image
+  ctrName=ubuntu-copy
+  # Same directory-creation check for COPY --from a previous stage.
+  run_buildah build $WITH_POLICY_JSON -f $BUDFILES/copy-multistage-paths/Dockerfile.absolute -t ${imgName} $BUDFILES/copy-multistage-paths
+  expect_output --substring "permissions=755"
+
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run ${ctrName} -- stat -c "%a" /my/bin
+  expect_output "755"
+}
+
+@test "bud multi-stage COPY creates relative path with correct permissions" {
+  _prefetch ubuntu
+  imgName=ubuntu-image
+  ctrName=ubuntu-copy
+  run_buildah build $WITH_POLICY_JSON -f $BUDFILES/copy-multistage-paths/Dockerfile.relative -t ${imgName} $BUDFILES/copy-multistage-paths
+  expect_output --substring "permissions=755"
+
+  run_buildah from --name ${ctrName} ${imgName}
+  run_buildah run ${ctrName} -- stat -c "%a" my/bin
+  expect_output "755"
+}
+
+@test "bud multi-stage COPY with invalid from statement" {
+  _prefetch ubuntu
+  imgName=ubuntu-image
+  ctrName=ubuntu-copy
+  run_buildah 125 build $WITH_POLICY_JSON -f $BUDFILES/copy-multistage-paths/Dockerfile.invalid_from -t ${imgName} $BUDFILES/copy-multistage-paths
+  expect_output --substring "COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image\|stage> flags"
+}
+
+@test "bud COPY to root succeeds" {
+  _prefetch ubuntu
+  run_buildah build $WITH_POLICY_JSON $BUDFILES/copy-root
+}
+
+@test "bud with FROM AS construct" {
+  _prefetch alpine
+  run_buildah build $WITH_POLICY_JSON -t test1 $BUDFILES/from-as
+  run_buildah images -a
+  expect_output --substring "test1"
+
+  run_buildah from --quiet $WITH_POLICY_JSON test1
+  ctr=$output
+  run_buildah containers -a
+  expect_output --substring "test1"
+
+  # The final stage's ENV must be present in the committed config.
+  run_buildah inspect --format "{{.Docker.ContainerConfig.Env}}" --type image test1
+  expect_output --substring "LOCAL=/1"
+}
+
+@test "bud with FROM AS construct with layers" {
+  _prefetch alpine
+  # Same check with layer caching enabled.
+  run_buildah build --layers $WITH_POLICY_JSON -t test1 $BUDFILES/from-as
+  run_buildah images -a
+  expect_output --substring "test1"
+
+  run_buildah from --quiet $WITH_POLICY_JSON test1
+  ctr=$output
+  run_buildah containers -a
+  expect_output --substring "test1"
+
+  run_buildah inspect --format "{{.Docker.ContainerConfig.Env}}" --type image test1
+  expect_output --substring "LOCAL=/1"
+}
+
+@test "bud with FROM AS skip FROM construct" {
+  _prefetch alpine
+  run_buildah build $WITH_POLICY_JSON -t test1 -f $BUDFILES/from-as/Dockerfile.skip $BUDFILES/from-as
+  expect_output --substring "LOCAL=/1"
+  expect_output --substring "LOCAL2=/2"
+
+  run_buildah images -a
+  expect_output --substring "test1"
+
+  run_buildah from --quiet $WITH_POLICY_JSON test1
+  ctr=$output
+  run_buildah containers -a
+  expect_output --substring "test1"
+
+  # Only the selected stage's filesystem and ENV may be committed; the
+  # skipped stage's /2 and LOCAL2 must be absent.
+  run_buildah mount $ctr
+  mnt=$output
+  test -e $mnt/1
+  test ! -e $mnt/2
+
+  run_buildah inspect --format "{{.Docker.ContainerConfig.Env}}" --type image test1
+  expect_output "[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin LOCAL=/1]"
+}
+
+# Error handling for the -f/--file flag: directories, symlinks, and
+# missing Dockerfiles. Exit code 125 is buildah's usage/runtime error.
+@test "build with -f pointing to not a file should fail" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah 125 build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/dockerfile/
+ expect_output --substring "cannot be path to a directory"
+}
+
+@test "bud with symlink Dockerfile not specified in file" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/symlink/Dockerfile $BUDFILES/symlink
+ expect_output --substring "FROM alpine"
+}
+
+@test "bud with dir for file but no Dockerfile in dir" {
+ target=alpine-image
+ run_buildah 125 build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/empty-dir $BUDFILES/empty-dir
+ expect_output --substring "no such file or directory"
+}
+
+@test "bud with bad dir Dockerfile" {
+ target=alpine-image
+ run_buildah 125 build $WITH_POLICY_JSON -t ${target} -f ${TEST_SOURCES}/baddirname ${TEST_SOURCES}/baddirname
+ expect_output --substring "no such file or directory"
+}
+
+# ARG declared before FROM: default value must be usable for base-image
+# resolution, and --build-arg must be able to override it.
+@test "bud with ARG before FROM default value" {
+ _prefetch busybox
+ target=leading-args-default
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/leading-args/Dockerfile $BUDFILES/leading-args
+}
+
+@test "bud with ARG before FROM" {
+ _prefetch busybox:musl
+ target=leading-args
+ run_buildah build $WITH_POLICY_JSON -t ${target} --build-arg=VERSION=musl -f $BUDFILES/leading-args/Dockerfile $BUDFILES/leading-args
+
+ # Verify https://github.com/containers/buildah/issues/4312
+ # stage `FROM stage_${my_env}` must be resolved with default arg value and build should be successful.
+ run_buildah build $WITH_POLICY_JSON -t source -f $BUDFILES/multi-stage-builds/Dockerfile.arg_in_stage
+
+ # Verify https://github.com/containers/buildah/issues/4573
+ # stage `COPY --from=stage_${my_env}` must be resolved with default arg value and build should be successful.
+ run_buildah build $WITH_POLICY_JSON -t source -f $BUDFILES/multi-stage-builds/Dockerfile.arg_in_copy
+
+}
+
+# HEALTHCHECK must round-trip into the docker-format image config;
+# durations are stored in nanoseconds, hence the $second arithmetic.
+@test "bud-with-healthcheck" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} --format docker $BUDFILES/healthcheck
+ run_buildah inspect -f '{{printf "%q" .Docker.Config.Healthcheck.Test}} {{printf "%d" .Docker.Config.Healthcheck.StartPeriod}} {{printf "%d" .Docker.Config.Healthcheck.Interval}} {{printf "%d" .Docker.Config.Healthcheck.Timeout}} {{printf "%d" .Docker.Config.Healthcheck.Retries}}' ${target}
+ second=1000000000
+ threeseconds=$(( 3 * $second ))
+ fiveminutes=$(( 5 * 60 * $second ))
+ tenminutes=$(( 10 * 60 * $second ))
+ expect_output '["CMD-SHELL" "curl -f http://localhost/ || exit 1"]'" $tenminutes $fiveminutes $threeseconds 4" "Healthcheck config"
+}
+
+# A --build-arg that no instruction consumes must produce a warning;
+# one consumed by FROM must not.
+@test "bud with unused build arg" {
+ _prefetch alpine busybox
+ target=busybox-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} --build-arg foo=bar --build-arg foo2=bar2 -f $BUDFILES/build-arg/Dockerfile $BUDFILES/build-arg
+ expect_output --substring "one or more build args were not consumed: \[foo2\]"
+ run_buildah build $WITH_POLICY_JSON -t ${target} --build-arg IMAGE=alpine -f $BUDFILES/build-arg/Dockerfile2 $BUDFILES/build-arg
+ assert "$output" !~ "one or more build args were not consumed: \[IMAGE\]"
+ expect_output --substring "FROM alpine"
+}
+
+# COPY --from tests: cache stability, copying from an image with no prior
+# FROM, flag validation, and self/base-image references.
+@test "bud with copy-from and cache" {
+ _prefetch busybox
+ target=busybox-image
+ run_buildah build $WITH_POLICY_JSON --layers --iidfile ${TEST_SCRATCH_DIR}/iid1 -f $BUDFILES/copy-from/Dockerfile2 $BUDFILES/copy-from
+ cat ${TEST_SCRATCH_DIR}/iid1
+ test -s ${TEST_SCRATCH_DIR}/iid1
+ run_buildah build $WITH_POLICY_JSON --layers --iidfile ${TEST_SCRATCH_DIR}/iid2 -f $BUDFILES/copy-from/Dockerfile2 $BUDFILES/copy-from
+ cat ${TEST_SCRATCH_DIR}/iid2
+ test -s ${TEST_SCRATCH_DIR}/iid2
+ # identical inputs + --layers must reproduce the identical image id
+ cmp ${TEST_SCRATCH_DIR}/iid1 ${TEST_SCRATCH_DIR}/iid2
+}
+
+@test "bud with copy-from in Dockerfile no prior FROM" {
+ want_tag=20221018
+ _prefetch busybox quay.io/libpod/testimage:$want_tag
+ target=no-prior-from
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/copy-from/Dockerfile $BUDFILES/copy-from
+
+ run_buildah from --quiet $WITH_POLICY_JSON ${target}
+ ctr=$output
+ run_buildah mount ${ctr}
+ mnt=$output
+
+ newfile="/home/busyboxpodman/copied-testimage-id"
+ test -e $mnt/$newfile
+ expect_output --from="$(< $mnt/$newfile)" "$want_tag" "Contents of $newfile"
+}
+
+@test "bud with copy-from with bad from flag in Dockerfile with --layers" {
+ _prefetch busybox
+ target=bad-from-flag
+ run_buildah 125 build $WITH_POLICY_JSON --layers -t ${target} -f $BUDFILES/copy-from/Dockerfile.bad $BUDFILES/copy-from
+ expect_output --substring "COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image\|stage> flags"
+}
+
+# COPY --from of the base image must give the same result with one build
+# job and with --jobs 4 (parallel stage execution).
+@test "bud with copy-from referencing the base image" {
+ _prefetch busybox
+ target=busybox-derived
+ target_mt=busybox-mt-derived
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/copy-from/Dockerfile3 $BUDFILES/copy-from
+ run_buildah build $WITH_POLICY_JSON --jobs 4 -t ${target} -f $BUDFILES/copy-from/Dockerfile3 $BUDFILES/copy-from
+
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/copy-from/Dockerfile4 $BUDFILES/copy-from
+ run_buildah build --no-cache $WITH_POLICY_JSON --jobs 4 -t ${target_mt} -f $BUDFILES/copy-from/Dockerfile4 $BUDFILES/copy-from
+
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root_single_job=$output
+
+ run_buildah from --quiet ${target_mt}
+ cid=$output
+ run_buildah mount ${cid}
+ root_multi_job=$output
+
+ # Check that both the version with --jobs 1 and --jobs=N have the same number of files
+ test $(find $root_single_job -type f | wc -l) = $(find $root_multi_job -type f | wc -l)
+}
+
+@test "bud with copy-from referencing the current stage" {
+ _prefetch busybox
+ target=busybox-derived
+ run_buildah 125 build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/copy-from/Dockerfile2.bad $BUDFILES/copy-from
+ expect_output --substring "COPY --from=build: no stage or image found with that name"
+}
+
+# --target selection and multi-stage commit/cache bookkeeping.
+@test "bud-target" {
+ _prefetch alpine ubuntu
+ target=target
+ run_buildah build $WITH_POLICY_JSON -t ${target} --target mytarget $BUDFILES/target
+ expect_output --substring "\[1/2] STEP 1/3: FROM ubuntu:latest"
+ expect_output --substring "\[2/2] STEP 1/3: FROM alpine:latest AS mytarget"
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+ # only the files of the requested target stage may be present
+ test -e ${root}/2
+ test ! -e ${root}/3
+}
+
+@test "bud-no-target-name" {
+ _prefetch alpine
+ run_buildah build $WITH_POLICY_JSON $BUDFILES/maintainer
+}
+
+@test "bud-multi-stage-nocache-nocommit" {
+ _prefetch alpine
+ # pull the base image directly, so that we don't record it being written to local storage in the next step
+ run_buildah pull $WITH_POLICY_JSON alpine
+ # okay, build an image with two stages
+ run_buildah --log-level=debug bud $WITH_POLICY_JSON -f $BUDFILES/multi-stage-builds/Dockerfile.name $BUDFILES/multi-stage-builds
+ # debug messages should only record us creating one new image: the one for the second stage, since we don't base anything on the first
+ run grep "created new image ID" <<< "$output"
+ expect_line_count 1
+}
+
+@test "bud-multi-stage-cache-nocontainer" {
+ skip "FIXME: Broken in CI right now"
+ _prefetch alpine
+ # first time through, quite normal
+ run_buildah build --layers -t base $WITH_POLICY_JSON -f $BUDFILES/multi-stage-builds/Dockerfile.rebase $BUDFILES/multi-stage-builds
+ # second time through, everything should be cached, and we shouldn't create a container based on the final image
+ run_buildah --log-level=debug bud --layers -t base $WITH_POLICY_JSON -f $BUDFILES/multi-stage-builds/Dockerfile.rebase $BUDFILES/multi-stage-builds
+ # skip everything up through the final COMMIT step, and make sure we didn't log a "Container ID:" after it
+ run sed '0,/COMMIT base/ d' <<< "$output"
+ echo "$output" >&2
+ test "${#lines[@]}" -gt 1
+ run grep "Container ID:" <<< "$output"
+ expect_output ""
+}
+
+# COPY destinations and WORKDIRs that are (possibly dangling) symlinks:
+# content must land at the symlink target, and the links must survive.
+@test "bud copy to symlink" {
+ _prefetch alpine
+ target=alpine-image
+ ctr=alpine-ctr
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/dest-symlink
+ expect_output --substring "STEP 5/6: RUN ln -s "
+
+ run_buildah from $WITH_POLICY_JSON --name=${ctr} ${target}
+ expect_output --substring ${ctr}
+
+ run_buildah run ${ctr} ls -alF /etc/hbase
+ expect_output --substring "/etc/hbase -> /usr/local/hbase/"
+
+ run_buildah run ${ctr} ls -alF /usr/local/hbase
+ expect_output --substring "Dockerfile"
+}
+
+@test "bud copy to dangling symlink" {
+ _prefetch ubuntu
+ target=ubuntu-image
+ ctr=ubuntu-ctr
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/dest-symlink-dangling
+ expect_output --substring "STEP 3/5: RUN ln -s "
+
+ run_buildah from $WITH_POLICY_JSON --name=${ctr} ${target}
+ expect_output --substring ${ctr}
+
+ run_buildah run ${ctr} ls -alF /src
+ expect_output --substring "/src -> /symlink"
+
+ run_buildah run ${ctr} ls -alF /symlink
+ expect_output --substring "Dockerfile"
+}
+
+@test "bud WORKDIR isa symlink" {
+ _prefetch alpine
+ target=alpine-image
+ ctr=alpine-ctr
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/workdir-symlink
+ expect_output --substring "STEP 3/6: RUN ln -sf "
+
+ run_buildah from $WITH_POLICY_JSON --name=${ctr} ${target}
+ expect_output --substring ${ctr}
+
+ run_buildah run ${ctr} ls -alF /tempest
+ expect_output --substring "/tempest -> /var/lib/tempest/"
+
+ run_buildah run ${ctr} ls -alF /etc/notareal.conf
+ expect_output --substring "\-rw\-rw\-r\-\-"
+}
+
+@test "bud WORKDIR isa symlink no target dir" {
+ _prefetch alpine
+ target=alpine-image
+ ctr=alpine-ctr
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile-2 $BUDFILES/workdir-symlink
+ expect_output --substring "STEP 2/6: RUN ln -sf "
+
+ run_buildah from $WITH_POLICY_JSON --name=${ctr} ${target}
+ expect_output --substring ${ctr}
+
+ run_buildah run ${ctr} ls -alF /tempest
+ expect_output --substring "/tempest -> /var/lib/tempest/"
+
+ run_buildah run ${ctr} ls /tempest
+ expect_output --substring "Dockerfile-2"
+
+ run_buildah run ${ctr} ls -alF /etc/notareal.conf
+ expect_output --substring "\-rw\-rw\-r\-\-"
+}
+
+@test "bud WORKDIR isa symlink no target dir and follow on dir" {
+ _prefetch alpine
+ target=alpine-image
+ ctr=alpine-ctr
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Dockerfile-3 $BUDFILES/workdir-symlink
+ expect_output --substring "STEP 2/9: RUN ln -sf "
+
+ run_buildah from $WITH_POLICY_JSON --name=${ctr} ${target}
+ expect_output --substring ${ctr}
+
+ run_buildah run ${ctr} ls -alF /tempest
+ expect_output --substring "/tempest -> /var/lib/tempest/"
+
+ run_buildah run ${ctr} ls /tempest
+ expect_output --substring "Dockerfile-3"
+
+ run_buildah run ${ctr} ls /tempest/lowerdir
+ expect_output --substring "Dockerfile-3"
+
+ run_buildah run ${ctr} ls -alF /etc/notareal.conf
+ expect_output --substring "\-rw\-rw\-r\-\-"
+}
+
+# -v bind mounts (with rw / SELinux z options) must be visible at build
+# time; a touch(1) that only changes mtime must not invalidate --layers
+# cache for COPY . (content, not timestamps, keys the cache).
+@test "buildah bud --volume" {
+ voldir=${TEST_SCRATCH_DIR}/bud-volume
+ mkdir -p ${voldir}
+
+ _prefetch alpine
+ run_buildah build $WITH_POLICY_JSON -v ${voldir}:/testdir $BUDFILES/mount
+ expect_output --substring "/testdir"
+ run_buildah build $WITH_POLICY_JSON -v ${voldir}:/testdir:rw $BUDFILES/mount
+ expect_output --substring "/testdir"
+ run_buildah build $WITH_POLICY_JSON -v ${voldir}:/testdir:rw,z $BUDFILES/mount
+ expect_output --substring "/testdir"
+}
+
+@test "bud-copy-dot with --layers picks up changed file" {
+ _prefetch alpine
+ local contextdir=${TEST_SCRATCH_DIR}/use-layers
+ cp -a $BUDFILES/use-layers $contextdir
+
+ mkdir -p $contextdir/subdir
+ touch $contextdir/file.txt
+ run_buildah build $WITH_POLICY_JSON --layers --iidfile ${TEST_SCRATCH_DIR}/iid1 -f Dockerfile.7 $contextdir
+
+ touch $contextdir/file.txt
+ run_buildah build $WITH_POLICY_JSON --layers --iidfile ${TEST_SCRATCH_DIR}/iid2 -f Dockerfile.7 $contextdir
+
+ if [[ $(cat ${TEST_SCRATCH_DIR}/iid1) != $(cat ${TEST_SCRATCH_DIR}/iid2) ]]; then
+ echo "Expected image id to not change after touching a file copied into the image" >&2
+ false
+ fi
+}
+
+# Signature-policy enforcement: policy is evaluated only when pulling the
+# source/base image, never for local storage or push destinations.
+@test "buildah-bud-policy" {
+ target=foo
+
+ # A deny-all policy should prevent us from pulling the base image.
+ run_buildah 125 build --signature-policy ${TEST_SOURCES}/deny.json -t ${target} -v ${TEST_SOURCES}:/testdir $BUDFILES/mount
+ expect_output --substring 'Source image rejected: Running image .* rejected by policy.'
+
+ # A docker-only policy should allow us to pull the base image and commit.
+ run_buildah build --signature-policy ${TEST_SOURCES}/docker.json -t ${target} -v ${TEST_SOURCES}:/testdir $BUDFILES/mount
+ # A deny-all policy shouldn't break pushing, since policy is only evaluated
+ # on the source image, and we force it to allow local storage.
+ run_buildah push --signature-policy ${TEST_SOURCES}/deny.json ${target} dir:${TEST_SCRATCH_DIR}/mount
+ run_buildah rmi ${target}
+
+ # A docker-only policy should allow us to pull the base image first...
+ run_buildah pull --signature-policy ${TEST_SOURCES}/docker.json alpine
+ # ... and since we don't need to pull the base image, a deny-all policy shouldn't break a build.
+ run_buildah build --signature-policy ${TEST_SOURCES}/deny.json -t ${target} -v ${TEST_SOURCES}:/testdir $BUDFILES/mount
+ # A deny-all policy shouldn't break pushing, since policy is only evaluated
+ # on the source image, and we force it to allow local storage.
+ run_buildah push --signature-policy ${TEST_SOURCES}/deny.json ${target} dir:${TEST_SCRATCH_DIR}/mount
+ # Similarly, a deny-all policy shouldn't break committing directly to other locations.
+ run_buildah build --signature-policy ${TEST_SOURCES}/deny.json -t dir:${TEST_SCRATCH_DIR}/mount -v ${TEST_SOURCES}:/testdir $BUDFILES/mount
+}
+
+# COPY of a symlink that replaces itself, and recursive COPY, both with a
+# .dockerignore present (exercises the ignore-aware copier paths).
+@test "bud-copy-replace-symlink" {
+ local contextdir=${TEST_SCRATCH_DIR}/top
+ mkdir -p $contextdir
+ cp $BUDFILES/symlink/Dockerfile.replace-symlink $contextdir/
+ ln -s Dockerfile.replace-symlink $contextdir/symlink
+ echo foo > $contextdir/.dockerignore
+ run_buildah build $WITH_POLICY_JSON -f $contextdir/Dockerfile.replace-symlink $contextdir
+}
+
+@test "bud-copy-recurse" {
+ local contextdir=${TEST_SCRATCH_DIR}/recurse
+ mkdir -p $contextdir
+ cp $BUDFILES/recurse/Dockerfile $contextdir
+ echo foo > $contextdir/.dockerignore
+ run_buildah build $WITH_POLICY_JSON $contextdir
+}
+
+# .dockerignore: an exclusion pattern with a negation (!) must re-include
+# matching files; a plain exclusion must drop everything beneath it.
+@test "bud copy with .dockerignore #1" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir/stuff/huge/usr/bin/
+ touch $mytmpdir/stuff/huge/usr/bin/{file1,file2}
+ touch $mytmpdir/stuff/huge/usr/file3
+
+ cat > $mytmpdir/.dockerignore << _EOF
+stuff/huge/*
+!stuff/huge/usr/bin/*
+_EOF
+
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+COPY stuff /tmp/stuff
+RUN find /tmp/stuff -type f
+_EOF
+
+ # file1/file2 are re-included by the ! pattern; file3 stays excluded
+ run_buildah build -t testbud $WITH_POLICY_JSON ${mytmpdir}
+ expect_output --substring "file1"
+ expect_output --substring "file2"
+ assert "$output" !~ "file3"
+}
+
+@test "bud copy with .dockerignore #2" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p $mytmpdir/stuff/huge/usr/bin/
+ touch $mytmpdir/stuff/huge/usr/bin/{file1,file2}
+
+ cat > $mytmpdir/.dockerignore << _EOF
+stuff/huge/*
+_EOF
+
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+COPY stuff /tmp/stuff
+RUN find /tmp/stuff -type f
+_EOF
+
+ run_buildah build -t testbud $WITH_POLICY_JSON ${mytmpdir}
+ assert "$output" !~ file1
+ assert "$output" !~ file2
+}
+
+# COPY relative to WORKDIR must create files under the working directory.
+@test "bud-copy-workdir" {
+ target=testimage
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/copy-workdir
+ run_buildah from ${target}
+ cid="$output"
+ run_buildah mount "${cid}"
+ root="$output"
+ test -s "${root}"/file1.txt
+ test -d "${root}"/subdir
+ test -s "${root}"/subdir/file2.txt
+}
+
+# regression test for https://github.com/containers/podman/issues/10671
+@test "bud-copy-workdir --layers" {
+ _prefetch alpine
+
+ target=testimage
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} -f Dockerfile.2 $BUDFILES/copy-workdir
+ run_buildah from ${target}
+ cid="$output"
+ run_buildah mount "${cid}"
+ root="$output"
+ test -d "${root}"/subdir
+ test -s "${root}"/subdir/file1.txt
+}
+
+@test "bud-build-arg-cache" {
+ _prefetch busybox alpine
+ target=derived-image
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} -f Dockerfile3 $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' ${target}
+ targetid="$output"
+
+ # With build args, we should not find the previous build as a cached result. This will be true because there is a RUN command after all the ARG
+ # commands in the containerfile, so this does not truly test if the ARG commands were using cache or not. There is a test for that case below.
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} -f Dockerfile3 --build-arg=UID=17122 --build-arg=CODE=/copr/coprs_frontend --build-arg=USERNAME=praiskup --build-arg=PGDATA=/pgdata $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' ${target}
+ argsid="$output"
+ assert "$argsid" != "$initialid" \
+ ".FromImageID of test-img-2 ($argsid) == same as test-img, it should be different"
+
+ # With build args, even in a different order, we should end up using the previous build as a cached result.
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} -f Dockerfile3 --build-arg=UID=17122 --build-arg=CODE=/copr/coprs_frontend --build-arg=USERNAME=praiskup --build-arg=PGDATA=/pgdata $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' ${target}
+ expect_output "$argsid" "FromImageID of build 3"
+
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} -f Dockerfile3 --build-arg=CODE=/copr/coprs_frontend --build-arg=USERNAME=praiskup --build-arg=PGDATA=/pgdata --build-arg=UID=17122 $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' ${target}
+ expect_output "$argsid" "FromImageID of build 4"
+
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} -f Dockerfile3 --build-arg=USERNAME=praiskup --build-arg=PGDATA=/pgdata --build-arg=UID=17122 --build-arg=CODE=/copr/coprs_frontend $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' ${target}
+ expect_output "$argsid" "FromImageID of build 5"
+
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} -f Dockerfile3 --build-arg=PGDATA=/pgdata --build-arg=UID=17122 --build-arg=CODE=/copr/coprs_frontend --build-arg=USERNAME=praiskup $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' ${target}
+ expect_output "$argsid" "FromImageID of build 6"
+
+ # If build-arg is specified via the command line and is different from the previous cached build, it should not use the cached layers.
+ # Note, this containerfile does not have any RUN commands and we verify that the ARG steps are being rebuilt when a change is detected.
+ run_buildah build $WITH_POLICY_JSON --layers -t test-img -f Dockerfile4 $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' test-img
+ initialid="$output"
+
+ # Build the same containerfile again and verify that the cached layers were used
+ run_buildah build $WITH_POLICY_JSON --layers -t test-img-1 -f Dockerfile4 $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' test-img-1
+ expect_output "$initialid" "FromImageID of test-img-1 should match test-img"
+
+ # Set the build-arg flag and verify that the cached layers are not used
+ run_buildah build $WITH_POLICY_JSON --layers -t test-img-2 --build-arg TEST=foo -f Dockerfile4 $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' test-img-2
+ argsid="$output"
+ assert "$argsid" != "$initialid" \
+ ".FromImageID of test-img-2 ($argsid) == same as test-img, it should be different"
+
+ # Set the build-arg via an ENV in the local environment and verify that the cached layers are not used
+ export TEST=bar
+ run_buildah build $WITH_POLICY_JSON --layers -t test-img-3 --build-arg TEST -f Dockerfile4 $BUDFILES/build-arg
+ run_buildah inspect -f '{{.FromImageID}}' test-img-3
+ argsid="$output"
+ assert "$argsid" != "$initialid" \
+ ".FromImageID of test-img-3 ($argsid) == same as test-img, it should be different"
+}
+
+# Privileged RUN must still commit; COPY must preserve a hardlink group
+# (all four names share one inode and report a link count of 4).
+@test "bud test RUN with a privileged command" {
+ _prefetch alpine
+ target=alpinepriv
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/run-privd/Dockerfile $BUDFILES/run-privd
+ expect_output --substring "[^:][^[:graph:]]COMMIT ${target}"
+ run_buildah images -q
+ expect_line_count 2
+}
+
+@test "bud-copy-dockerignore-hardlinks" {
+ target=image
+ local contextdir=${TEST_SCRATCH_DIR}/hardlinks
+ mkdir -p $contextdir/subdir
+ cp $BUDFILES/recurse/Dockerfile $contextdir
+ echo foo > $contextdir/.dockerignore
+ echo test1 > $contextdir/subdir/test1.txt
+ ln $contextdir/subdir/test1.txt $contextdir/subdir/test2.txt
+ ln $contextdir/subdir/test2.txt $contextdir/test3.txt
+ ln $contextdir/test3.txt $contextdir/test4.txt
+ run_buildah build $WITH_POLICY_JSON -t ${target} $contextdir
+ run_buildah from ${target}
+ ctrid="$output"
+ run_buildah mount "$ctrid"
+ root="$output"
+
+ # %d:%i = device:inode, %h = hardlink count; all four files must alias one inode
+ run stat -c "%d:%i" ${root}/subdir/test1.txt
+ id1=$output
+ run stat -c "%h" ${root}/subdir/test1.txt
+ expect_output 4 "test1: number of hardlinks"
+ run stat -c "%d:%i" ${root}/subdir/test2.txt
+ expect_output $id1 "stat(test2) == stat(test1)"
+ run stat -c "%h" ${root}/subdir/test2.txt
+ expect_output 4 "test2: number of hardlinks"
+ run stat -c "%d:%i" ${root}/test3.txt
+ expect_output $id1 "stat(test3) == stat(test1)"
+ run stat -c "%h" ${root}/test3.txt
+ expect_output 4 "test3: number of hardlinks"
+ run stat -c "%d:%i" ${root}/test4.txt
+ expect_output $id1 "stat(test4) == stat(test1)"
+ run stat -c "%h" ${root}/test4.txt
+ expect_output 4 "test4: number of hardlinks"
+}
+
+# Default-context handling: building with no arguments uses the CWD, and
+# missing / directory / empty Dockerfiles must fail cleanly.
+@test "bud without any arguments should succeed" {
+ cd $BUDFILES/from-scratch
+ run_buildah build --signature-policy ${TEST_SOURCES}/policy.json
+}
+
+@test "bud without any arguments should fail when no Dockerfile exists" {
+ cd $TEST_SCRATCH_DIR
+ run_buildah 125 build --signature-policy ${TEST_SOURCES}/policy.json
+ expect_output --substring "no such file or directory"
+}
+
+@test "bud with specified context should fail if directory contains no Dockerfile" {
+ run_buildah 125 build $WITH_POLICY_JSON "$TEST_SCRATCH_DIR"
+ expect_output --substring "no such file or directory"
+}
+
+@test "bud with specified context should fail if assumed Dockerfile is a directory" {
+ mkdir -p "$TEST_SCRATCH_DIR"/Dockerfile
+ run_buildah 125 build $WITH_POLICY_JSON "$TEST_SCRATCH_DIR"
+ expect_output --substring "is not a file"
+}
+
+@test "bud with specified context should fail if context contains not-existing Dockerfile" {
+ run_buildah 125 build $WITH_POLICY_JSON "$TEST_SCRATCH_DIR"/Dockerfile
+ expect_output --substring "no such file or directory"
+}
+
+@test "bud with specified context should succeed if context contains existing Dockerfile" {
+ echo "FROM alpine" > $TEST_SCRATCH_DIR/Dockerfile
+ run_buildah bud $WITH_POLICY_JSON $TEST_SCRATCH_DIR/Dockerfile
+}
+
+@test "bud with specified context should fail if context contains empty Dockerfile" {
+ touch $TEST_SCRATCH_DIR/Dockerfile
+ run_buildah 125 build $WITH_POLICY_JSON $TEST_SCRATCH_DIR/Dockerfile
+ expect_output --substring "no contents in \"$TEST_SCRATCH_DIR/Dockerfile\""
+}
+
+# A Containerfile that makes no filesystem changes must not create a new
+# layer (digest unchanged), while labels/annotations still apply; --squash
+# must cope with plain layers and with hardlinks.
+@test "bud-no-change" {
+ _prefetch alpine
+ parent=alpine
+ target=no-change-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/no-change
+ run_buildah inspect --format '{{printf "%q" .FromImageDigest}}' ${parent}
+ parentid="$output"
+ run_buildah inspect --format '{{printf "%q" .FromImageDigest}}' ${target}
+ expect_output "$parentid"
+}
+
+@test "bud-no-change-label" {
+ # derive the expected io.buildah.version label from the binary itself
+ run_buildah --version
+ local -a output_fields=($output)
+ buildah_version=${output_fields[2]}
+ want_output='map["io.buildah.version":"'$buildah_version'" "test":"label"]'
+
+ _prefetch alpine
+ parent=alpine
+ target=no-change-image
+ run_buildah build --label "test=label" $WITH_POLICY_JSON -t ${target} $BUDFILES/no-change
+ run_buildah inspect --format '{{printf "%q" .Docker.Config.Labels}}' ${target}
+ expect_output "$want_output"
+}
+
+@test "bud-no-change-annotation" {
+ _prefetch alpine
+ target=no-change-image
+ run_buildah build --annotation "test=annotation" $WITH_POLICY_JSON -t ${target} $BUDFILES/no-change
+ run_buildah inspect --format '{{index .ImageAnnotations "test"}}' ${target}
+ expect_output "annotation"
+}
+
+@test "bud-squash-layers" {
+ _prefetch alpine
+ run_buildah build $WITH_POLICY_JSON --squash $BUDFILES/layers-squash
+}
+
+@test "bud-squash-hardlinks" {
+ _prefetch busybox
+ run_buildah build $WITH_POLICY_JSON --squash $BUDFILES/layers-squash/Dockerfile.hardlinks
+}
+
+# Following test must pass for both rootless and rootfull
+@test "rootless: support --device and renaming device using bind-mount" {
+ skip_if_in_container # unable to perform mount of /dev/null for test in CI container setup
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+ cat > $contextdir/Dockerfile << _EOF
+FROM alpine
+RUN ls /test/dev
+_EOF
+ # /dev/null must appear under the renamed path inside the build
+ run_buildah build $WITH_POLICY_JSON --device /dev/null:/test/dev/null -t test -f $contextdir/Dockerfile
+ expect_output --substring "null"
+}
+
+# --device with a directory source requires real device nodes (mknod),
+# hence the chroot/rootless skips.
+@test "bud with additional directory of devices" {
+ skip_if_rootless_environment
+ skip_if_chroot
+ skip_if_rootless
+
+ _prefetch alpine
+ target=alpine-image
+ local contextdir=${TEST_SCRATCH_DIR}/foo
+ mkdir -p $contextdir
+ mknod $contextdir/null c 1 3
+ run_buildah build $WITH_POLICY_JSON --device $contextdir:/dev/fuse -t ${target} -f $BUDFILES/device/Dockerfile $BUDFILES/device
+ expect_output --substring "null"
+}
+
+@test "bud with additional device" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON --device /dev/fuse -t ${target} -f $BUDFILES/device/Dockerfile $BUDFILES/device
+ expect_output --substring "/dev/fuse"
+}
+
+# Containerfile discovery and cpp(1) preprocessing of Containerfile.in,
+# both via --cpp-flag and via the BUILDAH_CPPFLAGS environment variable.
+@test "bud with Containerfile" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/containerfile
+ expect_output --substring "FROM alpine"
+}
+
+@test "bud with Containerfile.in, --cpp-flag" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/containerfile/Containerfile.in $BUDFILES/containerfile
+ expect_output --substring "Ignoring In file included .* invalid preprocessing directive #This"
+ expect_output --substring "FROM alpine"
+ expect_output --substring "success"
+ expect_output --substring "debug=no" "with no cpp-flag or BUILDAH_CPPFLAGS"
+
+ run_buildah build $WITH_POLICY_JSON -t ${target} --cpp-flag "-DTESTCPPDEBUG" -f $BUDFILES/containerfile/Containerfile.in $BUDFILES/containerfile
+ expect_output --substring "Ignoring In file included .* invalid preprocessing directive #This"
+ expect_output --substring "FROM alpine"
+ expect_output --substring "success"
+ expect_output --substring "debug=yes" "with --cpp-flag -DTESTCPPDEBUG"
+}
+
+@test "bud with Containerfile.in, via envariable" {
+ _prefetch alpine
+ target=alpine-image
+
+ BUILDAH_CPPFLAGS="-DTESTCPPDEBUG" run_buildah build $WITH_POLICY_JSON -t ${target} -f $BUDFILES/containerfile/Containerfile.in $BUDFILES/containerfile
+ expect_output --substring "Ignoring In file included .* invalid preprocessing directive #This"
+ expect_output --substring "FROM alpine"
+ expect_output --substring "success"
+ expect_output --substring "debug=yes" "with BUILDAH_CPPFLAGS=-DTESTCPPDEBUG"
+}
+
+@test "bud with Dockerfile" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/dockerfile
+ expect_output --substring "FROM alpine"
+}
+
+# When both exist, Containerfile is expected to take precedence.
+@test "bud with Containerfile and Dockerfile" {
+ _prefetch alpine
+ target=alpine-image
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/containeranddockerfile
+ expect_output --substring "FROM alpine"
+}
+
+# Context supplied over HTTP and over stdin (plain Dockerfile and tar).
+@test "bud-http-context-with-Containerfile" {
+ _test_http http-context-containerfile context.tar
+}
+
+@test "bud with Dockerfile from stdin" {
+ _prefetch alpine
+ target=df-stdin
+ run_buildah build $WITH_POLICY_JSON -t ${target} - < $BUDFILES/context-from-stdin/Dockerfile
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+
+ test -s $root/scratchfile
+ run cat $root/scratchfile
+ expect_output "stdin-context" "contents of \$root/scratchfile"
+
+ # FROM scratch overrides FROM alpine
+ test ! -s $root/etc/alpine-release
+}
+
+@test "bud with Dockerfile from stdin tar" {
+ _prefetch alpine
+ target=df-stdin
+ # 'cmd1 < <(cmd2)' == 'cmd2 | cmd1' but runs cmd1 in this shell, not sub.
+ run_buildah build $WITH_POLICY_JSON -t ${target} - < <(tar -c -C $BUDFILES/context-from-stdin .)
+ run_buildah from --quiet ${target}
+ cid=$output
+ run_buildah mount ${cid}
+ root=$output
+
+ test -s $root/scratchfile
+ run cat $root/scratchfile
+ expect_output "stdin-context" "contents of \$root/scratchfile"
+
+ # FROM scratch overrides FROM alpine
+ test ! -s $root/etc/alpine-release
+}
+
+# ARG substitution in COPY sources/destinations (with and without braces),
+# and building straight from a git URL with a #branch fragment.
+@test "bud containerfile with args" {
+ _prefetch alpine
+ target=use-args
+ touch $BUDFILES/use-args/abc.txt
+ run_buildah build $WITH_POLICY_JSON -t ${target} --build-arg=abc.txt $BUDFILES/use-args
+ expect_output --substring "COMMIT use-args"
+ run_buildah from --quiet ${target}
+ ctrID=$output
+ run_buildah run $ctrID ls abc.txt
+ expect_output --substring "abc.txt"
+
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Containerfile.destination --build-arg=testArg=abc.txt --build-arg=destination=/tmp $BUDFILES/use-args
+ expect_output --substring "COMMIT use-args"
+ run_buildah from --quiet ${target}
+ ctrID=$output
+ run_buildah run $ctrID ls /tmp/abc.txt
+ expect_output --substring "abc.txt"
+
+ run_buildah build $WITH_POLICY_JSON -t ${target} -f Containerfile.dest_nobrace --build-arg=testArg=abc.txt --build-arg=destination=/tmp $BUDFILES/use-args
+ expect_output --substring "COMMIT use-args"
+ run_buildah from --quiet ${target}
+ ctrID=$output
+ run_buildah run $ctrID ls /tmp/abc.txt
+ expect_output --substring "abc.txt"
+
+ # clean up the fixture file created above
+ rm $BUDFILES/use-args/abc.txt
+}
+
+@test "bud using gitrepo and branch" {
+ if ! start_git_daemon ${TEST_SOURCES}/git-daemon/release-1.11-rhel.tar.gz ; then
+ skip "error running git daemon"
+ fi
+ run_buildah build $WITH_POLICY_JSON --layers -t gittarget -f $BUDFILES/shell/Dockerfile git://localhost:${GITPORT}/repo#release-1.11-rhel
+}
+
+@test "bud using gitrepo with .git and branch" {
+ run_buildah build $WITH_POLICY_JSON --layers -t gittarget -f $BUDFILES/shell/Dockerfile https://github.com/containers/buildah.git#release-1.11-rhel
+}
+
+# Fixes #1906: buildah was not detecting changed tarfile
+@test "bud containerfile with tar archive in copy" {
+ _prefetch busybox
+ # First check to verify cache is used if the tar file does not change
+ target=copy-archive
+ date > $BUDFILES/${target}/test
+ tar -C $TEST_SOURCES -cJf $BUDFILES/${target}/test.tar.xz bud/${target}/test
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} $BUDFILES/${target}
+ expect_output --substring "COMMIT copy-archive"
+
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} $BUDFILES/${target}
+ expect_output --substring " Using cache"
+ expect_output --substring "COMMIT copy-archive"
+
+ # Now test that we do NOT use cache if the tar file changes
+ echo This is a change >> $BUDFILES/${target}/test
+ tar -C $TEST_SOURCES -cJf $BUDFILES/${target}/test.tar.xz bud/${target}/test
+ run_buildah build $WITH_POLICY_JSON --layers -t ${target} $BUDFILES/${target}
+ if [[ "$output" =~ " Using cache" ]]; then
+ expect_output "[no instance of 'Using cache']" "no cache used"
+ fi
+ expect_output --substring "COMMIT copy-archive"
+
+ # remove the generated fixture files
+ rm -f $BUDFILES/${target}/test*
+}
+
+# --pull-never must fail when the image is absent; --pull and --pull=never
+# (after the image exists locally) must both succeed.
+@test "bud pull never" {
+ target=pull
+ run_buildah 125 build $WITH_POLICY_JSON -t ${target} --pull-never $BUDFILES/pull
+ expect_output --substring "busybox: image not known"
+
+ run_buildah build $WITH_POLICY_JSON -t ${target} --pull $BUDFILES/pull
+ expect_output --substring "COMMIT pull"
+
+ run_buildah build $WITH_POLICY_JSON -t ${target} --pull=never $BUDFILES/pull
+ expect_output --substring "COMMIT pull"
+}
+
+@test "bud pull false no local image" {
+ target=pull
+ run_buildah build $WITH_POLICY_JSON -t ${target} --pull=false $BUDFILES/pull
+ expect_output --substring "COMMIT pull"
+}
+
+@test "bud with Containerfile should fail with nonexistent authfile" {
+  # A missing --authfile path must be rejected up front with exit 125
+  # and this exact error message.
+  target=alpine-image
+  run_buildah 125 build --authfile /tmp/nonexistent $WITH_POLICY_JSON -t ${target} $BUDFILES/containerfile
+  expect_output "Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory"
+}
+
+
+@test "bud for multi-stage Containerfile with invalid registry and --authfile as a fd, should fail with no such host" {
+  # Pass the authfile via process substitution (/dev/fd/N) to verify the
+  # JSON is read from the fd; failure must come from DNS, not file parsing.
+  target=alpine-multi-stage-image
+  run_buildah 125 build --authfile=<(echo "{ \"auths\": { \"myrepository.example\": { \"auth\": \"$(echo 'username:password' | base64 --wrap=0)\" } } }") -t ${target} --file $BUDFILES/from-invalid-registry/Containerfile
+  # Should fail with `no such host` instead of: error reading JSON file "/dev/fd/x"
+  expect_output --substring "no such host"
+}
+
+@test "bud COPY with URL should fail" {
+  # COPY (unlike ADD) must reject URL sources; the regex uses "can.t" to
+  # match either "can't" or "cant" in the error text.
+  local contextdir=${TEST_SCRATCH_DIR}/budcopy
+  mkdir $contextdir
+  FILE=$contextdir/Dockerfile.url
+  /bin/cat <<EOM >$FILE
+FROM alpine:latest
+COPY https://getfedora.org/index.html .
+EOM
+
+  run_buildah 125 build $WITH_POLICY_JSON -t foo -f $contextdir/Dockerfile.url
+  expect_output --substring "building .* source can.t be a URL for COPY"
+}
+
+@test "bud quiet" {
+  # -q must suppress all step output, leaving exactly one line: the
+  # 64-hex-digit image ID.
+  _prefetch alpine
+  run_buildah build --format docker -t quiet-test $WITH_POLICY_JSON -q $BUDFILES/shell
+  expect_line_count 1
+  expect_output --substring '^[0-9a-f]{64}$'
+}
+
+@test "bud COPY with Env Var in Containerfile" {
+  # COPY sources containing ENV references must expand; verified with and
+  # without layer caching.
+  _prefetch alpine
+  run_buildah build $WITH_POLICY_JSON -t testctr $BUDFILES/copy-envvar
+  run_buildah from testctr
+  run_buildah run testctr-working-container ls /file-0.0.1.txt
+  run_buildah rm -a
+
+  run_buildah build $WITH_POLICY_JSON --layers -t testctr $BUDFILES/copy-envvar
+  run_buildah from testctr
+  run_buildah run testctr-working-container ls /file-0.0.1.txt
+  run_buildah rm -a
+}
+
+@test "bud with custom arch" {
+  # --arch must be recorded in both the Docker and OCI image configs.
+  run_buildah build $WITH_POLICY_JSON \
+    -f $BUDFILES/from-scratch/Containerfile \
+    -t arch-test \
+    --arch=arm
+
+  run_buildah inspect --format "{{ .Docker.Architecture }}" arch-test
+  expect_output arm
+
+  run_buildah inspect --format "{{ .OCIv1.Architecture }}" arch-test
+  expect_output arm
+}
+
+@test "bud with custom os" {
+  # --os must be recorded in both the Docker and OCI image configs.
+  run_buildah build $WITH_POLICY_JSON \
+    -f $BUDFILES/from-scratch/Containerfile \
+    -t os-test \
+    --os=windows
+
+  run_buildah inspect --format "{{ .Docker.OS }}" os-test
+  expect_output windows
+
+  run_buildah inspect --format "{{ .OCIv1.OS }}" os-test
+  expect_output windows
+}
+
+@test "bud with custom os-version" {
+  # --os-version must be recorded in both the Docker and OCI image configs.
+  run_buildah build $WITH_POLICY_JSON \
+    -f $BUDFILES/from-scratch/Containerfile \
+    -t os-version-test \
+    --os-version=1.0
+
+  run_buildah inspect --format "{{ .Docker.OSVersion }}" os-version-test
+  expect_output 1.0
+
+  run_buildah inspect --format "{{ .OCIv1.OSVersion }}" os-version-test
+  expect_output 1.0
+}
+
+@test "bud with custom os-features" {
+  # Add "removed", then strip it again via the trailing-dash form
+  # ("removed-"); only "win32k" should survive in the configs.
+  run_buildah build $WITH_POLICY_JSON \
+    -f $BUDFILES/from-scratch/Containerfile \
+    -t os-features-test \
+    --os-feature removed --os-feature removed- --os-feature win32k
+
+  run_buildah inspect --format "{{ .Docker.OSFeatures }}" os-features-test
+  expect_output '[win32k]'
+
+  run_buildah inspect --format "{{ .OCIv1.OSFeatures }}" os-features-test
+  expect_output '[win32k]'
+}
+
+@test "bud with custom platform" {
+  # --platform=os/arch must populate both the OS and Architecture fields
+  # of the Docker and OCI configs.
+  run_buildah build $WITH_POLICY_JSON \
+    -f $BUDFILES/from-scratch/Containerfile \
+    -t platform-test \
+    --platform=windows/arm
+
+  run_buildah inspect --format "{{ .Docker.OS }}" platform-test
+  expect_output windows
+
+  run_buildah inspect --format "{{ .OCIv1.OS }}" platform-test
+  expect_output windows
+
+  run_buildah inspect --format "{{ .Docker.Architecture }}" platform-test
+  expect_output arm
+
+  run_buildah inspect --format "{{ .OCIv1.Architecture }}" platform-test
+  expect_output arm
+}
+
+@test "bud with custom platform and empty os or arch" {
+  # A trailing or leading slash ("windows/" or "/arm") leaves the missing
+  # half of the platform at its default; the given half must still stick.
+  run_buildah build $WITH_POLICY_JSON \
+    -f $BUDFILES/from-scratch/Containerfile \
+    -t platform-test \
+    --platform=windows/
+
+  run_buildah inspect --format "{{ .Docker.OS }}" platform-test
+  expect_output windows
+
+  run_buildah inspect --format "{{ .OCIv1.OS }}" platform-test
+  expect_output windows
+
+  run_buildah build $WITH_POLICY_JSON \
+    -f $BUDFILES/from-scratch/Containerfile \
+    -t platform-test2 \
+    --platform=/arm
+
+  run_buildah inspect --format "{{ .Docker.Architecture }}" platform-test2
+  expect_output arm
+
+  run_buildah inspect --format "{{ .OCIv1.Architecture }}" platform-test2
+  expect_output arm
+}
+
+@test "bud Add with linked tarball" {
+  # ADD of a symlink pointing at a tar (and tar.gz) archive must follow the
+  # link and extract the archive contents into the image.
+  _prefetch alpine
+  run_buildah build $WITH_POLICY_JSON -f $BUDFILES/symlink/Containerfile.add-tar-with-link -t testctr $BUDFILES/symlink
+  run_buildah from testctr
+  run_buildah run testctr-working-container ls /tmp/testdir/testfile.txt
+  run_buildah rm -a
+  run_buildah rmi -a -f
+
+  _prefetch alpine
+  run_buildah build $WITH_POLICY_JSON -f $BUDFILES/symlink/Containerfile.add-tar-gz-with-link -t testctr $BUDFILES/symlink
+  run_buildah from testctr
+  run_buildah run testctr-working-container ls /tmp/testdir/testfile.txt
+  run_buildah rm -a
+  run_buildah rmi -a -f
+}
+
+@test "bud file above context directory" {
+  # References escaping the context directory (e.g. ../) must be rejected.
+  run_buildah 125 build $WITH_POLICY_JSON -t testctr $BUDFILES/context-escape-dir/testdir
+  expect_output --substring "escaping context directory error"
+}
+
+@test "bud-multi-stage-args-scope" {
+  # An ARG declared in one stage must not leak into later stages:
+  # the file written from $SECRET in the final stage should be empty.
+  _prefetch alpine
+  run_buildah build $WITH_POLICY_JSON --layers -t multi-stage-args --build-arg SECRET=secretthings -f Dockerfile.arg $BUDFILES/multi-stage-builds
+  run_buildah from --name test-container multi-stage-args
+  run_buildah run test-container -- cat test_file
+  expect_output ""
+}
+
+@test "bud-multi-stage-args-history" {
+  # Build-arg values must never be recorded in image history; grep for the
+  # secret across all three history views and expect no matches.
+  _prefetch alpine
+  run_buildah build $WITH_POLICY_JSON --layers -t multi-stage-args --build-arg SECRET=secretthings -f Dockerfile.arg $BUDFILES/multi-stage-builds
+  run_buildah inspect --format '{{range .History}}{{println .CreatedBy}}{{end}}' multi-stage-args
+  run grep "secretthings" <<< "$output"
+  expect_output ""
+
+  run_buildah inspect --format '{{range .OCIv1.History}}{{println .CreatedBy}}{{end}}' multi-stage-args
+  run grep "secretthings" <<< "$output"
+  expect_output ""
+
+  run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' multi-stage-args
+  run grep "secretthings" <<< "$output"
+  expect_output ""
+}
+
+@test "bud with encrypted FROM image" {
+  # Push a JWE-encrypted busybox to a local registry, then verify building
+  # FROM it fails without a key, fails with the wrong key, and succeeds
+  # with the right one.  1024-bit RSA is weak but fine for a throwaway
+  # test key (and fast to generate).
+  _prefetch busybox
+  local contextdir=${TEST_SCRATCH_DIR}/tmp
+  mkdir $contextdir
+  openssl genrsa -out $contextdir/mykey.pem 1024
+  openssl genrsa -out $contextdir/mykey2.pem 1024
+  openssl rsa -in $contextdir/mykey.pem -pubout > $contextdir/mykey.pub
+  start_registry
+  run_buildah push $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --encryption-key jwe:$contextdir/mykey.pub busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+  target=busybox-image
+  echo FROM localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest > $contextdir/Dockerfile
+
+  # Try to build from encrypted image without key
+  run_buildah 1 build $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword -t ${target} -f $contextdir/Dockerfile
+  assert "$output" =~ "archive/tar: invalid tar header"
+
+  # Try to build from encrypted image with wrong key
+  run_buildah 125 build $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --decryption-key $contextdir/mykey2.pem -t ${target} -f $contextdir/Dockerfile
+  assert "$output" =~ "no suitable key found for decrypting layer key"
+  assert "$output" =~ "- JWE: No suitable private key found for decryption"
+
+  # Try to build with the correct key
+  run_buildah build $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --decryption-key $contextdir/mykey.pem -t ${target} -f $contextdir/Dockerfile
+  assert "$output" =~ "Successfully tagged localhost:$REGISTRY_PORT/"
+
+  rm -rf $contextdir
+}
+
+@test "bud with --build-arg" {
+  # Three ways of supplying an ARG: not at all (warn), key=value on the
+  # command line, and bare key pulled from the caller's environment.
+  _prefetch alpine busybox
+  target=busybox-image
+
+  # Envariable not present at all
+  run_buildah --log-level "warn" bud $WITH_POLICY_JSON -t ${target} $BUDFILES/build-arg
+  expect_output --substring 'missing \\"foo\\" build argument. Try adding'
+
+  # Envariable explicitly set on command line
+  run_buildah build $WITH_POLICY_JSON -t ${target} --build-arg foo=bar $BUDFILES/build-arg
+  assert "${lines[3]}" = "bar"
+
+  # Envariable from environment
+  export foo=$(random_string 20)
+  run_buildah build $WITH_POLICY_JSON -t ${target} --build-arg foo $BUDFILES/build-arg
+  assert "${lines[3]}" = "$foo"
+}
+
+@test "bud arg and env var with same name" {
+  # Regression test for https://github.com/containers/buildah/issues/2345
+  # ENV must win over an ARG of the same name.
+  run_buildah build $WITH_POLICY_JSON -t testctr $BUDFILES/dupe-arg-env-name
+  expect_output --substring "https://example.org/bar"
+}
+
+@test "bud copy chown with newuser" {
+  # Regression test for https://github.com/containers/buildah/issues/2192
+  # COPY --chown must work for a user created earlier in the same build.
+  run_buildah build $WITH_POLICY_JSON -t testctr -f $BUDFILES/copy-chown/Containerfile.chown_user $BUDFILES/copy-chown
+  expect_output --substring "myuser myuser"
+}
+
+@test "bud-builder-identity" {
+  # The io.buildah.version label must match the version reported by
+  # `buildah --version` (third whitespace-separated field).
+  _prefetch alpine
+  parent=alpine
+  target=no-change-image
+  run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+  run_buildah --version
+  local -a output_fields=($output)
+  buildah_version=${output_fields[2]}
+
+  run_buildah inspect --format '{{ index .Docker.Config.Labels "io.buildah.version"}}' $target
+  expect_output "$buildah_version"
+}
+
+@test "run check --from with arg" {
+  # FROM and COPY --from referencing build-arg values must expand; the
+  # step log still shows the unexpanded ${...} form, hence the escaping.
+  skip_if_no_runtime
+
+  ${OCI} --version
+  _prefetch alpine
+  _prefetch debian
+
+  run_buildah build --build-arg base=alpine --build-arg toolchainname=busybox --build-arg destinationpath=/tmp --pull=false $WITH_POLICY_JSON -f $BUDFILES/from-with-arg/Containerfile .
+  expect_output --substring "FROM alpine"
+  expect_output --substring 'STEP 4/4: COPY --from=\$\{toolchainname\} \/ \$\{destinationpath\}'
+  run_buildah rm -a
+}
+
+@test "bud preserve rootfs for --mount=type=bind,from=" {
+  # A bind mount from another stage must not be clobbered by later steps;
+  # NONCE defeats layer caching so the RUN actually executes.
+  _prefetch alpine
+  run_buildah build --build-arg NONCE="$(date)" --layers --pull=false $WITH_POLICY_JSON -f Dockerfile.3 $BUDFILES/cache-stages
+  expect_output --substring "Worked"
+}
+
+@test "bud timestamp" {
+  # --timestamp=40 (seconds since the epoch) must pin the image creation
+  # time, the history timestamps, and the mtimes of files in the layers.
+  _prefetch alpine
+  timestamp=40
+  run_buildah build --timestamp=${timestamp} --quiet --pull=false $WITH_POLICY_JSON -t timestamp -f Dockerfile.1 $BUDFILES/cache-stages
+  cid=$output
+  run_buildah inspect --format '{{ .Docker.Created }}' timestamp
+  expect_output --substring "1970-01-01"
+  run_buildah inspect --format '{{ .OCIv1.Created }}' timestamp
+  expect_output --substring "1970-01-01"
+  run_buildah inspect --format '{{ .History }}' timestamp
+  expect_output --substring "1970-01-01 00:00:${timestamp}"
+
+  run_buildah from --quiet --pull=false $WITH_POLICY_JSON timestamp
+  cid=$output
+  run_buildah run $cid ls -l /tmpfile
+  expect_output --substring "1970"
+
+  # `buildah images` .Created is the raw epoch-seconds value.
+  run_buildah images --format "{{.Created}}" timestamp
+  expect_output ${timestamp}
+
+  rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "bud timestamp compare" {
+  # Two builds with the same --timestamp must be bit-for-bit cacheable:
+  # the second build should return the exact same image ID.
+  _prefetch alpine
+  TIMESTAMP=$(date '+%s')
+  run_buildah build --timestamp=${TIMESTAMP} --quiet --pull=false $WITH_POLICY_JSON -t timestamp -f Dockerfile.1 $BUDFILES/cache-stages
+  cid=$output
+
+  # Bug fix: the variable set above is TIMESTAMP; the previous ${timestamp}
+  # (lowercase) was unset here, so this check compared against nothing and
+  # could never verify the recorded creation time.
+  run_buildah images --format "{{.Created}}" timestamp
+  expect_output ${TIMESTAMP}
+
+  run_buildah build --timestamp=${TIMESTAMP} --quiet --pull=false $WITH_POLICY_JSON -t timestamp -f Dockerfile.1 $BUDFILES/cache-stages
+  expect_output "$cid"
+
+  rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "bud with-rusage" {
+  # --log-rusage must emit per-step resource-usage lines on build output.
+  _prefetch alpine
+  run_buildah build --log-rusage --layers --pull=false --format docker $WITH_POLICY_JSON $BUDFILES/shell
+  cid=$output
+  # expect something that looks like it was formatted using pkg/rusage.FormatDiff()
+  expect_output --substring ".*\(system\).*\(user\).*\(elapsed\).*input.*output"
+}
+
+@test "bud with-rusage-logfile" {
+  # --rusage-logfile redirects rusage lines to a file; every line of that
+  # file must match the FormatDiff() shape (no stray output mixed in).
+  _prefetch alpine
+  run_buildah build --log-rusage --rusage-logfile ${TEST_SCRATCH_DIR}/foo.log --layers --pull=false --format docker $WITH_POLICY_JSON $BUDFILES/shell
+  # the logfile should exist
+  if [ ! -e ${TEST_SCRATCH_DIR}/foo.log ]; then die "rusage-logfile foo.log did not get created!"; fi
+  # expect that foo.log only contains lines that were formatted using pkg/rusage.FormatDiff()
+  formatted_lines=$(grep ".*\(system\).*\(user\).*\(elapsed\).*input.*output" ${TEST_SCRATCH_DIR}/foo.log | wc -l)
+  line_count=$(wc -l <${TEST_SCRATCH_DIR}/foo.log)
+  if [[ "$formatted_lines" -ne "$line_count" ]]; then
+    die "Got ${formatted_lines} lines formatted with pkg/rusage.FormatDiff() but rusage-logfile has ${line_count} lines"
+  fi
+}
+
+@test "bud-caching-from-scratch" {
+  # Layer caching must work for FROM scratch builds: identical rebuild
+  # yields the same image ID; changed intermediate content does not.
+  _prefetch alpine
+  # run the build once
+  run_buildah build --quiet --layers --pull=false --format docker $WITH_POLICY_JSON $BUDFILES/cache-scratch
+  iid="$output"
+
+  # now run it again - the cache should give us the same final image ID
+  run_buildah build --quiet --layers --pull=false --format docker $WITH_POLICY_JSON $BUDFILES/cache-scratch
+  assert "$output" = "$iid"
+
+  # now run it *again*, except with more content added at an intermediate step, which should invalidate the cache
+  run_buildah build --quiet --layers --pull=false --format docker $WITH_POLICY_JSON -f Dockerfile.different1 $BUDFILES/cache-scratch
+  assert "$output" !~ "$iid"
+
+  # now run it *again* again, except with more content added at an intermediate step, which should invalidate the cache
+  run_buildah build --quiet --layers --pull=false --format docker $WITH_POLICY_JSON -f Dockerfile.different2 $BUDFILES/cache-scratch
+  assert "$output" !~ "$iid"
+}
+
+@test "bud-caching-from-scratch-config" {
+  # Same as bud-caching-from-scratch, but the Dockerfile includes
+  # config-only instructions; cache hits/misses must behave identically.
+  _prefetch alpine
+  # run the build once
+  run_buildah build --quiet --layers --pull=false --format docker $WITH_POLICY_JSON -f Dockerfile.config $BUDFILES/cache-scratch
+  iid="$output"
+
+  # now run it again - the cache should give us the same final image ID
+  run_buildah build --quiet --layers --pull=false --format docker $WITH_POLICY_JSON -f Dockerfile.config $BUDFILES/cache-scratch
+  assert "$output" = "$iid"
+
+  # now run it *again*, except with more content added at an intermediate step, which should invalidate the cache
+  run_buildah build --quiet --layers --pull=false --format docker $WITH_POLICY_JSON -f Dockerfile.different1 $BUDFILES/cache-scratch
+  assert "$output" !~ "$iid"
+
+  # now run it *again* again, except with more content added at an intermediate step, which should invalidate the cache
+  run_buildah build --quiet --layers --pull=false --format docker $WITH_POLICY_JSON -f Dockerfile.different2 $BUDFILES/cache-scratch
+  assert "$output" !~ "$iid"
+}
+
+@test "bud capabilities test" {
+  # --cap-add / --cap-drop must be reflected in the bounding set (CapBnd)
+  # seen by RUN steps; expected hex masks depend on the default cap set
+  # from containers.conf.
+  _prefetch busybox
+  # something not enabled by default in containers.conf
+  run_buildah build --cap-add cap_sys_ptrace -t testcap $WITH_POLICY_JSON -f $BUDFILES/capabilities/Dockerfile
+  expect_output --substring "uid=3267"
+  expect_output --substring "CapBnd: 00000000a80c25fb"
+  expect_output --substring "CapEff: 0000000000000000"
+
+  # some things enabled by default in containers.conf
+  run_buildah build --cap-drop cap_chown,cap_dac_override,cap_fowner -t testcapd $WITH_POLICY_JSON -f $BUDFILES/capabilities/Dockerfile
+  expect_output --substring "uid=3267"
+  expect_output --substring "CapBnd: 00000000a80425f0"
+  expect_output --substring "CapEff: 0000000000000000"
+}
+
+@test "bud does not gobble stdin" {
+  # Regression test for #2708: buildah bud must leave stdin untouched so
+  # that `cmdlist | bash` pipelines survive a build in the middle.
+  _prefetch alpine
+
+  ctxdir=${TEST_SCRATCH_DIR}/bud
+  mkdir -p $ctxdir
+  cat >$ctxdir/Dockerfile <<EOF
+FROM alpine
+RUN true
+EOF
+
+  random_msg=$(head -10 /dev/urandom | tr -dc a-zA-Z0-9 | head -c12)
+
+  # Prior to #2708, buildah bud would gobble up its stdin even if it
+  # didn't actually use it. This prevented the use of 'cmdlist | bash';
+  # if 'buildah bud' was in cmdlist, everything past it would be lost.
+  #
+  # This is ugly but effective: it checks that buildah passes stdin untouched.
+  passthru=$(echo "$random_msg" | (run_buildah build --quiet $WITH_POLICY_JSON -t stdin-test ${ctxdir} >/dev/null; cat))
+
+  expect_output --from="$passthru" "$random_msg" "stdin was passed through"
+}
+
+@test "bud cache by format" {
+  # Cached layers must be keyed by output format: Docker- and OCI-format
+  # builds must not share cache entries, but repeated builds in the same
+  # format must.
+  # Build first in Docker format. Whether we do OCI or Docker first shouldn't matter, so we picked one.
+  run_buildah build --iidfile ${TEST_SCRATCH_DIR}/first-docker  --format docker --layers --quiet $WITH_POLICY_JSON $BUDFILES/cache-format
+
+  # Build in OCI format.  Cache should not re-use the same images, so we should get a different image ID.
+  run_buildah build --iidfile ${TEST_SCRATCH_DIR}/first-oci     --format oci    --layers --quiet $WITH_POLICY_JSON $BUDFILES/cache-format
+
+  # Build in Docker format again.  Cache traversal should 100% hit the Docker image, so we should get its image ID.
+  run_buildah build --iidfile ${TEST_SCRATCH_DIR}/second-docker --format docker --layers --quiet $WITH_POLICY_JSON $BUDFILES/cache-format
+
+  # Build in OCI format again.  Cache traversal should 100% hit the OCI image, so we should get its image ID.
+  run_buildah build --iidfile ${TEST_SCRATCH_DIR}/second-oci    --format oci    --layers --quiet $WITH_POLICY_JSON $BUDFILES/cache-format
+
+  # Compare them.  The two images we built in Docker format should be the same, the two we built in OCI format
+  # should be the same, but the OCI and Docker format images should be different.
+  assert "$(< ${TEST_SCRATCH_DIR}/first-docker)" = "$(< ${TEST_SCRATCH_DIR}/second-docker)" \
+         "iidfile(first docker) == iidfile(second docker)"
+  assert "$(< ${TEST_SCRATCH_DIR}/first-oci)" = "$(< ${TEST_SCRATCH_DIR}/second-oci)" \
+         "iidfile(first oci) == iidfile(second oci)"
+
+  assert "$(< ${TEST_SCRATCH_DIR}/first-docker)" != "$(< ${TEST_SCRATCH_DIR}/first-oci)" \
+         "iidfile(first docker) != iidfile(first oci)"
+}
+
+@test "bud cache add-copy-chown" {
+  # Build each variation of COPY (from context, from previous stage) and ADD (from context, not overriding an archive, URL) twice.
+  # Each second build should produce an image with the same ID as the first build, because the cache matches, but they should
+  # otherwise all be different.
+  # Rounds 1 and 2 use Dockerfile.<action>1 and .<action>2 (differing only
+  # in --chown); round 3 reuses .<action>1 and so must hit round 1's cache.
+  local actions="copy prev add tar url";
+  for i in 1 2 3; do
+    for action in $actions; do
+      # iidfiles are 1 2 3, but dockerfiles are only 1 2 then back to 1
+      iidfile=${TEST_SCRATCH_DIR}/${action}${i}
+      containerfile=Dockerfile.${action}$(((i-1) % 2 + 1))
+
+      run_buildah build --iidfile $iidfile --layers --quiet $WITH_POLICY_JSON -f $containerfile $BUDFILES/cache-chown
+    done
+  done
+
+  for action in $actions; do
+    # The third round of builds should match all of the first rounds by way
+    # of caching.
+    assert "$(< ${TEST_SCRATCH_DIR}/${action}1)" = "$(< ${TEST_SCRATCH_DIR}/${action}3)" \
+           "iidfile(${action}1) = iidfile(${action}3)"
+
+    # The second round of builds should not match the first rounds, since
+    # the different ownership makes the changes look different to the cache,
+    # except for cases where we extract an archive, where --chown is ignored.
+    local op="!="
+    if [[ $action = "tar" ]]; then
+      op="=";
+    fi
+    assert "$(< ${TEST_SCRATCH_DIR}/${action}1)" $op "$(< ${TEST_SCRATCH_DIR}/${action}2)" \
+           "iidfile(${action}1) $op iidfile(${action}2)"
+
+    # The first rounds of builds should all be different from each other,
+    # as a sanity thing.
+    for other in $actions; do
+      if [[ $other != $action ]]; then
+        assert "$(< ${TEST_SCRATCH_DIR}/${action}1)" != "$(< ${TEST_SCRATCH_DIR}/${other}1)" \
+               "iidfile(${action}1) != iidfile(${other}1)"
+      fi
+    done
+  done
+}
+
+@test "bud-terminal" {
+  # Smoke test: a build whose Dockerfile probes terminal behavior must not
+  # fail; no specific output is asserted.
+  run_buildah build $BUDFILES/terminal
+}
+
+@test "bud --ignorefile containerignore" {
+  # An ignore file supplied via --ignorefile (living outside the context)
+  # must behave exactly like an in-context .dockerignore.
+  _prefetch alpine busybox
+
+  CONTEXTDIR=${TEST_SCRATCH_DIR}/dockerignore
+  cp -r $BUDFILES/dockerignore ${CONTEXTDIR}
+  mv ${CONTEXTDIR}/.dockerignore ${TEST_SCRATCH_DIR}/containerignore
+
+  run_buildah build -t testbud $WITH_POLICY_JSON -f ${CONTEXTDIR}/Dockerfile.succeed --ignorefile  ${TEST_SCRATCH_DIR}/containerignore  ${CONTEXTDIR}
+
+  run_buildah from --name myctr testbud
+
+  # Ignored paths must be absent; test2.txt is the only survivor.
+  run_buildah 1 run myctr ls -l test1.txt
+  expect_output --substring "ls: test1.txt: No such file or directory"
+
+  run_buildah run myctr ls -l test2.txt
+
+  run_buildah 1 run myctr ls -l sub1.txt
+  expect_output --substring "ls: sub1.txt: No such file or directory"
+
+  run_buildah 1 run myctr ls -l sub2.txt
+  expect_output --substring "ls: sub2.txt: No such file or directory"
+
+  run_buildah 1 run myctr ls -l subdir/
+  expect_output --substring "ls: subdir/: No such file or directory"
+}
+
+@test "bud with network options" {
+  # Builds must succeed under each supported --network mode.
+  _prefetch alpine
+  target=alpine-image
+
+  run_buildah build --network=none $WITH_POLICY_JSON -t ${target} $BUDFILES/containerfile
+  expect_output --substring "FROM alpine"
+
+  run_buildah build --network=private $WITH_POLICY_JSON -t ${target} $BUDFILES/containerfile
+  expect_output --substring "FROM alpine"
+
+  run_buildah build --network=container $WITH_POLICY_JSON -t ${target} $BUDFILES/containerfile
+  expect_output --substring "FROM alpine"
+}
+
+@test "bud-replace-from-in-containerfile" {
+  # --from replaces only the FIRST FROM in the Containerfile.
+  _prefetch alpine busybox
+  # override the first FROM (fedora) image in the Containerfile
+  # with alpine, leave the second (busybox) alone.
+  run_buildah build $WITH_POLICY_JSON --from=alpine $BUDFILES/build-with-from
+  expect_output --substring "\[1/2] STEP 1/1: FROM alpine AS builder"
+  expect_output --substring "\[2/2] STEP 1/2: FROM busybox"
+}
+
+@test "bud test no --stdin" {
+  # RUN steps get no stdin by default (the `read` fails); --stdin wires
+  # the caller's stdin through to RUN.
+  _prefetch alpine
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p ${mytmpdir}
+cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN read -t 1 x && echo test got \<\$x\>
+RUN touch /tmp/done
+_EOF
+
+  # fail without --stdin
+  run_buildah 1 bud -t testbud $WITH_POLICY_JSON ${mytmpdir} <<< input
+  expect_output --substring "building .*: exit status 1"
+
+  run_buildah build --stdin -t testbud $WITH_POLICY_JSON ${mytmpdir} <<< input
+  expect_output --substring "test got <input>"
+}
+
+@test "bud with --arch flag" {
+  # Currently only a smoke test: the RUN-arch assertions below are
+  # commented out (they require emulation to execute a foreign-arch
+  # binary), so this just checks that --arch=arm64 builds succeed.
+  _prefetch alpine
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p ${mytmpdir}
+cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+#RUN arch
+_EOF
+
+  run_buildah build --arch=arm64 -t arch-test $WITH_POLICY_JSON ${mytmpdir} <<< input
+#  expect_output --substring "aarch64"
+
+#  run_buildah from --quiet --pull=false $WITH_POLICY_JSON arch-test
+#  cid=$output
+#  run_buildah run $cid arch
+#  expect_output --substring "aarch64"
+}
+
+@test "bud with --manifest flag new manifest" {
+  # --manifest with a name that does not yet exist must create the
+  # manifest list and add the built image's digest to it.
+  _prefetch alpine
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p ${mytmpdir}
+cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run echo hello
+_EOF
+
+  run_buildah build -q --manifest=testlist -t arch-test $WITH_POLICY_JSON ${mytmpdir} <<< input
+  cid=$output
+  run_buildah images
+  expect_output --substring testlist
+
+  run_buildah inspect --format '{{ .FromImageDigest }}' $cid
+  digest=$output
+
+  run_buildah manifest inspect testlist
+  expect_output --substring $digest
+}
+
+@test "bud with --manifest flag existing manifest" {
+  # --manifest with a pre-created manifest list must append the new
+  # image's digest to the existing list rather than failing.
+  _prefetch alpine
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p ${mytmpdir}
+cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run echo hello
+_EOF
+
+  run_buildah manifest create testlist
+
+  run_buildah build -q --manifest=testlist -t arch-test $WITH_POLICY_JSON ${mytmpdir} <<< input
+  cid=$output
+  run_buildah images
+  expect_output --substring testlist
+
+  run_buildah inspect --format '{{ .FromImageDigest }}' $cid
+  digest=$output
+
+  run_buildah manifest inspect testlist
+  expect_output --substring $digest
+}
+
+@test "bud test empty newdir" {
+  # COPY --from of an empty directory created in an earlier stage must not
+  # error; the build must reach the final COPY step.
+  _prefetch alpine
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p ${mytmpdir}
+cat > $mytmpdir/Containerfile << _EOF
+FROM alpine as galaxy
+
+RUN mkdir -p /usr/share/ansible/roles /usr/share/ansible/collections
+RUN echo "bar"
+RUN echo "foo" > /usr/share/ansible/collections/file.txt
+
+FROM galaxy
+
+RUN mkdir -p /usr/share/ansible/roles /usr/share/ansible/collections
+COPY --from=galaxy /usr/share/ansible/roles /usr/share/ansible/roles
+COPY --from=galaxy /usr/share/ansible/collections /usr/share/ansible/collections
+_EOF
+
+  run_buildah build --layers $WITH_POLICY_JSON -t testbud $mytmpdir
+  expect_output --substring "COPY --from=galaxy /usr/share/ansible/collections /usr/share/ansible/collections"
+}
+
+@test "bud retain intermediary image" {
+  # image-a is used as a non-final stage of the second build; it must
+  # still be present (not pruned) after that build completes.
+  _prefetch alpine
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p ${mytmpdir}
+cat > $mytmpdir/Containerfile.a << _EOF
+FROM alpine
+LABEL image=a
+RUN echo foo
+_EOF
+
+cat > $mytmpdir/Containerfile.b << _EOF
+FROM image-a
+FROM scratch
+_EOF
+
+  run_buildah build -f Containerfile.a -q --manifest=testlist -t image-a $WITH_POLICY_JSON ${mytmpdir} <<< input
+  cid=$output
+  run_buildah images -f "label=image=a"
+  expect_output --substring image-a
+
+  run_buildah build -f Containerfile.b -q --manifest=testlist -t image-b $WITH_POLICY_JSON ${mytmpdir} <<< input
+  cid=$output
+  run_buildah images
+  expect_output --substring image-a
+}
+
+@test "bud --pull=false --arch test" {
+  # Even with --pull=false, a local image whose architecture does not
+  # match --arch must be repulled for the requested architecture.
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p ${mytmpdir}
+cat > $mytmpdir/Containerfile << _EOF
+FROM $SAFEIMAGE
+_EOF
+  run_buildah build --pull=false -q --arch=amd64 -t image-amd $WITH_POLICY_JSON ${mytmpdir}
+  run_buildah inspect --format '{{ .OCIv1.Architecture }}' image-amd
+  expect_output amd64
+
+  # Tag the image to localhost/safeimage to make sure that the image gets
+  # pulled since the local one does not match the requested architecture.
+  run_buildah tag image-amd localhost/${SAFEIMAGE_NAME}:${SAFEIMAGE_TAG}
+  run_buildah build --pull=false -q --arch=arm64 -t image-arm $WITH_POLICY_JSON ${mytmpdir}
+  run_buildah inspect --format '{{ .OCIv1.Architecture }}' image-arm
+  expect_output arm64
+
+  run_buildah inspect --format '{{ .FromImageID }}' image-arm
+  fromiid=$output
+
+  run_buildah inspect --format '{{ .OCIv1.Architecture }}' $fromiid
+  expect_output arm64
+}
+
+@test "bud --file with directory" {
+  # Passing a directory (not a file) to --file must fail with exit 125.
+  _prefetch alpine
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+  mkdir -p ${mytmpdir}
+  cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+_EOF
+
+  run_buildah 125 build -t testbud $WITH_POLICY_JSON --file ${mytmpdir} .
+}
+
+@test "bud --authfile" {
+  # Build FROM a credentials-protected local registry, authenticating via
+  # an --authfile produced by `buildah login`.
+  _prefetch alpine
+  start_registry
+  run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+  run_buildah push $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth alpine docker://localhost:${REGISTRY_PORT}/buildah/alpine
+
+  mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+  mkdir -p ${mytmpdir}
+  cat > $mytmpdir/Containerfile << _EOF
+FROM localhost:${REGISTRY_PORT}/buildah/alpine
+RUN touch /test
+_EOF
+  run_buildah build -t myalpine --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+  run_buildah rmi localhost:${REGISTRY_PORT}/buildah/alpine
+  run_buildah rmi myalpine
+}
+
+@test "build verify cache behaviour with --cache-ttl" {
+  _prefetch alpine
+  local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+  mkdir -p $contextdir
+
+  cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+RUN touch hello
+RUN echo world
+_EOF
+
+  # Build with --timestamp somewhere in the past
+  run_buildah build $WITH_POLICY_JSON --timestamp 1628099045 --layers -t source -f $contextdir/Dockerfile1
+  # Specify --cache-ttl 0.5s and cache should
+  # not be used since cached image is created
+  # with timestamp somewhere in past ( in ~2021 )
+  run_buildah build $WITH_POLICY_JSON --cache-ttl=0.5s --layers -t source -f $contextdir/Dockerfile1
+  # Should not contain `Using cache` since the
+  # cached layers are far older than the 0.5s TTL.
+  assert "$output" !~ "Using cache"
+  # clean all images and cache
+  run_buildah rmi --all -f
+  _prefetch alpine
+  run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile1
+  # Cache should be used since our ttl is 1h but
+  # cache layers are just built so they should be
+  # few seconds old.
+  run_buildah build $WITH_POLICY_JSON --cache-ttl=1h --layers -t source -f $contextdir/Dockerfile1
+  # must use already cached images.
+  expect_output --substring "Using cache"
+}
+
+@test "build verify cache behaviour with --cache-ttl=0s" {
+  _prefetch alpine
+  local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+  mkdir -p $contextdir
+
+  cat > $contextdir/Dockerfile1 << _EOF
+FROM alpine
+RUN touch hello
+RUN echo world
+_EOF
+
+  # Build with --timestamp somewhere in the past
+  run_buildah build $WITH_POLICY_JSON --timestamp 1628099045 --layers -t source -f $contextdir/Dockerfile1
+  # --cache-ttl=0 is shorthand for "never use cache":
+  # buildah should treat it as --no-cache regardless of
+  # how old the cached layers are.
+  run_buildah --log-level debug build $WITH_POLICY_JSON --cache-ttl=0 --layers -t source -f $contextdir/Dockerfile1
+  # Must not contain `Using cache`, and the debug log
+  # must show the flag being mapped to --no-cache.
+  assert "$output" !~ "Using cache"
+  expect_output --substring "Setting --no-cache=true"
+}
+
+@test "build test pushing and pulling from multiple remote cache sources" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ echo something > ${mytmpdir}/somefile
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN echo hello
+RUN echo world
+RUN touch hello
+ADD somefile somefile
+
+FROM alpine
+RUN echo hello
+COPY --from=0 hello hello
+_EOF
+
+ start_registry
+ run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+
+ # ------ Test case ------ #
+ # prepare expected output beforehand
+ # must push cache twice i.e for first step and second step
+ run printf "STEP 2/5: RUN echo hello\nhello\n--> Pushing cache"
+ step1=$output
+ run printf "STEP 3/5: RUN echo world\nworld\n--> Pushing cache"
+ step2=$output
+ run printf "STEP 4/5: RUN touch hello\n--> Pushing cache"
+ step3=$output
+ run printf "STEP 5/5: ADD somefile somefile\n--> Pushing cache"
+ step4=$output
+ # First run step in second stage should not be pushed since its already pushed
+ run printf "STEP 2/3: RUN echo hello\n--> Using cache"
+ step5=$output
+ # Last step is `COPY --from=0 hello hello' so it must be committed and pushed
+ # actual output is `[2/2] STEP 3/3: COPY --from=0 hello hello\n[2/2] COMMIT test\n-->Pushing cache`
+ # but lets match smaller suffix
+ run printf "COMMIT test\n--> Pushing cache"
+ step6=$output
+
+ # actually run build
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-to localhost:${REGISTRY_PORT}/temp2 --cache-to localhost:${REGISTRY_PORT}/temp -t test -f ${mytmpdir}/Containerfile ${mytmpdir}
+ expect_output --substring "$step1"
+ expect_output --substring "$step2"
+ expect_output --substring "$step3"
+ expect_output --substring "$step4"
+ expect_output --substring "$step5"
+ expect_output --substring "$step6"
+
+ # clean all cache and intermediate images
+ # to make sure that we are only using cache
+ # from remote repo and not the local storage.
+ run_buildah rmi --all -f
+
+ # ------ Test case ------ #
+ # expect cache to be pushed on remote stream
+ # now a build on clean slate must pull cache
+ # from remote instead of actually computing the
+ # run steps
+ run printf "STEP 2/5: RUN echo hello\n--> Cache pulled from remote"
+ step1=$output
+ run printf "STEP 3/5: RUN echo world\n--> Cache pulled from remote"
+ step2=$output
+ run printf "STEP 4/5: RUN touch hello\n--> Cache pulled from remote"
+ step3=$output
+ run printf "STEP 5/5: ADD somefile somefile\n--> Cache pulled from remote"
+ step4=$output
+ # First run step in second stage should not be pulled since its already pulled
+ run printf "STEP 2/3: RUN echo hello\n--> Using cache"
+ step5=$output
+ run printf "COPY --from=0 hello hello\n--> Cache pulled from remote"
+ step6=$output
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-from localhost:${REGISTRY_PORT}/temp -t test -f ${mytmpdir}/Containerfile ${mytmpdir}
+ expect_output --substring "$step1"
+ expect_output --substring "$step2"
+ expect_output --substring "$step3"
+ expect_output --substring "$step4"
+ expect_output --substring "$step5"
+ expect_output --substring "$step6"
+
+ ##### Test when cache source is: localhost:${REGISTRY_PORT}/temp2
+
+ # clean all cache and intermediate images
+ # to make sure that we are only using cache
+ # from remote repo and not the local storage.
+ run_buildah rmi --all -f
+
+ # ------ Test case ------ #
+ # expect cache to have been pushed to the remote repo;
+ # a build on a clean slate must now pull the cache
+ # from the remote instead of actually computing the
+ # run steps
+ run printf "STEP 2/5: RUN echo hello\n--> Cache pulled from remote"
+ step1=$output
+ run printf "STEP 3/5: RUN echo world\n--> Cache pulled from remote"
+ step2=$output
+ run printf "STEP 4/5: RUN touch hello\n--> Cache pulled from remote"
+ step3=$output
+ run printf "STEP 5/5: ADD somefile somefile\n--> Cache pulled from remote"
+ step4=$output
+ # First run step in second stage should not be pulled since it's already been pulled
+ run printf "STEP 2/3: RUN echo hello\n--> Using cache"
+ step5=$output
+ run printf "COPY --from=0 hello hello\n--> Cache pulled from remote"
+ step6=$output
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-from localhost:${REGISTRY_PORT}/temp2 -t test -f ${mytmpdir}/Containerfile ${mytmpdir}
+ expect_output --substring "$step1"
+ expect_output --substring "$step2"
+ expect_output --substring "$step3"
+ expect_output --substring "$step4"
+ expect_output --substring "$step5"
+ expect_output --substring "$step6"
+}
+
+@test "build test pushing and pulling from remote cache sources" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ echo something > ${mytmpdir}/somefile
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN echo hello
+RUN echo world
+RUN touch hello
+ADD somefile somefile
+
+FROM alpine
+RUN echo hello
+COPY --from=0 hello hello
+RUN --mount=type=cache,id=YfHI60aApFM-target,target=/target echo world > /target/hello
+_EOF
+
+ start_registry
+ run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+
+ # ------ Test case ------ #
+ # prepare expected output beforehand
+ # must push cache twice, i.e. for the first step and the second step
+ run printf "STEP 2/5: RUN echo hello\nhello\n--> Pushing cache"
+ step1=$output
+ run printf "STEP 3/5: RUN echo world\nworld\n--> Pushing cache"
+ step2=$output
+ run printf "STEP 4/5: RUN touch hello\n--> Pushing cache"
+ step3=$output
+ run printf "STEP 5/5: ADD somefile somefile\n--> Pushing cache"
+ step4=$output
+ # First run step in second stage should not be pushed since it's already been pushed
+ run printf "STEP 2/4: RUN echo hello\n--> Using cache"
+ step5=$output
+ # Last step is `COPY --from=0 hello hello' so it must be committed and pushed
+ # actual output is `[2/2] STEP 3/3: COPY --from=0 hello hello\n[2/2] COMMIT test\n-->Pushing cache`
+ # but lets match smaller suffix
+ run printf "COMMIT test\n--> Pushing cache"
+ step6=$output
+
+ # actually run build
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-to localhost:${REGISTRY_PORT}/temp -t test -f ${mytmpdir}/Containerfile ${mytmpdir}
+ expect_output --substring "$step1"
+ expect_output --substring "$step2"
+ expect_output --substring "$step3"
+ expect_output --substring "$step4"
+ expect_output --substring "$step5"
+ expect_output --substring "$step6"
+
+ # clean all cache and intermediate images
+ # to make sure that we are only using cache
+ # from remote repo and not the local storage.
+ run_buildah rmi --all -f
+
+ # ------ Test case ------ #
+ # expect cache to have been pushed to the remote repo;
+ # a build on a clean slate must now pull the cache
+ # from the remote instead of actually computing the
+ # run steps
+ run printf "STEP 2/5: RUN echo hello\n--> Cache pulled from remote"
+ step1=$output
+ run printf "STEP 3/5: RUN echo world\n--> Cache pulled from remote"
+ step2=$output
+ run printf "STEP 4/5: RUN touch hello\n--> Cache pulled from remote"
+ step3=$output
+ run printf "STEP 5/5: ADD somefile somefile\n--> Cache pulled from remote"
+ step4=$output
+ # First run step in second stage should not be pulled since it's already been pulled
+ run printf "STEP 2/4: RUN echo hello\n--> Using cache"
+ step5=$output
+ run printf "COPY --from=0 hello hello\n--> Cache pulled from remote"
+ step6=$output
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-from localhost:${REGISTRY_PORT}/temp --cache-to localhost:${REGISTRY_PORT}/temp -t test -f ${mytmpdir}/Containerfile ${mytmpdir}
+ expect_output --substring "$step1"
+ expect_output --substring "$step2"
+ expect_output --substring "$step3"
+ expect_output --substring "$step4"
+ expect_output --substring "$step5"
+ expect_output --substring "$step6"
+
+ # ------ Test case ------ #
+ # Try building again with --cache-from to make sure
+ # we don't pull image if we already have it in our
+ # local storage
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-from localhost:${REGISTRY_PORT}/temp -t test -f ${mytmpdir}/Containerfile ${mytmpdir}
+ # must use cache since we have cache in local storage
+ expect_output --substring "Using cache"
+ # should not pull cache if it's already in local storage
+ assert "$output" !~ "Cache pulled"
+
+ # ------ Test case ------ #
+ # Build again with --cache-to and --cache-from
+ # Since intermediate images are already present
+ # in local storage, nothing needs to be pulled, but
+ # the intermediates must be pushed since buildah is not
+ # aware whether they are on the remote repo or not.
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-from localhost:${REGISTRY_PORT}/temp --cache-to localhost:${REGISTRY_PORT}/temp -t test -f ${mytmpdir}/Containerfile ${mytmpdir}
+ # must use cache since we have cache in local storage
+ expect_output --substring "Using cache"
+ # must also push cache since nothing was pulled from remote repo
+ expect_output --substring "Pushing cache"
+ # should not pull cache if it's already in local storage
+ assert "$output" !~ "Cache pulled"
+
+}
+
+@test "build test pushing and pulling from remote cache sources - after adding content summary" {
+ _prefetch alpine
+
+ start_registry
+ run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+
+ # ------ Test case ------ #
+ # prepare expected output beforehand
+ # must push cache twice, i.e. for the first step and the second step
+ run printf "STEP 2/3: ARG VAR=hello\n--> Pushing cache"
+ step1=$output
+ run printf "STEP 3/3: RUN echo \"Hello \$VAR\""
+ step2=$output
+ run printf "Hello hello"
+ step3=$output
+ run printf "COMMIT test\n--> Pushing cache"
+ step6=$output
+
+ # actually run build
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-to localhost:${REGISTRY_PORT}/temp -t test -f $BUDFILES/cache-from/Containerfile
+ expect_output --substring "$step1"
+ #expect_output --substring "$step2"
+ expect_output --substring "$step3"
+ expect_output --substring "$step6"
+
+ # clean all cache and intermediate images
+ # to make sure that we are only using cache
+ # from remote repo and not the local storage.
+
+ # Important side-note: don't use `run_buildah rmi --all -f`
+ # since on podman-remote test this will remove prefetched alpine
+ # and it will try to pull alpine from docker.io with
+ # a completely different digest (ruining our cache logic).
+ run_buildah rmi test
+
+ # ------ Test case ------ #
+ # expect cache to have been pushed to the remote repo;
+ # a build on a clean slate must now pull the cache
+ # from the remote instead of actually computing the
+ # run steps
+ run printf "STEP 2/3: ARG VAR=hello\n--> Cache pulled from remote"
+ step1=$output
+ run printf "VAR\"\n--> Cache pulled from remote"
+ step2=$output
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-from localhost:${REGISTRY_PORT}/temp --cache-to localhost:${REGISTRY_PORT}/temp -t test -f $BUDFILES/cache-from/Containerfile
+ expect_output --substring "$step1"
+ expect_output --substring "$step2"
+
+ # ------ Test case ------ #
+ # Try building again with --cache-from to make sure
+ # we don't pull image if we already have it in our
+ # local storage
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-from localhost:${REGISTRY_PORT}/temp -t test -f $BUDFILES/cache-from/Containerfile
+ # must use cache since we have cache in local storage
+ expect_output --substring "Using cache"
+ # should not pull cache if it's already in local storage
+ assert "$output" !~ "Cache pulled"
+
+ # ------ Test case ------ #
+ # Build again with --cache-to and --cache-from
+ # Since intermediate images are already present
+ # in local storage, nothing needs to be pulled, but
+ # the intermediates must be pushed since buildah is not
+ # aware whether they are on the remote repo or not.
+ run_buildah build $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --layers --cache-from localhost:${REGISTRY_PORT}/temp --cache-to localhost:${REGISTRY_PORT}/temp -t test -f $BUDFILES/cache-from/Containerfile
+ # must use cache since we have cache in local storage
+ expect_output --substring "Using cache"
+ # must also push cache since nothing was pulled from remote repo
+ expect_output --substring "Pushing cache"
+ # should not pull cache if it's already in local storage
+ assert "$output" !~ "Cache pulled"
+
+}
+
+@test "bud with undefined build arg directory" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+ARG SECRET="Itismysecret"
+ARG NEWSECRET
+RUN echo $SECRET
+RUN touch hello
+FROM alpine
+COPY --from=0 hello .
+RUN echo "$SECRET"
+_EOF
+
+ run_buildah build -t testbud $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ assert "$output" !~ '--build-arg SECRET=<VALUE>'
+ expect_output --substring '\-\-build-arg NEWSECRET=<VALUE>'
+
+ run_buildah build -t testbud $WITH_POLICY_JSON --build-arg NEWSECRET="VerySecret" --file ${mytmpdir}/Containerfile .
+ assert "$output" !~ '--build-arg SECRET=<VALUE>'
+ assert "$output" !~ '--build-arg NEWSECRET=<VALUE>'
+
+# case should similarly honor globally declared args
+ cat > $mytmpdir/Containerfile << _EOF
+ARG SECRET="Itismysecret"
+FROM alpine
+ARG SECRET
+ARG NEWSECRET
+RUN echo $SECRET
+RUN touch hello
+FROM alpine
+COPY --from=0 hello .
+RUN echo "$SECRET"
+_EOF
+
+ run_buildah build -t testbud $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ assert "$output" !~ '--build-arg SECRET=<VALUE>'
+ expect_output --substring '\-\-build-arg NEWSECRET=<VALUE>'
+
+ run_buildah build -t testbud $WITH_POLICY_JSON --build-arg NEWSECRET="VerySecret" --file ${mytmpdir}/Containerfile .
+ assert "$output" !~ '--build-arg SECRET=<VALUE>'
+ assert "$output" !~ '--build-arg NEWSECRET=<VALUE>'
+}
+
+@test "bud with arg in from statement" {
+ _prefetch alpine
+ run_buildah build -t testbud $WITH_POLICY_JSON --build-arg app_type=x --build-arg another_app_type=m --file $BUDFILES/with-arg/Dockerfilefromarg .
+ expect_output --substring 'world'
+}
+
+@test "bud with --runtime and --runtime-flag" {
+ # This Containerfile needs us to be able to handle a working RUN instruction.
+ skip_if_no_runtime
+ skip_if_chroot
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run echo hello
+_EOF
+
+ local found_runtime=
+
+ # runc-1.0.0-70.rc92 and 1.0.1-3 have completely different
+ # debug messages. This is the only string common to both.
+ local flag_accepted_rx="level=debug.*msg=.child process in init"
+ if [ -n "$(command -v runc)" ]; then
+ found_runtime=y
+ if is_cgroupsv2; then
+ # The result with cgroup v2 depends on the version of runc.
+ run_buildah '?' bud --runtime=runc --runtime-flag=debug \
+ -q -t alpine-bud-runc $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ if [ "$status" -eq 0 ]; then
+ expect_output --substring "$flag_accepted_rx"
+ else
+ # If it fails, this is because this version of runc doesn't support cgroup v2.
+ expect_output --substring "this version of runc doesn't work on cgroups v2" "should fail by unsupportability for cgroupv2"
+ fi
+ else
+ run_buildah build --runtime=runc --runtime-flag=debug \
+ -q -t alpine-bud-runc $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --substring "$flag_accepted_rx"
+ fi
+
+ fi
+
+ if [ -n "$(command -v crun)" ]; then
+ found_runtime=y
+
+ # Use seccomp to make crun output a warning message because crun writes few logs.
+ cat > ${TEST_SCRATCH_DIR}/seccomp.json << _EOF
+{
+ "defaultAction": "SCMP_ACT_ALLOW",
+ "syscalls": [
+ {
+ "name": "unknown",
+ "action": "SCMP_ACT_KILL"
+ }
+ ]
+}
+_EOF
+
+ run_buildah build --runtime=crun --runtime-flag=debug --security-opt seccomp=${TEST_SCRATCH_DIR}/seccomp.json \
+ -q -t alpine-bud-crun $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --substring "unknown seccomp syscall"
+ fi
+
+ if [ -z "${found_runtime}" ]; then
+ die "Did not find 'runc' nor 'crun' in \$PATH - could not run this test!"
+ fi
+
+}
+
+@test "bud - invalid runtime flags test" {
+ skip_if_no_runtime
+ skip_if_chroot
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run echo hello
+_EOF
+
+ run_buildah 1 build $WITH_POLICY_JSON --runtime-flag invalidflag -t build_test $mytmpdir
+ assert "$output" =~ ".*invalidflag" "failed when passing undefined flags to the runtime"
+}
+
+@test "bud - accept at most one arg" {
+ run_buildah 125 build $WITH_POLICY_JSON $BUDFILES/dns extraarg
+ assert "$output" =~ ".*accepts at most 1 arg\(s\), received 2" "Should fail when passed extra arg after context directory"
+}
+
+@test "bud with --no-hostname" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ run_buildah build --no-cache -t testbud \
+ $WITH_POLICY_JSON $BUDFILES/no-hostname
+ assert "${lines[2]}" != "localhost" "Should be set to something other then localhost"
+
+ run_buildah build --no-hostname --no-cache -t testbud \
+ $WITH_POLICY_JSON \
+ $BUDFILES/no-hostname
+ assert "${lines[2]}" == "localhost" "Should be set to localhost"
+
+ run_buildah 1 build --network=none --no-hostname --no-cache -t testbud \
+ $WITH_POLICY_JSON \
+ -f $BUDFILES/no-hostname/Containerfile.noetc \
+ $BUDFILES/no-hostname
+ assert "$output" =~ ".*ls: /etc: No such file or directory" "/etc/ directory should be gone"
+}
+
+@test "bud with --add-host" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run grep "myhostname" /etc/hosts
+_EOF
+
+ ip=123.45.67.$(( $RANDOM % 256 ))
+ run_buildah build --add-host=myhostname:$ip -t testbud \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --from="${lines[2]}" --substring "^$ip\s+myhostname"
+
+ run_buildah 125 build --no-cache --add-host=myhostname:$ip \
+ --no-hosts \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --substring "\-\-no-hosts and \-\-add-host conflict, can not be used together"
+
+ run_buildah 1 build --no-cache --no-hosts \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --substring 'building at STEP "RUN grep "myhostname" /etc/hosts'
+}
+
+@test "bud with --cgroup-parent" {
+ skip_if_rootless_environment
+ skip_if_no_runtime
+ skip_if_chroot
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run cat /proc/self/cgroup
+_EOF
+
+ # with cgroup-parent
+ run_buildah --cgroup-manager cgroupfs build --cgroupns=host --cgroup-parent test-cgroup -t with-flag \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ if is_cgroupsv2; then
+ expect_output --from="${lines[2]}" "0::/test-cgroup"
+ else
+ expect_output --substring "/test-cgroup"
+ fi
+ # without cgroup-parent
+ run_buildah --cgroup-manager cgroupfs build -t without-flag \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ if [ -n "$(grep "test-cgroup" <<< "$output")" ]; then
+ die "Unexpected cgroup."
+ fi
+}
+
+@test "bud with --cpu-period and --cpu-quota" {
+ skip_if_chroot
+ skip_if_rootless_and_cgroupv1
+ skip_if_rootless_environment
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+
+ if is_cgroupsv2; then
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run cat /sys/fs/cgroup/\$(awk -F: '{print \$NF}' /proc/self/cgroup)/cpu.max
+_EOF
+ else
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run echo "\$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us) \$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)"
+_EOF
+ fi
+
+ run_buildah build --cpu-period=1234 --cpu-quota=5678 -t testcpu \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --from="${lines[2]}" "5678 1234"
+}
+
+@test "bud check mount /sys/fs/cgroup" {
+ skip_if_rootless_and_cgroupv1
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run ls /sys/fs/cgroup
+_EOF
+ run_buildah build $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --substring "cpu"
+ expect_output --substring "memory"
+}
+
+@test "bud with --cpu-shares" {
+ skip_if_chroot
+ skip_if_rootless_environment
+ skip_if_rootless_and_cgroupv1
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ local shares=12345
+ local expect=
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+
+ if is_cgroupsv2; then
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "weight " && cat /sys/fs/cgroup/\$(awk -F : '{print \$NF}' /proc/self/cgroup)/cpu.weight
+_EOF
+ expect="weight $((1 + ((${shares} - 2) * 9999) / 262142))"
+ else
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "weight " && cat /sys/fs/cgroup/cpu/cpu.shares
+_EOF
+ expect="weight ${shares}"
+ fi
+
+ run_buildah build --cpu-shares=${shares} -t testcpu \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --from="${lines[2]}" "${expect}"
+}
+
+@test "bud with --cpuset-cpus" {
+ skip_if_chroot
+ skip_if_rootless_and_cgroupv1
+ skip_if_rootless_environment
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+
+ if is_cgroupsv2; then
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "cpuset-cpus " && cat /sys/fs/cgroup/\$(awk -F : '{print \$NF}' /proc/self/cgroup)/cpuset.cpus
+_EOF
+ else
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "cpuset-cpus " && cat /sys/fs/cgroup/cpuset/cpuset.cpus
+_EOF
+ fi
+
+ run_buildah build --cpuset-cpus=0 -t testcpuset \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --from="${lines[2]}" "cpuset-cpus 0"
+}
+
+@test "bud with --cpuset-mems" {
+ skip_if_chroot
+ skip_if_rootless_and_cgroupv1
+ skip_if_rootless_environment
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+
+ if is_cgroupsv2; then
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "cpuset-mems " && cat /sys/fs/cgroup/\$(awk -F : '{print \$NF}' /proc/self/cgroup)/cpuset.mems
+_EOF
+ else
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "cpuset-mems " && cat /sys/fs/cgroup/cpuset/cpuset.mems
+_EOF
+ fi
+
+ run_buildah build --cpuset-mems=0 -t testcpuset \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --from="${lines[2]}" "cpuset-mems 0"
+}
+
+@test "bud with --isolation" {
+ skip_if_rootless_environment
+ skip_if_no_runtime
+ test -z "${BUILDAH_ISOLATION}" || skip "BUILDAH_ISOLATION=${BUILDAH_ISOLATION} overrides --isolation"
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run readlink /proc/self/ns/pid
+_EOF
+
+ run readlink /proc/self/ns/pid
+ host_pidns=$output
+ run_buildah build --isolation chroot -t testisolation --pid private \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ # chroot isolation doesn't make a new PID namespace.
+ expect_output --from="${lines[2]}" "${host_pidns}"
+}
+
+@test "bud with --pull-always" {
+ _prefetch docker.io/library/alpine
+ run_buildah build --pull-always $WITH_POLICY_JSON -t testpull $BUDFILES/containerfile
+ expect_output --from="${lines[1]}" "Trying to pull docker.io/library/alpine:latest..."
+ run_buildah build --pull=always $WITH_POLICY_JSON -t testpull $BUDFILES/containerfile
+ expect_output --from="${lines[1]}" "Trying to pull docker.io/library/alpine:latest..."
+}
+
+@test "bud with --memory and --memory-swap" {
+ skip_if_chroot
+ skip_if_no_runtime
+ skip_if_rootless_and_cgroupv1
+ skip_if_rootless_environment
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+
+ local expect_swap=
+ if is_cgroupsv2; then
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "memory-max=" && cat /sys/fs/cgroup/\$(awk -F : '{print \$NF}' /proc/self/cgroup)/memory.max
+run printf "memory-swap-result=" && cat /sys/fs/cgroup/\$(awk -F : '{print \$NF}' /proc/self/cgroup)/memory.swap.max
+_EOF
+ expect_swap=31457280
+ else
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "memory-max=" && cat /sys/fs/cgroup/memory/memory.limit_in_bytes
+run printf "memory-swap-result=" && cat /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes
+_EOF
+ expect_swap=73400320
+ fi
+
+ run_buildah build --memory=40m --memory-swap=70m -t testmemory \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --from="${lines[2]}" "memory-max=41943040"
+ expect_output --from="${lines[4]}" "memory-swap-result=${expect_swap}"
+}
+
+@test "bud with --shm-size" {
+ skip_if_chroot
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run df -h /dev/shm
+_EOF
+
+ run_buildah build --shm-size=80m -t testshm \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --from="${lines[3]}" --substring "shm\s+80.0M"
+}
+
+@test "bud with --ulimit" {
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/Containerfile << _EOF
+from alpine
+run printf "ulimit=" && ulimit -t
+_EOF
+
+ run_buildah build --ulimit cpu=300 -t testulimit \
+ $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ expect_output --from="${lines[2]}" "ulimit=300"
+}
+
+@test "bud with .dockerignore #3" {
+ run_buildah build -t test $WITH_POLICY_JSON $BUDFILES/copy-globs
+ run_buildah build -t test2 -f Containerfile.missing $WITH_POLICY_JSON $BUDFILES/copy-globs
+
+ run_buildah 125 build -t test3 -f Containerfile.bad $WITH_POLICY_JSON $BUDFILES/copy-globs
+ expect_output --substring 'building.*"COPY \*foo /testdir".*no such file or directory'
+}
+
+@test "bud with containerfile secret" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/mysecret << _EOF
+SOMESECRETDATA
+_EOF
+
+ run_buildah build --secret=id=mysecret,src=${mytmpdir}/mysecret $WITH_POLICY_JSON -t secretimg -f $BUDFILES/run-mounts/Dockerfile.secret $BUDFILES/run-mounts
+ expect_output --substring "SOMESECRETDATA"
+
+ run_buildah from secretimg
+ run_buildah 1 run secretimg-working-container cat /run/secrets/mysecret
+ expect_output --substring "cat: can't open '/run/secrets/mysecret': No such file or directory"
+ run_buildah rm -a
+}
+
+@test "bud with containerfile secret and secret is accessed twice and build should be successful" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/mysecret << _EOF
+SOMESECRETDATA
+_EOF
+
+ cat > $mytmpdir/Dockerfile << _EOF
+FROM alpine
+
+RUN --mount=type=secret,id=mysecret,dst=/home/root/mysecret cat /home/root/mysecret
+
+RUN --mount=type=secret,id=mysecret,dst=/home/root/mysecret2 echo hello && cat /home/root/mysecret2
+_EOF
+
+ run_buildah build --secret=id=mysecret,src=${mytmpdir}/mysecret $WITH_POLICY_JSON -t secretimg -f ${mytmpdir}/Dockerfile
+ expect_output --substring "hello"
+ expect_output --substring "SOMESECRETDATA"
+}
+
+@test "bud with containerfile secret accessed on second RUN" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/mysecret << _EOF
+SOMESECRETDATA
+_EOF
+
+ run_buildah 1 bud --secret=id=mysecret,src=${mytmpdir}/mysecret $WITH_POLICY_JSON -t secretimg -f $BUDFILES/run-mounts/Dockerfile.secret-access $BUDFILES/run-mounts
+ expect_output --substring "SOMESECRETDATA"
+ expect_output --substring "cat: can't open '/mysecret': No such file or directory"
+}
+
+@test "bud with default mode perms" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/mysecret << _EOF
+SOMESECRETDATA
+_EOF
+
+ run_buildah bud --secret=id=mysecret,src=${mytmpdir}/mysecret $WITH_POLICY_JSON -t secretmode -f $BUDFILES/run-mounts/Dockerfile.secret-mode $BUDFILES/run-mounts
+ expect_output --substring "400"
+}
+
+@test "bud with containerfile secret options" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/mysecret << _EOF
+SOMESECRETDATA
+_EOF
+
+ run_buildah build --secret=id=mysecret,src=${mytmpdir}/mysecret $WITH_POLICY_JSON -t secretopts -f $BUDFILES/run-mounts/Dockerfile.secret-options $BUDFILES/run-mounts
+ expect_output --substring "444"
+ expect_output --substring "1000"
+ expect_output --substring "1001"
+}
+
+@test "bud with containerfile secret not required" {
+ _prefetch alpine
+
+ run_buildah build $WITH_POLICY_JSON -t secretnotreq -f $BUDFILES/run-mounts/Dockerfile.secret-not-required $BUDFILES/run-mounts
+ run_buildah 1 build $WITH_POLICY_JSON -t secretnotreq -f $BUDFILES/run-mounts/Dockerfile.secret-required-false $BUDFILES/run-mounts
+ expect_output --substring "No such file or directory"
+ assert "$output" !~ "secret required but no secret with id mysecret found"
+}
+
+@test "bud with containerfile secret required" {
+ _prefetch alpine
+
+ run_buildah 125 build $WITH_POLICY_JSON -t secretreq -f $BUDFILES/run-mounts/Dockerfile.secret-required $BUDFILES/run-mounts
+ expect_output --substring "secret required but no secret with id mysecret found"
+
+ # Also test secret required without value
+ run_buildah 125 build $WITH_POLICY_JSON -t secretreq -f $BUDFILES/run-mounts/Dockerfile.secret-required-wo-value $BUDFILES/run-mounts
+ expect_output --substring "secret required but no secret with id mysecret found"
+}
+
+@test "bud with containerfile env secret" {
+ export MYSECRET=SOMESECRETDATA
+ run_buildah build --secret=id=mysecret,src=MYSECRET,type=env $WITH_POLICY_JSON -t secretimg -f $BUDFILES/run-mounts/Dockerfile.secret $BUDFILES/run-mounts
+ expect_output --substring "SOMESECRETDATA"
+
+ run_buildah from secretimg
+ run_buildah 1 run secretimg-working-container cat /run/secrets/mysecret
+ expect_output --substring "cat: can't open '/run/secrets/mysecret': No such file or directory"
+ run_buildah rm -a
+
+ run_buildah build --secret=id=mysecret,env=MYSECRET $WITH_POLICY_JSON -t secretimg -f $BUDFILES/run-mounts/Dockerfile.secret $BUDFILES/run-mounts
+ expect_output --substring "SOMESECRETDATA"
+
+ run_buildah from secretimg
+ run_buildah 1 run secretimg-working-container cat /run/secrets/mysecret
+ expect_output --substring "cat: can't open '/run/secrets/mysecret': No such file or directory"
+ run_buildah rm -a
+}
+
+@test "bud with containerfile env secret priority" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ cat > $mytmpdir/mysecret << _EOF
+SOMESECRETDATA
+_EOF
+
+ export mysecret=ENVDATA
+ run_buildah build --secret=id=mysecret $WITH_POLICY_JSON -t secretimg -f $BUDFILES/run-mounts/Dockerfile.secret $BUDFILES/run-mounts
+ expect_output --substring "ENVDATA"
+}
+
+@test "bud-multiple-platform-values" {
+ skip "FIXME: #4396 - this test is broken, and is failing gating tests"
+ outputlist=testlist
+ # check if we can run a couple of 32-bit versions of an image, and if we can,
+ # assume that emulation for other architectures is in place.
+ os=`go env GOOS`
+ run_buildah from $WITH_POLICY_JSON --name try-386 --platform=$os/386 alpine
+ run_buildah '?' run try-386 true
+ if test $status -ne 0 ; then
+ skip "unable to run 386 container, assuming emulation is not available"
+ fi
+ run_buildah from $WITH_POLICY_JSON --name try-arm --platform=$os/arm alpine
+ run_buildah '?' run try-arm true
+ if test $status -ne 0 ; then
+ skip "unable to run arm container, assuming emulation is not available"
+ fi
+
+ # build for those architectures - RUN gets exercised
+ run_buildah build $WITH_POLICY_JSON --jobs=0 --platform=$os/arm,$os/386 --manifest $outputlist $BUDFILES/multiarch
+ run_buildah manifest inspect $outputlist
+ list="$output"
+ run jq -r '.manifests[0].digest' <<< "$list"
+ d1="$output"
+ run jq -r '.manifests[1].digest' <<< "$list"
+ d2="$output"
+ assert "$d1" =~ "^sha256:[0-9a-f]{64}\$"
+ assert "$d2" =~ "^sha256:[0-9a-f]{64}\$"
+ assert "$d1" != "$d2" "digest(arm) should != digest(386)"
+}
+
+@test "bud-multiple-platform-no-partial-manifest-list" {
+ outputlist=localhost/testlist
+ run_buildah 1 bud $WITH_POLICY_JSON --platform=linux/arm,linux/amd64 --manifest $outputlist -f $BUDFILES/multiarch/Dockerfile.fail $BUDFILES/multiarch
+ expect_output --substring "building at STEP \"RUN test .arch. = x86_64"
+ run_buildah 125 manifest inspect $outputlist
+ expect_output --substring "reading image .* pinging container registry"
+}
+
+@test "bud-multiple-platform-failure" {
+ # check if we can run a couple of 32-bit versions of an image, and if we can,
+ # assume that emulation for other architectures is in place.
+ os=$(go env GOOS)
+ if [[ "$os" != linux ]]; then
+ skip "GOOS is '$os'; this test can only run on linux"
+ fi
+ run_buildah from $WITH_POLICY_JSON --name try-386 --platform=$os/386 alpine
+ run_buildah '?' run try-386 true
+ if test $status -ne 0 ; then
+ skip "unable to run 386 container, assuming emulation is not available"
+ fi
+ run_buildah from $WITH_POLICY_JSON --name try-arm --platform=$os/arm alpine
+ run_buildah '?' run try-arm true
+ if test $status -ne 0 ; then
+ skip "unable to run arm container, assuming emulation is not available"
+ fi
+ outputlist=localhost/testlist
+ run_buildah 1 build $WITH_POLICY_JSON \
+ --jobs=0 \
+ --platform=linux/arm64,linux/amd64 \
+ --manifest $outputlist \
+ --build-arg SAFEIMAGE=$SAFEIMAGE \
+ -f $BUDFILES/multiarch/Dockerfile.fail-multistage \
+ $BUDFILES/multiarch
+ expect_output --substring 'building at STEP "RUN false"'
+}
+
+@test "bud-multiple-platform-no-run" {
+ outputlist=localhost/testlist
+ run_buildah build $WITH_POLICY_JSON \
+ --jobs=0 \
+ --all-platforms \
+ --manifest $outputlist \
+ --build-arg SAFEIMAGE=$SAFEIMAGE \
+ -f $BUDFILES/multiarch/Dockerfile.no-run \
+ $BUDFILES/multiarch
+
+ run_buildah manifest inspect $outputlist
+ manifests=$(jq -r '.manifests[].platform.architecture' <<<"$output" |sort|fmt)
+ assert "$manifests" = "amd64 arm64 ppc64le s390x" "arch list in manifest"
+}
+
+# attempts to resolve heading arg as base-image with --all-platforms
+@test "bud-multiple-platform-with-base-as-default-arg" {
+ outputlist=localhost/testlist
+ run_buildah build $WITH_POLICY_JSON \
+ --jobs=1 \
+ --all-platforms \
+ --manifest $outputlist \
+ -f $BUDFILES/all-platform/Containerfile.default-arg \
+ $BUDFILES/all-platform
+
+ run_buildah manifest inspect $outputlist
+ manifests=$(jq -r '.manifests[].platform.architecture' <<<"$output" |sort|fmt)
+ assert "$manifests" = "386 amd64 arm arm arm64 ppc64le s390x" "arch list in manifest"
+}
+
+@test "bud-multiple-platform for --all-platform with additional-build-context" {
+ outputlist=localhost/testlist
+ local contextdir=${TEST_SCRATCH_DIR}/bud/platform
+ mkdir -p $contextdir
+
+cat > $contextdir/Dockerfile1 << _EOF
+FROM busybox
+_EOF
+
+ # Pulled images must be $SAFEIMAGE since we configured --build-context
+ run_buildah build $WITH_POLICY_JSON --all-platforms --build-context busybox=docker://$SAFEIMAGE --manifest $outputlist -f $contextdir/Dockerfile1
+ # must contain pulling logs for $SAFEIMAGE instead of busybox
+ expect_output --substring "STEP 1/1: FROM $SAFEIMAGE"
+ assert "$output" =~ "\[linux/s390x\] COMMIT"
+ assert "$output" =~ "\[linux/ppc64le\] COMMIT"
+ assert "$output" !~ "busybox"
+
+ # Confirm the manifests and their architectures. It is not possible for
+ # this to change, unless we bump $SAFEIMAGE to a new versioned tag.
+ run_buildah manifest inspect $outputlist
+ manifests=$(jq -r '.manifests[].platform.architecture' <<<"$output" |sort|fmt)
+ assert "$manifests" = "amd64 arm64 ppc64le s390x" "arch list in manifest"
+}
+
+# * Performs multi-stage build with label1=value1 and verifies
+# * Relabels build with label1=value2 and verifies
+# * Rebuild with label1=value1 and makes sure everything is used from cache
+@test "bud-multistage-relabel" {
+ _prefetch alpine busybox
+ run_buildah inspect --format "{{.FromImageDigest}}" busybox
+ fromDigest="$output"
+
+ target=relabel
+ run_buildah build --layers --label "label1=value1" $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds/Dockerfile.reused $BUDFILES/multi-stage-builds
+
+ # Store base digest of first image
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' ${target}
+ firstDigest="$output"
+
+ # Store image id of first build
+ run_buildah inspect --format '{{ .FromImageID }}' ${target}
+ firstImageID="$output"
+
+ # Label of first build must contain label1:value1
+ run_buildah inspect --format '{{ .Docker.ContainerConfig.Labels }}' ${target}
+ expect_output --substring "label1:value1"
+
+ # Rebuild with new label
+ run_buildah build --layers --label "label1=value2" $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds/Dockerfile.reused $BUDFILES/multi-stage-builds
+
+ # Base digest should match with first build
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' ${target}
+ expect_output "$firstDigest" "base digest from busybox"
+
+ # Label of second build must contain label1:value2
+ run_buildah inspect --format '{{ .Docker.ContainerConfig.Labels }}' ${target}
+ expect_output --substring "label1:value2"
+
+ # Rebuild everything with label1=value1 and everything should be cached from first image
+ run_buildah build --layers --label "label1=value1" $WITH_POLICY_JSON -t ${target} -f $BUDFILES/multi-stage-builds/Dockerfile.reused $BUDFILES/multi-stage-builds
+
+ # Entire image must be picked from cache
+ run_buildah inspect --format '{{ .FromImageID }}' ${target}
+ expect_output "$firstImageID" "Image ID cached from first build"
+
+ run_buildah rmi -f ${target}
+}
+
+
+@test "bud-from-relabel" {
+ _prefetch alpine busybox
+
+ run_buildah inspect --format "{{.FromImageDigest}}" alpine
+ alpineDigest="$output"
+
+ run_buildah inspect --format "{{.FromImageDigest}}" busybox
+ busyboxDigest="$output"
+
+ target=relabel2
+ run_buildah build --layers --label "label1=value1" --from=alpine -t ${target} $BUDFILES/from-scratch
+
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' ${target}
+ expect_output "$alpineDigest" "base digest from alpine"
+
+ # Label of second build must contain label1:value1
+ run_buildah inspect --format '{{ .Docker.ContainerConfig.Labels }}' ${target}
+ expect_output --substring "label1:value1"
+
+
+ run_buildah build --layers --label "label1=value2" --from=busybox -t ${target} $BUDFILES/from-scratch
+
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' ${target}
+ expect_output "$busyboxDigest" "base digest from busybox"
+
+ # Label of second build must contain label1:value2
+ run_buildah inspect --format '{{ .Docker.ContainerConfig.Labels }}' ${target}
+ expect_output --substring "label1:value2"
+
+ run_buildah rmi -f ${target}
+}
+
+@test "bud with run should not leave mounts behind cleanup test" {
+ skip_if_in_container
+ skip_if_no_podman
+
+ # Create target dir where we will export tar
+ target=cleanable
+ local contextdir=${TEST_SCRATCH_DIR}/${target}
+ mkdir $contextdir
+
+ # Build and export container to tar
+ run_buildah build --no-cache $WITH_POLICY_JSON -t ${target} -f $BUDFILES/containerfile/Containerfile.in $BUDFILES/containerfile
+ podman export $(podman create --name ${target} --net=host ${target}) --output=$contextdir.tar
+
+ # We are done exporting so remove images and containers which are not needed
+ podman rm -f ${target}
+ run_buildah rmi ${target}
+
+ # Explode tar
+ tar -xf $contextdir.tar -C $contextdir
+ count=$(ls -A $contextdir/run | wc -l)
+ ## exported /run should be empty
+ assert "$count" == "0"
+}
+
+@test "bud with custom files in /run/ should persist cleanup test" {
+ skip_if_in_container
+ skip_if_no_podman
+
+ # Create target dir where we will export tar
+ target=cleanable
+ local contextdir=${TEST_SCRATCH_DIR}/${target}
+ mkdir $contextdir
+
+ # Build and export container to tar
+ run_buildah build --no-cache $WITH_POLICY_JSON -t ${target} -f $BUDFILES/add-run-dir/Dockerfile
+ podman export $(podman create --name ${target} --net=host ${target}) --output=$contextdir.tar
+
+ # We are done exporting so remove images and containers which are not needed
+ podman rm -f ${target}
+ run_buildah rmi ${target}
+
+ # Explode tar
+ tar -xf $contextdir.tar -C $contextdir
+ count=$(ls -A $contextdir/run | wc -l)
+ ## exported /run should not be empty
+ assert "$count" == "1"
+}
+
+@test "bud-with-mount-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=$BUDFILES/buildkit-mount
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfile $contextdir/
+ expect_output --substring "hello"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-no-source-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfile2 $contextdir/
+ expect_output --substring "hello"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-with-only-target-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfile6 $contextdir/
+ expect_output --substring "hello"
+}
+
+@test "bud-with-mount-no-subdir-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfile $contextdir/subdir/
+ expect_output --substring "hello"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-relative-path-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfile4 $contextdir/
+ expect_output --substring "hello"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-with-rw-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ run_buildah build --isolation chroot -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfile3 $contextdir/subdir/
+ expect_output --substring "world"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-verify-if-we-dont-clean-prexisting-path" {
+ skip_if_no_runtime
+ skip_if_in_container
+ run_buildah 1 build -t testbud $WITH_POLICY_JSON --secret id=secret-foo,src=$BUDFILES/verify-cleanup/secret1.txt -f $BUDFILES/verify-cleanup/Dockerfile $BUDFILES/verify-cleanup/
+ expect_output --substring "hello"
+ expect_output --substring "secrettext"
+ expect_output --substring "Directory /tmp exists."
+ expect_output --substring "Directory /var/tmp exists."
+ expect_output --substring "Directory /testdir DOES NOT exists."
+ expect_output --substring "Cache Directory /cachedir DOES NOT exists."
+ expect_output --substring "Secret File secret1.txt DOES NOT exists."
+ expect_output --substring "/tmp/hey: No such file or directory"
+}
+
+@test "bud-with-mount-with-tmpfs-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ # tmpfs mount: target should be available on container without creating any special directory on container
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfiletmpfs
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-with-tmpfs-with-copyup-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfiletmpfscopyup
+ expect_output --substring "certs"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-cache-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ # try writing something to persistent cache
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilecachewrite
+ # try reading something from persistent cache in a different build
+ run_buildah build -t testbud2 $WITH_POLICY_JSON -f $contextdir/Dockerfilecacheread
+ expect_output --substring "hello"
+ run_buildah rmi -f testbud
+ run_buildah rmi -f testbud2
+}
+
+@test "bud-with-mount-cache-like-buildkit with buildah prune should clear the cache" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ # try writing something to persistent cache
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilecachewrite
+ # prune the mount cache
+ run_buildah prune
+ # try reading something from persistent cache in a different build
+ run_buildah 1 build -t testbud2 $WITH_POLICY_JSON -f $contextdir/Dockerfilecacheread
+ expect_output --substring "No such file or directory"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-cache-like-buildkit-verify-default-selinux-option" {
+ skip_if_no_runtime
+ skip_if_in_container
+ _prefetch alpine
+ # try writing something to persistent cache
+ TMPDIR=${TEST_SCRATCH_DIR} run_buildah build -t testbud $WITH_POLICY_JSON -f $BUDFILES/buildkit-mount/Dockerfilecachewritewithoutz
+ # try reading something from persistent cache in a different build
+ TMPDIR=${TEST_SCRATCH_DIR} run_buildah build -t testbud2 $WITH_POLICY_JSON -f $BUDFILES/buildkit-mount/Dockerfilecachereadwithoutz
+ buildah_cache_dir="${TEST_SCRATCH_DIR}/buildah-cache-$UID"
+ # buildah cache parent must have been created for our uid specific to this test
+ test -d "$buildah_cache_dir"
+ expect_output --substring "hello"
+}
+
+@test "bud-with-mount-cache-like-buildkit-locked-across-steps" {
+ # Note: this test is just testing syntax for sharing, actual behaviour test needs parallel build in order to test locking.
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ # try writing something to persistent cache
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilecachewritesharing
+ expect_output --substring "world"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-multiple-mount-keeps-default-bind-mount" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount
+ cp -R $BUDFILES/buildkit-mount $contextdir
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilemultiplemounts $contextdir/
+ expect_output --substring "hello"
+ run_buildah rmi -f testbud
+}
+
+@test "bud with user in groups" {
+ target=bud-group
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/group
+}
+
+@test "build proxy" {
+ _prefetch alpine
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+run printenv
+_EOF
+ target=env-image
+ check="FTP_PROXY="FTP" ftp_proxy=ftp http_proxy=http HTTPS_PROXY=HTTPS"
+ bogus="BOGUS_PROXY=BOGUS"
+ eval $check $bogus run_buildah build --unsetenv PATH $WITH_POLICY_JSON -t oci-${target} -f $mytmpdir/Containerfile .
+ for i in $check; do
+ expect_output --substring "$i" "Environment variables available within build"
+ done
+ if [ -n "$(grep "$bogus" <<< "$output")" ]; then
+ die "Unexpected bogus environment."
+ fi
+}
+
+@test "bud-with-mount-bind-from-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ # build base image which we will use as our `from`
+ run_buildah build -t buildkitbase $WITH_POLICY_JSON -f $contextdir/Dockerfilebuildkitbase $contextdir/
+ # try reading something from another image in a different build
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilebindfrom
+ expect_output --substring "hello"
+ run_buildah rmi -f buildkitbase
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-writeable-mount-bind-from-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ # build base image which we will use as our `from`
+ run_buildah build -t buildkitbase $WITH_POLICY_JSON -f $contextdir/Dockerfilebuildkitbase $contextdir/
+ # try reading something from another image in a different build
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilebindfromwriteable
+ expect_output --substring "world"
+ run_buildah rmi -f buildkitbase
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-bind-from-without-source-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ # build base image which we will use as our `from`
+ run_buildah build -t buildkitbase $WITH_POLICY_JSON -f $contextdir/Dockerfilebuildkitbase $contextdir/
+ # try reading something from another image in a different build
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilebindfromwithoutsource
+ expect_output --substring "hello"
+ run_buildah rmi -f buildkitbase
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-bind-from-with-empty-from-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ # build base image which we will use as our `from`
+ run_buildah build -t buildkitbase $WITH_POLICY_JSON -f $contextdir/Dockerfilebuildkitbase $contextdir/
+ # try reading something from image in a different build
+ run_buildah 125 build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilebindfromwithemptyfrom
+ expect_output --substring "points to an empty value"
+ run_buildah rmi -f buildkitbase
+}
+
+@test "bud-with-mount-cache-from-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ # try reading something from persistent cache in a different build
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilecachefrom $contextdir/
+ expect_output --substring "hello"
+ run_buildah rmi -f testbud
+}
+
+# following test must fail
+@test "bud-with-mount-cache-image-from-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+
+ # build base image which we will use as our `from`
+ run_buildah build -t buildkitbase $WITH_POLICY_JSON -f $contextdir/Dockerfilebuildkitbase $contextdir/
+
+ # try reading something from persistent cache in a different build
+ run_buildah 125 build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilecachefromimage
+ expect_output --substring "no stage found with name buildkitbase"
+ run_buildah rmi -f buildkitbase
+}
+
+@test "bud-with-mount-cache-multiple-from-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ # try reading something from persistent cache in a different build
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilecachemultiplefrom $contextdir/
+ expect_output --substring "hello"
+ expect_output --substring "hello2"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-bind-from-relative-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ # build base image which we will use as our `from`
+ run_buildah build -t buildkitbaserelative $WITH_POLICY_JSON -f $contextdir/Dockerfilebuildkitbaserelative $contextdir/
+ # try reading something from image in a different build
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilebindfromrelative
+ expect_output --substring "hello"
+ run_buildah rmi -f buildkitbaserelative
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-bind-from-multistage-relative-like-buildkit" {
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ skip_if_no_runtime
+ skip_if_in_container
+ # build base image which we will use as our `from`
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilemultistagefrom $contextdir/
+ expect_output --substring "hello"
+ run_buildah rmi -f testbud
+}
+
+@test "bud-with-mount-bind-from-cache-multistage-relative-like-buildkit" {
+ skip_if_no_runtime
+ skip_if_in_container
+ local contextdir=${TEST_SCRATCH_DIR}/buildkit-mount-from
+ cp -R $BUDFILES/buildkit-mount-from $contextdir
+ # build base image which we will use as our `from`
+ run_buildah build -t testbud $WITH_POLICY_JSON -f $contextdir/Dockerfilemultistagefromcache $contextdir/
+ expect_output --substring "hello"
+ expect_output --substring "hello2"
+ run_buildah rmi -f testbud
+}
+
+@test "bud with network names" {
+ skip_if_no_runtime
+ skip_if_in_container
+ skip_if_rootless_environment
+
+ _prefetch alpine
+
+ run_buildah 125 bud $WITH_POLICY_JSON --network notexists $BUDFILES/network
+ expect_output --substring "network not found"
+
+ if test "$BUILDAH_ISOLATION" = "oci"; then
+ run_buildah bud $WITH_POLICY_JSON --network podman $BUDFILES/network
+ # default subnet is 10.88.0.0/16
+ expect_output --substring "10.88."
+ fi
+}
+
+@test "bud with --network slirp4netns" {
+ skip_if_no_runtime
+ skip_if_in_container
+ skip_if_chroot
+
+ _prefetch alpine
+
+ run_buildah bud $WITH_POLICY_JSON --network slirp4netns $BUDFILES/network
+ # default guest address is 10.0.2.100 (subnet 10.0.2.0/24)
+ assert "$output" =~ "10.0.2.100/24" "ip addr shows default subnet"
+
+ run_buildah bud $WITH_POLICY_JSON --network slirp4netns:cidr=192.168.255.0/24,mtu=2000 $BUDFILES/network
+ assert "$output" =~ "192.168.255.100/24" "ip addr shows custom subnet"
+ assert "$output" =~ "mtu 2000" "ip addr shows mtu 2000"
+}
+
+@test "bud with --network pasta" {
+ skip_if_no_runtime
+ skip_if_chroot
+ skip_if_root_environment "pasta only works rootless"
+
+ # FIXME: unskip when we have a new pasta version with:
+ # https://archives.passt.top/passt-dev/20230623082531.25947-2-pholzing@redhat.com/
+ skip "pasta bug prevents this from working"
+
+ _prefetch alpine
+
+ # pasta by default copies the host ip
+ ip=$(hostname -I | cut -f 1 -d " ")
+
+ run_buildah bud $WITH_POLICY_JSON --network pasta $BUDFILES/network
+ assert "$output" =~ "$ip" "ip addr shows default subnet"
+
+ # check some network options, it accepts raw pasta(1) arguments
+ mac="9a:dd:31:ea:92:98"
+ run_buildah bud $WITH_POLICY_JSON --network pasta:--mtu,2000,--ns-mac-addr,"$mac" $BUDFILES/network
+ assert "$output" =~ "$mac" "ip addr shows custom mac address"
+ assert "$output" =~ "mtu 2000" "ip addr shows mtu 2000"
+}
+
+@test "bud WORKDIR owned by USER" {
+ _prefetch alpine
+ target=alpine-image
+ ctr=alpine-ctr
+ run_buildah build $WITH_POLICY_JSON -t ${target} $BUDFILES/workdir-user
+ expect_output --substring "1000:1000 /home/http/public"
+}
+
+function build_signalled {
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ mkfifo ${TEST_SCRATCH_DIR}/pipe
+ # start the build running in the background - don't use the function wrapper because that sets '$!' to a value that's not what we want
+ ${BUILDAH_BINARY} ${BUILDAH_REGISTRY_OPTS} ${ROOTDIR_OPTS} $WITH_POLICY_JSON build $BUDFILES/long-sleep > ${TEST_SCRATCH_DIR}/pipe 2>&1 &
+ buildah_pid="${!}"
+ echo buildah is pid ${buildah_pid}
+ # save what's written to the fifo to a plain file
+ coproc cat ${TEST_SCRATCH_DIR}/pipe > ${TEST_SCRATCH_DIR}/log
+ cat_pid="${COPROC_PID}"
+ echo cat is pid ${cat_pid}
+ # kill the buildah process early
+ sleep 30
+ kill -s ${1} "${buildah_pid}"
+ # wait for output to stop getting written from anywhere
+ wait "${buildah_pid}" "${cat_pid}"
+ echo log:
+ cat ${TEST_SCRATCH_DIR}/log
+ echo checking:
+ ! grep 'not fully killed' ${TEST_SCRATCH_DIR}/log
+}
+
+@test "build interrupted" {
+ build_signalled SIGINT
+}
+
+@test "build terminated" {
+ build_signalled SIGTERM
+}
+
+@test "build killed" {
+ build_signalled SIGKILL
+}
+
+@test "build-multiple-parse" {
+ _prefetch alpine
+ echo 'FROM alpine' | tee ${TEST_SCRATCH_DIR}/Dockerfile1
+ echo '# escape=|\nFROM alpine' | tee ${TEST_SCRATCH_DIR}/Dockerfile2
+ run_buildah 125 build -f ${TEST_SCRATCH_DIR}/Dockerfile1 -f ${TEST_SCRATCH_DIR}/Dockerfile2 ${TEST_SCRATCH_DIR}
+ assert "$output" =~ "parsing additional Dockerfile .*Dockerfile2: invalid ESCAPE"
+}
+
+@test "build-with-network-test" {
+ skip_if_in_container # Test only works in OCI isolation, which doesn't work in CI/CD systems. Buildah defaults to chroot isolation
+
+ image="quay.io/libpod/alpine_nginx:latest"
+ _prefetch $image
+ cat > ${TEST_SCRATCH_DIR}/Containerfile << _EOF
+FROM $image
+RUN curl -k -o /dev/null http://www.redhat.com:80
+_EOF
+
+ # curl results show success
+ run_buildah build ${WITH_POLICY_JSON} ${TEST_SCRATCH_DIR}
+
+ # A proper test would use ping or nc, and check for ENETUNREACH.
+ # But in a tightly firewalled environment, even the expected-success
+ # test will fail. A not-quite-equivalent workaround is to use curl
+ # and hope that $http_proxy is set; we then rely on curl to fail
+ # in a slightly different way
+ expect_rc=6
+ expect_err="Could not resolve host: www.redhat.com"
+ if [[ $http_proxy != "" ]]; then
+ expect_rc=5
+ expect_err="Could not resolve proxy:"
+ fi
+ run_buildah $expect_rc build --network=none ${WITH_POLICY_JSON} ${TEST_SCRATCH_DIR}
+ expect_output --substring "$expect_err"
+}
+
+@test "build-with-no-new-privileges-test" {
+ _prefetch alpine
+ cat > ${TEST_SCRATCH_DIR}/Containerfile << _EOF
+FROM alpine
+RUN grep NoNewPrivs /proc/self/status
+_EOF
+
+ run_buildah build --security-opt no-new-privileges $WITH_POLICY_JSON ${TEST_SCRATCH_DIR}
+ expect_output --substring "NoNewPrivs:.*1"
+}
+
+@test "build --group-add" {
+ skip_if_no_runtime
+ id=$RANDOM
+
+ _prefetch alpine
+ cat > ${TEST_SCRATCH_DIR}/Containerfile << _EOF
+FROM alpine
+RUN id -G
+_EOF
+
+ run_buildah build --group-add $id $WITH_POLICY_JSON ${TEST_SCRATCH_DIR}
+ expect_output --substring "$id"
+
+ if is_rootless && has_supplemental_groups; then
+ run_buildah build --group-add keep-groups $WITH_POLICY_JSON ${TEST_SCRATCH_DIR}
+ expect_output --substring "65534"
+ fi
+}
+
+@test "build-env-precedence" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ run_buildah build --no-cache --env E=F --env G=H --env I=J --env K=L -f ${BUDFILES}/env/Dockerfile.env-precedence ${BUDFILES}/env
+ expect_output --substring "a=b c=d E=F G=H"
+ expect_output --substring "a=b c=d E=E G=G"
+ expect_output --substring "w=x y=z I=J K=L"
+ expect_output --substring "w=x y=z I=I K=K"
+
+ run_buildah build --no-cache --layers --env E=F --env G=H --env I=J --env K=L -f ${BUDFILES}/env/Dockerfile.env-precedence ${BUDFILES}/env
+ expect_output --substring "a=b c=d E=F G=H"
+ expect_output --substring "a=b c=d E=E G=G"
+ expect_output --substring "w=x y=z I=J K=L"
+ expect_output --substring "w=x y=z I=I K=K"
+}
+
+@test "build prints 12-digit hash" {
+ run_buildah build -t test -f $BUDFILES/containerfile/Containerfile .
+ regex='--> [0-9a-zA-Z]{12}'
+ if ! [[ $output =~ $regex ]]; then
+ false
+ fi
+}
+
+@test "build with name path changes" {
+ _prefetch busybox
+ run_buildah build --no-cache --quiet --pull=false $WITH_POLICY_JSON -t foo/bar $BUDFILES/commit/name-path-changes/
+ run_buildah build --no-cache --quiet --pull=false $WITH_POLICY_JSON -t bar $BUDFILES/commit/name-path-changes/
+ run_buildah images
+ expect_output --substring "localhost/foo/bar"
+ expect_output --substring "localhost/bar"
+}
diff --git a/tests/bud/add-checksum/Containerfile b/tests/bud/add-checksum/Containerfile
new file mode 100644
index 0000000..55de0de
--- /dev/null
+++ b/tests/bud/add-checksum/Containerfile
@@ -0,0 +1,2 @@
+FROM alpine
+ADD --checksum=sha256:4fd3aed66b5488b45fe83dd11842c2324fadcc38e1217bb45fbd28d660afdd39 https://raw.githubusercontent.com/containers/buildah/bf3b55ba74102cc2503eccbaeffe011728d46b20/README.md /
diff --git a/tests/bud/add-checksum/Containerfile.bad b/tests/bud/add-checksum/Containerfile.bad
new file mode 100644
index 0000000..2c86813
--- /dev/null
+++ b/tests/bud/add-checksum/Containerfile.bad
@@ -0,0 +1,2 @@
+FROM alpine
+ADD --checksum https://raw.githubusercontent.com/containers/buildah/bf3b55ba74102cc2503eccbaeffe011728d46b20/README.md /
diff --git a/tests/bud/add-checksum/Containerfile.bad-checksum b/tests/bud/add-checksum/Containerfile.bad-checksum
new file mode 100644
index 0000000..7c043d2
--- /dev/null
+++ b/tests/bud/add-checksum/Containerfile.bad-checksum
@@ -0,0 +1,2 @@
+FROM alpine
+ADD --checksum=sha256:0000000000000000000000000000000000000000000000000000000000000000 https://raw.githubusercontent.com/containers/buildah/bf3b55ba74102cc2503eccbaeffe011728d46b20/README.md /
diff --git a/tests/bud/add-chmod/Dockerfile b/tests/bud/add-chmod/Dockerfile
new file mode 100644
index 0000000..88b4394
--- /dev/null
+++ b/tests/bud/add-chmod/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+
+ADD --chmod=777 addchmod.txt /tmp
+RUN ls -l /tmp/addchmod.txt
+CMD /bin/sh
+
diff --git a/tests/bud/add-chmod/Dockerfile.bad b/tests/bud/add-chmod/Dockerfile.bad
new file mode 100644
index 0000000..0670f4e
--- /dev/null
+++ b/tests/bud/add-chmod/Dockerfile.bad
@@ -0,0 +1,6 @@
+FROM alpine
+
+ADD --chmod 777 addchmod.txt /tmp
+RUN ls -l /tmp/addchmod.txt
+CMD /bin/sh
+
diff --git a/tests/bud/add-chmod/Dockerfile.combined b/tests/bud/add-chmod/Dockerfile.combined
new file mode 100644
index 0000000..aa02a36
--- /dev/null
+++ b/tests/bud/add-chmod/Dockerfile.combined
@@ -0,0 +1,6 @@
+FROM alpine
+
+ADD --chmod=777 --chown=2367:3267 addchmod.txt /tmp
+RUN stat -c "chmod:%a user:%u group:%g" /tmp/addchmod.txt
+CMD /bin/sh
+
diff --git a/tests/bud/add-chmod/addchmod.txt b/tests/bud/add-chmod/addchmod.txt
new file mode 100644
index 0000000..323c551
--- /dev/null
+++ b/tests/bud/add-chmod/addchmod.txt
@@ -0,0 +1 @@
+File for testing ADD with chmod in a Dockerfile.
diff --git a/tests/bud/add-chown/Dockerfile b/tests/bud/add-chown/Dockerfile
new file mode 100644
index 0000000..707ada8
--- /dev/null
+++ b/tests/bud/add-chown/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+
+ADD --chown=2367:3267 addchown.txt /tmp
+RUN stat -c "user:%u group:%g" /tmp/addchown.txt
+CMD /bin/sh
+
diff --git a/tests/bud/add-chown/Dockerfile.bad b/tests/bud/add-chown/Dockerfile.bad
new file mode 100644
index 0000000..876d91d
--- /dev/null
+++ b/tests/bud/add-chown/Dockerfile.bad
@@ -0,0 +1,6 @@
+FROM alpine
+
+ADD --chown 2367:3267 addchown.txt /tmp
+RUN stat -c "user:%u group:%g" /tmp/addchown.txt
+CMD /bin/sh
+
diff --git a/tests/bud/add-chown/addchown.txt b/tests/bud/add-chown/addchown.txt
new file mode 100644
index 0000000..d483cb2
--- /dev/null
+++ b/tests/bud/add-chown/addchown.txt
@@ -0,0 +1 @@
+File for testing COPY with chown in a Dockerfile.
diff --git a/tests/bud/add-create-absolute-path/Dockerfile b/tests/bud/add-create-absolute-path/Dockerfile
new file mode 100644
index 0000000..567f2b1
--- /dev/null
+++ b/tests/bud/add-create-absolute-path/Dockerfile
@@ -0,0 +1,3 @@
+FROM ubuntu
+ADD distutils.cfg /usr/lib/python3.7/distutils/distutils.cfg
+RUN stat -c "permissions=%a" /usr/lib/python3.7/distutils
diff --git a/tests/bud/add-create-absolute-path/distutils.cfg b/tests/bud/add-create-absolute-path/distutils.cfg
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/add-create-absolute-path/distutils.cfg
diff --git a/tests/bud/add-create-relative-path/Dockerfile b/tests/bud/add-create-relative-path/Dockerfile
new file mode 100644
index 0000000..0e6d726
--- /dev/null
+++ b/tests/bud/add-create-relative-path/Dockerfile
@@ -0,0 +1,3 @@
+FROM ubuntu
+COPY distutils.cfg lib/custom/distutils.cfg
+RUN stat -c "permissions=%a" lib/custom
diff --git a/tests/bud/add-create-relative-path/distutils.cfg b/tests/bud/add-create-relative-path/distutils.cfg
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/add-create-relative-path/distutils.cfg
diff --git a/tests/bud/add-file/Dockerfile b/tests/bud/add-file/Dockerfile
new file mode 100644
index 0000000..bd6d0c5
--- /dev/null
+++ b/tests/bud/add-file/Dockerfile
@@ -0,0 +1,8 @@
+FROM busybox
+
+ADD file /var/www/
+VOLUME /var/www
+ADD file /var/
+VOLUME /var
+ADD file2 /var/
+
diff --git a/tests/bud/add-file/file b/tests/bud/add-file/file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/add-file/file
diff --git a/tests/bud/add-file/file2 b/tests/bud/add-file/file2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/add-file/file2
diff --git a/tests/bud/add-run-dir/Dockerfile b/tests/bud/add-run-dir/Dockerfile
new file mode 100644
index 0000000..d4b4ebe
--- /dev/null
+++ b/tests/bud/add-run-dir/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+RUN touch /run/hello
diff --git a/tests/bud/addtl-tags/Dockerfile b/tests/bud/addtl-tags/Dockerfile
new file mode 100644
index 0000000..dc5aa56
--- /dev/null
+++ b/tests/bud/addtl-tags/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+WORKDIR /
diff --git a/tests/bud/all-platform/Containerfile.default-arg b/tests/bud/all-platform/Containerfile.default-arg
new file mode 100644
index 0000000..bd945f7
--- /dev/null
+++ b/tests/bud/all-platform/Containerfile.default-arg
@@ -0,0 +1,2 @@
+ARG foo=alpine
+FROM $foo
diff --git a/tests/bud/base-with-arg/Containerfile b/tests/bud/base-with-arg/Containerfile
new file mode 100644
index 0000000..8ed7bd5
--- /dev/null
+++ b/tests/bud/base-with-arg/Containerfile
@@ -0,0 +1,20 @@
+FROM --platform=$TARGETPLATFORM alpine AS build
+LABEL architecture=$TARGETARCH
+
+FROM build AS platform-amd64
+ENV BUILT_FOR=amd64
+
+FROM build AS platform-arm64
+ENV BUILT_FOR=arm64
+
+FROM build AS platform-386
+ENV BUILT_FOR=386
+
+FROM build AS platform-arm/v7
+ENV BUILT_FOR=arm/v7
+
+FROM build AS platform-arm/v6
+ENV BUILT_FOR=arm/v6
+
+FROM platform-${TARGETARCH} AS final
+RUN echo "This is built for ${BUILT_FOR}"
diff --git a/tests/bud/base-with-arg/Containerfile2 b/tests/bud/base-with-arg/Containerfile2
new file mode 100644
index 0000000..d608f6f
--- /dev/null
+++ b/tests/bud/base-with-arg/Containerfile2
@@ -0,0 +1,11 @@
+FROM alpine as build
+ARG CUSTOM_TARGET
+
+FROM build AS platform-first
+ENV BUILT_FOR=first
+
+FROM build AS platform-second
+ENV BUILT_FOR=second
+
+FROM platform-${CUSTOM_TARGET} AS final
+RUN echo "This is built for ${BUILT_FOR}"
diff --git a/tests/bud/base-with-arg/Containerfilebad b/tests/bud/base-with-arg/Containerfilebad
new file mode 100644
index 0000000..584a016
--- /dev/null
+++ b/tests/bud/base-with-arg/Containerfilebad
@@ -0,0 +1,11 @@
+FROM alpine as build
+
+FROM build AS platform-first
+ENV BUILT_FOR=first
+
+FROM build AS platform-second
+ENV BUILT_FOR=second
+
+# Should fail since we never declared CUSTOM_TARGET
+FROM platform-${CUSTOM_TARGET} AS final
+RUN echo "This is built for ${BUILT_FOR}"
diff --git a/tests/bud/base-with-arg/first.args b/tests/bud/base-with-arg/first.args
new file mode 100644
index 0000000..c055850
--- /dev/null
+++ b/tests/bud/base-with-arg/first.args
@@ -0,0 +1 @@
+CUSTOM_TARGET=first
diff --git a/tests/bud/base-with-arg/second.args b/tests/bud/base-with-arg/second.args
new file mode 100644
index 0000000..aebbb99
--- /dev/null
+++ b/tests/bud/base-with-arg/second.args
@@ -0,0 +1 @@
+CUSTOM_TARGET=second
diff --git a/tests/bud/base-with-labels/Containerfile b/tests/bud/base-with-labels/Containerfile
new file mode 100644
index 0000000..345743d
--- /dev/null
+++ b/tests/bud/base-with-labels/Containerfile
@@ -0,0 +1,2 @@
+FROM registry.fedoraproject.org/fedora-minimal
+RUN echo hello
diff --git a/tests/bud/build-arg/Dockerfile b/tests/bud/build-arg/Dockerfile
new file mode 100644
index 0000000..fea6ee9
--- /dev/null
+++ b/tests/bud/build-arg/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+ARG foo
+RUN echo $foo
diff --git a/tests/bud/build-arg/Dockerfile2 b/tests/bud/build-arg/Dockerfile2
new file mode 100644
index 0000000..0af41bd
--- /dev/null
+++ b/tests/bud/build-arg/Dockerfile2
@@ -0,0 +1,2 @@
+ARG IMAGE=busybox
+FROM ${IMAGE}
diff --git a/tests/bud/build-arg/Dockerfile3 b/tests/bud/build-arg/Dockerfile3
new file mode 100644
index 0000000..f7e8b84
--- /dev/null
+++ b/tests/bud/build-arg/Dockerfile3
@@ -0,0 +1,15 @@
+FROM busybox
+MAINTAINER jdoe <jdoe@example.com>
+ENV container="docker"
+
+RUN echo this-should-be-cached-but-it-s-not
+
+ARG USERNAME
+ARG UID
+ARG CODE
+ARG PGDATA
+ARG PORT=55555
+
+RUN echo this-should-not-be-cached-when-args-change
+
+CMD ["/container-run"]
diff --git a/tests/bud/build-arg/Dockerfile4 b/tests/bud/build-arg/Dockerfile4
new file mode 100644
index 0000000..18d6a2e
--- /dev/null
+++ b/tests/bud/build-arg/Dockerfile4
@@ -0,0 +1,3 @@
+FROM alpine
+ARG TEST
+ENV NAME=$TEST
diff --git a/tests/bud/build-with-from/Containerfile b/tests/bud/build-with-from/Containerfile
new file mode 100644
index 0000000..d25cad9
--- /dev/null
+++ b/tests/bud/build-with-from/Containerfile
@@ -0,0 +1,4 @@
+FROM fedora as builder
+FROM busybox
+COPY --from=builder /bin/df /tmp/df_tester
+
diff --git a/tests/bud/buildkit-mount-from/Dockerfilebindfrom b/tests/bud/buildkit-mount-from/Dockerfilebindfrom
new file mode 100644
index 0000000..e81ee4e
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilebindfrom
@@ -0,0 +1,3 @@
+FROM alpine
+# use from=<image> as mount source
+RUN --mount=type=bind,source=.,from=buildkitbase,target=/test cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilebindfromrelative b/tests/bud/buildkit-mount-from/Dockerfilebindfromrelative
new file mode 100644
index 0000000..c421a0c
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilebindfromrelative
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use from=<image> as mount source
+RUN --mount=type=bind,source=subdir,from=buildkitbaserelative,target=/test cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilebindfromwithemptyfrom b/tests/bud/buildkit-mount-from/Dockerfilebindfromwithemptyfrom
new file mode 100644
index 0000000..5fc867f
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilebindfromwithemptyfrom
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use from=<image> as mount source
+RUN --mount=type=bind,source=.,from=,target=/test cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilebindfromwithoutsource b/tests/bud/buildkit-mount-from/Dockerfilebindfromwithoutsource
new file mode 100644
index 0000000..3dee4e2
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilebindfromwithoutsource
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use from=<image> as mount source
+RUN --mount=type=bind,from=buildkitbase,target=/test cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilebindfromwriteable b/tests/bud/buildkit-mount-from/Dockerfilebindfromwriteable
new file mode 100644
index 0000000..1f4cc31
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilebindfromwriteable
@@ -0,0 +1,3 @@
+FROM alpine
+# use from=<image> as mount source
+RUN --mount=type=bind,source=.,from=buildkitbase,target=/test,rw echo "world" > /test/hello && cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilebuildkitbase b/tests/bud/buildkit-mount-from/Dockerfilebuildkitbase
new file mode 100644
index 0000000..079440f
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilebuildkitbase
@@ -0,0 +1,2 @@
+FROM scratch
+COPY hello .
diff --git a/tests/bud/buildkit-mount-from/Dockerfilebuildkitbaserelative b/tests/bud/buildkit-mount-from/Dockerfilebuildkitbaserelative
new file mode 100644
index 0000000..a5c2429
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilebuildkitbaserelative
@@ -0,0 +1,3 @@
+FROM alpine
+RUN mkdir subdir
+COPY hello /subdir
diff --git a/tests/bud/buildkit-mount-from/Dockerfilecachefrom b/tests/bud/buildkit-mount-from/Dockerfilecachefrom
new file mode 100644
index 0000000..c8b843a
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilecachefrom
@@ -0,0 +1,8 @@
+FROM alpine as builder
+RUN mkdir subdir
+COPY hello .
+
+FROM alpine as second
+RUN mkdir /test
+# use another stage as cache source
+RUN --mount=type=cache,from=builder,target=/test cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilecachefromimage b/tests/bud/buildkit-mount-from/Dockerfilecachefromimage
new file mode 100644
index 0000000..8965fa6
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilecachefromimage
@@ -0,0 +1,5 @@
+FROM alpine
+RUN mkdir /test
+# use another image as cache source
+# following should fail as cache does not supports mounting image
+RUN --mount=type=cache,from=buildkitbase,target=/test cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilecachemultiplefrom b/tests/bud/buildkit-mount-from/Dockerfilecachemultiplefrom
new file mode 100644
index 0000000..93aba35
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilecachemultiplefrom
@@ -0,0 +1,10 @@
+FROM alpine as builder
+COPY hello .
+
+FROM alpine as builder2
+COPY hello2 .
+
+FROM alpine
+RUN mkdir /test
+# use other stages as cache source
+RUN --mount=type=cache,from=builder,target=/test --mount=type=cache,from=builder2,target=/test2 cat /test2/hello2 && cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilemultistagefrom b/tests/bud/buildkit-mount-from/Dockerfilemultistagefrom
new file mode 100644
index 0000000..470aa03
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilemultistagefrom
@@ -0,0 +1,6 @@
+FROM alpine as builder
+RUN mkdir subdir
+COPY hello ./subdir/
+
+FROM alpine as second
+RUN --mount=type=bind,source=/subdir,from=builder,target=/test cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/Dockerfilemultistagefromcache b/tests/bud/buildkit-mount-from/Dockerfilemultistagefromcache
new file mode 100644
index 0000000..26f656b
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockerfilemultistagefromcache
@@ -0,0 +1,11 @@
+FROM alpine as builder
+RUN mkdir subdir
+RUN mkdir subdir/subdir2
+COPY hello ./subdir/
+COPY hello2 ./subdir/subdir2/
+
+FROM alpine as second
+RUN --mount=type=cache,id=1,source=/subdir,from=builder,target=/test cat /test/hello
+
+FROM alpine as third
+RUN --mount=type=cache,id=2,source=/subdir/subdir2,from=builder,target=/test cat /test/hello2
diff --git a/tests/bud/buildkit-mount-from/Dockermultistagefrom b/tests/bud/buildkit-mount-from/Dockermultistagefrom
new file mode 100644
index 0000000..470aa03
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/Dockermultistagefrom
@@ -0,0 +1,6 @@
+FROM alpine as builder
+RUN mkdir subdir
+COPY hello ./subdir/
+
+FROM alpine as second
+RUN --mount=type=bind,source=/subdir,from=builder,target=/test cat /test/hello
diff --git a/tests/bud/buildkit-mount-from/hello b/tests/bud/buildkit-mount-from/hello
new file mode 100644
index 0000000..ce01362
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/hello
@@ -0,0 +1 @@
+hello
diff --git a/tests/bud/buildkit-mount-from/hello2 b/tests/bud/buildkit-mount-from/hello2
new file mode 100644
index 0000000..14be0d4
--- /dev/null
+++ b/tests/bud/buildkit-mount-from/hello2
@@ -0,0 +1 @@
+hello2
diff --git a/tests/bud/buildkit-mount/Dockerfile b/tests/bud/buildkit-mount/Dockerfile
new file mode 100644
index 0000000..6e675c9
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfile
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+RUN --mount=type=bind,source=.,target=/test,z cat /test/input_file
diff --git a/tests/bud/buildkit-mount/Dockerfile2 b/tests/bud/buildkit-mount/Dockerfile2
new file mode 100644
index 0000000..2ea99c2
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfile2
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+RUN --mount=type=bind,target=/test,z cat /test/input_file
diff --git a/tests/bud/buildkit-mount/Dockerfile3 b/tests/bud/buildkit-mount/Dockerfile3
new file mode 100644
index 0000000..79a1854
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfile3
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+RUN --mount=type=bind,source=.,target=/test,z,rw echo world > /test/input_file && cat /test/input_file
diff --git a/tests/bud/buildkit-mount/Dockerfile4 b/tests/bud/buildkit-mount/Dockerfile4
new file mode 100644
index 0000000..156436b
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfile4
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+RUN --mount=type=bind,source=subdir/,target=/test,z cat /test/input_file
diff --git a/tests/bud/buildkit-mount/Dockerfile6 b/tests/bud/buildkit-mount/Dockerfile6
new file mode 100644
index 0000000..6c504fa
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfile6
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+RUN --mount=target=/test,z cat /test/input_file
diff --git a/tests/bud/buildkit-mount/Dockerfilecacheread b/tests/bud/buildkit-mount/Dockerfilecacheread
new file mode 100644
index 0000000..be12315
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfilecacheread
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+RUN --mount=type=cache,target=/test,z cat /test/world
diff --git a/tests/bud/buildkit-mount/Dockerfilecachereadwithoutz b/tests/bud/buildkit-mount/Dockerfilecachereadwithoutz
new file mode 100644
index 0000000..5d89409
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfilecachereadwithoutz
@@ -0,0 +1,3 @@
+FROM alpine
+RUN mkdir /test2
+RUN --mount=type=cache,target=/test2 cat /test2/world
diff --git a/tests/bud/buildkit-mount/Dockerfilecachewrite b/tests/bud/buildkit-mount/Dockerfilecachewrite
new file mode 100644
index 0000000..67a6593
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfilecachewrite
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+RUN --mount=type=cache,target=/test,z echo hello > /test/world
diff --git a/tests/bud/buildkit-mount/Dockerfilecachewritesharing b/tests/bud/buildkit-mount/Dockerfilecachewritesharing
new file mode 100644
index 0000000..ac2a3b6
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfilecachewritesharing
@@ -0,0 +1,7 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+# This locks cache
+RUN --mount=target=/test,type=cache,sharing=locked,z echo hello > /test/world && cat /test/world
+# Cache must be unlocked so it can be locked again
+RUN --mount=target=/test,sharing=locked,type=cache,z echo world > /test/world && cat /test/world
diff --git a/tests/bud/buildkit-mount/Dockerfilecachewritewithoutz b/tests/bud/buildkit-mount/Dockerfilecachewritewithoutz
new file mode 100644
index 0000000..e1afb99
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfilecachewritewithoutz
@@ -0,0 +1,3 @@
+FROM alpine
+RUN mkdir /test2
+RUN --mount=type=cache,target=/test2 echo hello > /test2/world
diff --git a/tests/bud/buildkit-mount/Dockerfilemultiplemounts b/tests/bud/buildkit-mount/Dockerfilemultiplemounts
new file mode 100644
index 0000000..d02fb62
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfilemultiplemounts
@@ -0,0 +1,8 @@
+FROM alpine
+RUN mkdir /test
+# use option z if selinux is enabled
+# Order here is important, 'type=Bind' is the default, we want to make sure
+# it stays at it
+RUN --mount=type=cache,target=/test/cache,z \
+ --mount=source=input_file,target=/test/input_file,z \
+ cat /test/input_file
diff --git a/tests/bud/buildkit-mount/Dockerfiletmpfs b/tests/bud/buildkit-mount/Dockerfiletmpfs
new file mode 100644
index 0000000..057fb03
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfiletmpfs
@@ -0,0 +1,4 @@
+FROM alpine
+
+# As a baseline, this should succeed without creating any directory on container
+RUN --mount=type=tmpfs,target=/var/tmpfs-not-empty touch /var/tmpfs-not-empty/hello
diff --git a/tests/bud/buildkit-mount/Dockerfiletmpfscopyup b/tests/bud/buildkit-mount/Dockerfiletmpfscopyup
new file mode 100644
index 0000000..8e04d65
--- /dev/null
+++ b/tests/bud/buildkit-mount/Dockerfiletmpfscopyup
@@ -0,0 +1,4 @@
+FROM alpine
+
+# As a baseline, this should succeed without creating any directory on container
+RUN --mount=type=tmpfs,target=/etc/ssl,tmpcopyup ls /etc/ssl
diff --git a/tests/bud/buildkit-mount/input_file b/tests/bud/buildkit-mount/input_file
new file mode 100644
index 0000000..ce01362
--- /dev/null
+++ b/tests/bud/buildkit-mount/input_file
@@ -0,0 +1 @@
+hello
diff --git a/tests/bud/buildkit-mount/subdir/input_file b/tests/bud/buildkit-mount/subdir/input_file
new file mode 100644
index 0000000..ce01362
--- /dev/null
+++ b/tests/bud/buildkit-mount/subdir/input_file
@@ -0,0 +1 @@
+hello
diff --git a/tests/bud/cache-chown/Dockerfile.add1 b/tests/bud/cache-chown/Dockerfile.add1
new file mode 100644
index 0000000..254c5cc
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.add1
@@ -0,0 +1,2 @@
+FROM scratch
+ADD --chown=1:1 testfile /
diff --git a/tests/bud/cache-chown/Dockerfile.add2 b/tests/bud/cache-chown/Dockerfile.add2
new file mode 100644
index 0000000..6a4925f
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.add2
@@ -0,0 +1,2 @@
+FROM scratch
+ADD --chown=2:2 testfile /
diff --git a/tests/bud/cache-chown/Dockerfile.copy1 b/tests/bud/cache-chown/Dockerfile.copy1
new file mode 100644
index 0000000..5db8b7d
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.copy1
@@ -0,0 +1,2 @@
+FROM scratch
+COPY --chown=1:1 testfile /
diff --git a/tests/bud/cache-chown/Dockerfile.copy2 b/tests/bud/cache-chown/Dockerfile.copy2
new file mode 100644
index 0000000..1e68c42
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.copy2
@@ -0,0 +1,2 @@
+FROM scratch
+COPY --chown=2:2 testfile /
diff --git a/tests/bud/cache-chown/Dockerfile.prev1 b/tests/bud/cache-chown/Dockerfile.prev1
new file mode 100644
index 0000000..c24ad99
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.prev1
@@ -0,0 +1,4 @@
+FROM scratch
+COPY testfile renamedfile
+FROM scratch
+COPY --chown=1:1 --from=0 renamedfile /
diff --git a/tests/bud/cache-chown/Dockerfile.prev2 b/tests/bud/cache-chown/Dockerfile.prev2
new file mode 100644
index 0000000..333cef1
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.prev2
@@ -0,0 +1,4 @@
+FROM scratch
+COPY testfile renamedfile
+FROM scratch
+COPY --chown=2:2 --from=0 renamedfile /
diff --git a/tests/bud/cache-chown/Dockerfile.tar1 b/tests/bud/cache-chown/Dockerfile.tar1
new file mode 100644
index 0000000..a4c329b
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.tar1
@@ -0,0 +1,3 @@
+FROM scratch
+# Surprise! The --chown flag is ignored when we're extracting archives.
+ADD --chown=1:1 testfile.tar.gz /
diff --git a/tests/bud/cache-chown/Dockerfile.tar2 b/tests/bud/cache-chown/Dockerfile.tar2
new file mode 100644
index 0000000..d9d189d
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.tar2
@@ -0,0 +1,3 @@
+FROM scratch
+# Surprise! The --chown flag is ignored when we're extracting archives.
+ADD --chown=2:2 testfile.tar.gz /
diff --git a/tests/bud/cache-chown/Dockerfile.url1 b/tests/bud/cache-chown/Dockerfile.url1
new file mode 100644
index 0000000..9c14204
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.url1
@@ -0,0 +1,2 @@
+FROM scratch
+ADD --chown=1:1 https://raw.githubusercontent.com/containers/buildah/main/LICENSE /
diff --git a/tests/bud/cache-chown/Dockerfile.url2 b/tests/bud/cache-chown/Dockerfile.url2
new file mode 100644
index 0000000..0ba353e
--- /dev/null
+++ b/tests/bud/cache-chown/Dockerfile.url2
@@ -0,0 +1,2 @@
+FROM scratch
+ADD --chown=2:2 https://raw.githubusercontent.com/containers/buildah/main/LICENSE /
diff --git a/tests/bud/cache-chown/testfile b/tests/bud/cache-chown/testfile
new file mode 100644
index 0000000..43b8cd3
--- /dev/null
+++ b/tests/bud/cache-chown/testfile
@@ -0,0 +1 @@
+Hi, I'm a test file. Enjoy the test.
diff --git a/tests/bud/cache-chown/testfile.tar.gz b/tests/bud/cache-chown/testfile.tar.gz
new file mode 100644
index 0000000..2dfc397
--- /dev/null
+++ b/tests/bud/cache-chown/testfile.tar.gz
Binary files differ
diff --git a/tests/bud/cache-format/Dockerfile b/tests/bud/cache-format/Dockerfile
new file mode 100644
index 0000000..a3e2de1
--- /dev/null
+++ b/tests/bud/cache-format/Dockerfile
@@ -0,0 +1,3 @@
+FROM alpine
+COPY . .
+RUN pwd
diff --git a/tests/bud/cache-from/Containerfile b/tests/bud/cache-from/Containerfile
new file mode 100644
index 0000000..bd599b0
--- /dev/null
+++ b/tests/bud/cache-from/Containerfile
@@ -0,0 +1,3 @@
+FROM alpine
+ARG VAR=hello
+RUN echo "Hello $VAR"
diff --git a/tests/bud/cache-mount-locked/Containerfile b/tests/bud/cache-mount-locked/Containerfile
new file mode 100644
index 0000000..d63cab8
--- /dev/null
+++ b/tests/bud/cache-mount-locked/Containerfile
@@ -0,0 +1,21 @@
+FROM quay.io/centos/centos:8
+
+ARG WIPE_CACHE
+
+COPY file .
+
+RUN --mount=type=cache,target=/cache1,sharing=locked \
+ --mount=type=cache,target=/cache2 \
+ set -ex; \
+ ls -l /cache1; \
+ if [[ -v WIPE_CACHE ]]; then \
+ >&2 echo "Wiping cache"; \
+ find /cache1 -mindepth 1 -delete; \
+ fi; \
+ echo "foo" > /cache1/foo.txt; \
+ ls -l /cache1; \
+ chmod --recursive g=u /cache1; \
+ : ;
+
+# Cache was wiped-out but lock should not hang here: https://github.com/containers/buildah/issues/4342
+RUN --mount=type=cache,target=/cache1,sharing=locked cat file
diff --git a/tests/bud/cache-mount-locked/file b/tests/bud/cache-mount-locked/file
new file mode 100644
index 0000000..ce01362
--- /dev/null
+++ b/tests/bud/cache-mount-locked/file
@@ -0,0 +1 @@
+hello
diff --git a/tests/bud/cache-scratch/Dockerfile b/tests/bud/cache-scratch/Dockerfile
new file mode 100644
index 0000000..662b5a3
--- /dev/null
+++ b/tests/bud/cache-scratch/Dockerfile
@@ -0,0 +1,5 @@
+FROM alpine as build
+
+FROM scratch
+COPY --from=build / /
+COPY --from=build / /
diff --git a/tests/bud/cache-scratch/Dockerfile.config b/tests/bud/cache-scratch/Dockerfile.config
new file mode 100644
index 0000000..4c756d3
--- /dev/null
+++ b/tests/bud/cache-scratch/Dockerfile.config
@@ -0,0 +1,8 @@
+FROM alpine as build
+MAINTAINER root@localhost
+
+FROM scratch
+MAINTAINER root@localhost
+COPY --from=build / /
+MAINTAINER root@localhost
+COPY --from=build / /
diff --git a/tests/bud/cache-scratch/Dockerfile.different1 b/tests/bud/cache-scratch/Dockerfile.different1
new file mode 100644
index 0000000..e3034bf
--- /dev/null
+++ b/tests/bud/cache-scratch/Dockerfile.different1
@@ -0,0 +1,6 @@
+FROM alpine as build
+
+FROM scratch
+COPY --from=build / /
+RUN touch cache-invalidating-difference
+COPY --from=build / /
diff --git a/tests/bud/cache-scratch/Dockerfile.different2 b/tests/bud/cache-scratch/Dockerfile.different2
new file mode 100644
index 0000000..e40d0c0
--- /dev/null
+++ b/tests/bud/cache-scratch/Dockerfile.different2
@@ -0,0 +1,6 @@
+FROM alpine as build
+RUN touch cache-invalidating-difference
+
+FROM scratch
+COPY --from=build / /
+COPY --from=build / /
diff --git a/tests/bud/cache-stages/Dockerfile.1 b/tests/bud/cache-stages/Dockerfile.1
new file mode 100644
index 0000000..b33bf20
--- /dev/null
+++ b/tests/bud/cache-stages/Dockerfile.1
@@ -0,0 +1,2 @@
+FROM alpine AS builder
+RUN touch /tmpfile
diff --git a/tests/bud/cache-stages/Dockerfile.2 b/tests/bud/cache-stages/Dockerfile.2
new file mode 100644
index 0000000..288d23d
--- /dev/null
+++ b/tests/bud/cache-stages/Dockerfile.2
@@ -0,0 +1,5 @@
+FROM alpine AS builder
+RUN touch /tmpfile
+FROM alpine AS base
+COPY --from=builder /tmpfile /
+RUN stat /tmpfile
diff --git a/tests/bud/cache-stages/Dockerfile.3 b/tests/bud/cache-stages/Dockerfile.3
new file mode 100644
index 0000000..0dc1bfc
--- /dev/null
+++ b/tests/bud/cache-stages/Dockerfile.3
@@ -0,0 +1,25 @@
+FROM alpine AS common
+
+RUN echo "common" > /common.txt
+
+FROM common AS buildA
+RUN echo "foo" > /foo.txt
+
+FROM common AS buildB
+# This is contrived to force a cached layer without having to build twice
+# Ordinarily you wouldn't have duplicate stages
+RUN echo "foo" > /foo.txt
+
+FROM alpine
+
+ARG NONCE
+
+RUN --mount=type=bind,from=buildA,target=/buildA \
+ --mount=type=bind,from=buildB,target=/buildB \
+ set -ex; \
+ cat /buildA/common.txt; \
+ cat /buildA/foo.txt; \
+ cat /buildB/common.txt; \
+ cat /buildB/foo.txt; \
+ echo "Worked"; \
+ : ;
diff --git a/tests/bud/capabilities/Dockerfile b/tests/bud/capabilities/Dockerfile
new file mode 100644
index 0000000..6b1d510
--- /dev/null
+++ b/tests/bud/capabilities/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+USER 3267
+RUN id; grep -i cap /proc/self/status
diff --git a/tests/bud/check-race/Containerfile b/tests/bud/check-race/Containerfile
new file mode 100644
index 0000000..b78965a
--- /dev/null
+++ b/tests/bud/check-race/Containerfile
@@ -0,0 +1,2 @@
+FROM alpine
+RUN for i in $(seq 0 1000); do touch /$i; done
diff --git a/tests/bud/commit/name-path-changes/Dockerfile b/tests/bud/commit/name-path-changes/Dockerfile
new file mode 100644
index 0000000..d6fc44b
--- /dev/null
+++ b/tests/bud/commit/name-path-changes/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+RUN pwd
diff --git a/tests/bud/container-ignoresymlink/Dockerfile b/tests/bud/container-ignoresymlink/Dockerfile
new file mode 100644
index 0000000..7c9b090
--- /dev/null
+++ b/tests/bud/container-ignoresymlink/Dockerfile
@@ -0,0 +1,3 @@
+FROM alpine
+COPY / /dir
+RUN ls /dir
diff --git a/tests/bud/container-ignoresymlink/hello b/tests/bud/container-ignoresymlink/hello
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/container-ignoresymlink/hello
diff --git a/tests/bud/container-ignoresymlink/world b/tests/bud/container-ignoresymlink/world
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/container-ignoresymlink/world
diff --git a/tests/bud/containeranddockerfile/Containerfile b/tests/bud/containeranddockerfile/Containerfile
new file mode 100644
index 0000000..67fd379
--- /dev/null
+++ b/tests/bud/containeranddockerfile/Containerfile
@@ -0,0 +1 @@
+FROM alpine
diff --git a/tests/bud/containeranddockerfile/Dockerfile b/tests/bud/containeranddockerfile/Dockerfile
new file mode 100644
index 0000000..0910236
--- /dev/null
+++ b/tests/bud/containeranddockerfile/Dockerfile
@@ -0,0 +1 @@
+FROM busybox
diff --git a/tests/bud/containerfile/Containerfile b/tests/bud/containerfile/Containerfile
new file mode 100644
index 0000000..0f9708a
--- /dev/null
+++ b/tests/bud/containerfile/Containerfile
@@ -0,0 +1,2 @@
+# This is for testing Containerfile with Buildah
+FROM alpine
diff --git a/tests/bud/containerfile/Containerfile.in b/tests/bud/containerfile/Containerfile.in
new file mode 100644
index 0000000..d13d5cd
--- /dev/null
+++ b/tests/bud/containerfile/Containerfile.in
@@ -0,0 +1,7 @@
+# include "Containerfile"
+RUN echo "success"
+#if TESTCPPDEBUG
+RUN echo "debug=yes"
+# else
+RUN echo "debug=no"
+#endif
diff --git a/tests/bud/containerignore/.containerignore b/tests/bud/containerignore/.containerignore
new file mode 100644
index 0000000..4cec094
--- /dev/null
+++ b/tests/bud/containerignore/.containerignore
@@ -0,0 +1,6 @@
+# comment
+*
+test*
+!test2*
+subdir
+!*/sub1* \ No newline at end of file
diff --git a/tests/bud/containerignore/.dockerignore b/tests/bud/containerignore/.dockerignore
new file mode 100644
index 0000000..da9bcd6
--- /dev/null
+++ b/tests/bud/containerignore/.dockerignore
@@ -0,0 +1,2 @@
+# comment
+*
diff --git a/tests/bud/containerignore/Dockerfile b/tests/bud/containerignore/Dockerfile
new file mode 100644
index 0000000..4d930ef
--- /dev/null
+++ b/tests/bud/containerignore/Dockerfile
@@ -0,0 +1,4 @@
+FROM alpine
+
+COPY ./ ./
+COPY subdir ./ \ No newline at end of file
diff --git a/tests/bud/containerignore/Dockerfile.succeed b/tests/bud/containerignore/Dockerfile.succeed
new file mode 100644
index 0000000..a164936
--- /dev/null
+++ b/tests/bud/containerignore/Dockerfile.succeed
@@ -0,0 +1,3 @@
+FROM alpine
+
+COPY ./ ./
diff --git a/tests/bud/containerignore/subdir/sub1.txt b/tests/bud/containerignore/subdir/sub1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/containerignore/subdir/sub1.txt
diff --git a/tests/bud/containerignore/subdir/sub2.txt b/tests/bud/containerignore/subdir/sub2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/containerignore/subdir/sub2.txt
diff --git a/tests/bud/containerignore/test1.txt b/tests/bud/containerignore/test1.txt
new file mode 100644
index 0000000..745eda7
--- /dev/null
+++ b/tests/bud/containerignore/test1.txt
@@ -0,0 +1 @@
+test1 failed \ No newline at end of file
diff --git a/tests/bud/containerignore/test2.txt b/tests/bud/containerignore/test2.txt
new file mode 100644
index 0000000..4e4d75d
--- /dev/null
+++ b/tests/bud/containerignore/test2.txt
@@ -0,0 +1 @@
+test2 failed \ No newline at end of file
diff --git a/tests/bud/context-escape-dir/testdir/Containerfile b/tests/bud/context-escape-dir/testdir/Containerfile
new file mode 100644
index 0000000..267855a
--- /dev/null
+++ b/tests/bud/context-escape-dir/testdir/Containerfile
@@ -0,0 +1,2 @@
+FROM alpine
+COPY ../upperfile.txt /
diff --git a/tests/bud/context-escape-dir/upperfile.txt b/tests/bud/context-escape-dir/upperfile.txt
new file mode 100644
index 0000000..92794b3
--- /dev/null
+++ b/tests/bud/context-escape-dir/upperfile.txt
@@ -0,0 +1,3 @@
+This is a text file to be used in Buildah testing.
+This will be used to ensure that a file from above the context
+directory can not be copied during the build phase.
diff --git a/tests/bud/context-from-stdin/Dockerfile b/tests/bud/context-from-stdin/Dockerfile
new file mode 100644
index 0000000..f794ed3
--- /dev/null
+++ b/tests/bud/context-from-stdin/Dockerfile
@@ -0,0 +1,5 @@
+FROM alpine as base
+RUN echo "stdin-context" > /scratchfile
+
+FROM scratch
+COPY --from=base /scratchfile /
diff --git a/tests/bud/copy-archive/Containerfile b/tests/bud/copy-archive/Containerfile
new file mode 100644
index 0000000..68ab9c8
--- /dev/null
+++ b/tests/bud/copy-archive/Containerfile
@@ -0,0 +1,2 @@
+FROM docker.io/busybox
+ADD test.tar.xz /
diff --git a/tests/bud/copy-chmod/Dockerfile b/tests/bud/copy-chmod/Dockerfile
new file mode 100644
index 0000000..6c1de47
--- /dev/null
+++ b/tests/bud/copy-chmod/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+
+COPY --chmod=777 copychmod.txt /tmp
+RUN ls -l /tmp/copychmod.txt
+CMD /bin/sh
+
diff --git a/tests/bud/copy-chmod/Dockerfile.bad b/tests/bud/copy-chmod/Dockerfile.bad
new file mode 100644
index 0000000..3cbe4b5
--- /dev/null
+++ b/tests/bud/copy-chmod/Dockerfile.bad
@@ -0,0 +1,7 @@
+FROM alpine
+
+COPY --chmod 777 copychmod.txt /tmp
+RUN ls -l /tmp/copychmod.txt
+CMD /bin/sh
+
+
diff --git a/tests/bud/copy-chmod/Dockerfile.combined b/tests/bud/copy-chmod/Dockerfile.combined
new file mode 100644
index 0000000..9b28687
--- /dev/null
+++ b/tests/bud/copy-chmod/Dockerfile.combined
@@ -0,0 +1,6 @@
+FROM alpine
+
+COPY --chmod=777 --chown=2367:3267 copychmod.txt /tmp
+RUN stat -c "chmod:%a user:%u group:%g" /tmp/copychmod.txt
+CMD /bin/sh
+
diff --git a/tests/bud/copy-chmod/copychmod.txt b/tests/bud/copy-chmod/copychmod.txt
new file mode 100644
index 0000000..761b584
--- /dev/null
+++ b/tests/bud/copy-chmod/copychmod.txt
@@ -0,0 +1 @@
+File for testing COPY with chmod in a Dockerfile.
diff --git a/tests/bud/copy-chown/Containerfile.chown_user b/tests/bud/copy-chown/Containerfile.chown_user
new file mode 100644
index 0000000..08a2e58
--- /dev/null
+++ b/tests/bud/copy-chown/Containerfile.chown_user
@@ -0,0 +1,8 @@
+FROM ubuntu:latest
+
+ENV MYUSER=myuser
+
+RUN useradd --create-home --home /"${MYUSER}" "${MYUSER}"
+COPY --chown="${MYUSER}" ./copychown.txt /somewhere
+
+RUN ls -alF /somewhere
diff --git a/tests/bud/copy-chown/Dockerfile b/tests/bud/copy-chown/Dockerfile
new file mode 100644
index 0000000..1498ef6
--- /dev/null
+++ b/tests/bud/copy-chown/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+
+COPY --chown=2367:3267 copychown.txt /tmp
+RUN stat -c "user:%u group:%g" /tmp/copychown.txt
+CMD /bin/sh
+
diff --git a/tests/bud/copy-chown/Dockerfile.bad b/tests/bud/copy-chown/Dockerfile.bad
new file mode 100644
index 0000000..3e1134c
--- /dev/null
+++ b/tests/bud/copy-chown/Dockerfile.bad
@@ -0,0 +1,6 @@
+FROM alpine
+
+COPY --chown 2367:3267 copychown.txt /tmp
+RUN stat -c "user:%u group:%g" /tmp/copychown.txt
+CMD /bin/sh
+
diff --git a/tests/bud/copy-chown/Dockerfile.bad2 b/tests/bud/copy-chown/Dockerfile.bad2
new file mode 100644
index 0000000..8b9523d
--- /dev/null
+++ b/tests/bud/copy-chown/Dockerfile.bad2
@@ -0,0 +1,4 @@
+FROM alpine
+
+COPY --chown=${BOGUS}:${BOGUS} copychown.txt /tmp
+CMD /bin/sh
diff --git a/tests/bud/copy-chown/copychown.txt b/tests/bud/copy-chown/copychown.txt
new file mode 100644
index 0000000..d483cb2
--- /dev/null
+++ b/tests/bud/copy-chown/copychown.txt
@@ -0,0 +1 @@
+File for testing COPY with chown in a Dockerfile.
diff --git a/tests/bud/copy-create-absolute-path/Dockerfile b/tests/bud/copy-create-absolute-path/Dockerfile
new file mode 100644
index 0000000..2659d2c
--- /dev/null
+++ b/tests/bud/copy-create-absolute-path/Dockerfile
@@ -0,0 +1,3 @@
+FROM ubuntu
+COPY distutils.cfg /usr/lib/python3.7/distutils/distutils.cfg
+RUN stat -c "permissions=%a" /usr/lib/python3.7/distutils
diff --git a/tests/bud/copy-create-absolute-path/distutils.cfg b/tests/bud/copy-create-absolute-path/distutils.cfg
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/copy-create-absolute-path/distutils.cfg
diff --git a/tests/bud/copy-create-relative-path/Dockerfile b/tests/bud/copy-create-relative-path/Dockerfile
new file mode 100644
index 0000000..0e6d726
--- /dev/null
+++ b/tests/bud/copy-create-relative-path/Dockerfile
@@ -0,0 +1,3 @@
+FROM ubuntu
+COPY distutils.cfg lib/custom/distutils.cfg
+RUN stat -c "permissions=%a" lib/custom
diff --git a/tests/bud/copy-create-relative-path/distutils.cfg b/tests/bud/copy-create-relative-path/distutils.cfg
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/copy-create-relative-path/distutils.cfg
diff --git a/tests/bud/copy-envvar/Containerfile b/tests/bud/copy-envvar/Containerfile
new file mode 100644
index 0000000..0e8c910
--- /dev/null
+++ b/tests/bud/copy-envvar/Containerfile
@@ -0,0 +1,3 @@
+FROM alpine
+ENV VERSION=0.0.1
+COPY file-${VERSION}.txt /
diff --git a/tests/bud/copy-envvar/file-0.0.1.txt b/tests/bud/copy-envvar/file-0.0.1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/copy-envvar/file-0.0.1.txt
diff --git a/tests/bud/copy-from/Dockerfile b/tests/bud/copy-from/Dockerfile
new file mode 100644
index 0000000..f195441
--- /dev/null
+++ b/tests/bud/copy-from/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+# DO NOT TOUCH THIS UNLESS YOU KNOW WHAT YOU'RE DOING!
+# renovatebot seems to want to clobber the image tag below. DO NOT LET IT DO SO.
+COPY --from=quay.io/libpod/testimage:20221018 /home/podman/testimage-id /home/busyboxpodman/copied-testimage-id
diff --git a/tests/bud/copy-from/Dockerfile.bad b/tests/bud/copy-from/Dockerfile.bad
new file mode 100644
index 0000000..1a815e6
--- /dev/null
+++ b/tests/bud/copy-from/Dockerfile.bad
@@ -0,0 +1,2 @@
+FROM busybox
+COPY --from registry.com/missing-equals/causes:error /foo /bar
diff --git a/tests/bud/copy-from/Dockerfile2 b/tests/bud/copy-from/Dockerfile2
new file mode 100644
index 0000000..d5a88d0
--- /dev/null
+++ b/tests/bud/copy-from/Dockerfile2
@@ -0,0 +1,4 @@
+FROM busybox AS basis
+RUN echo hello > /newfile
+FROM basis
+RUN test -s /newfile
diff --git a/tests/bud/copy-from/Dockerfile2.bad b/tests/bud/copy-from/Dockerfile2.bad
new file mode 100644
index 0000000..7e29796
--- /dev/null
+++ b/tests/bud/copy-from/Dockerfile2.bad
@@ -0,0 +1,6 @@
+FROM busybox AS test
+USER 1001
+FROM busybox AS build
+COPY --from=test /bin/cut /test/
+COPY --from=build /bin/cp /test/
+COPY --from=busybox /bin/paste /test/
diff --git a/tests/bud/copy-from/Dockerfile3 b/tests/bud/copy-from/Dockerfile3
new file mode 100644
index 0000000..98d976a
--- /dev/null
+++ b/tests/bud/copy-from/Dockerfile3
@@ -0,0 +1,4 @@
+FROM docker.io/library/busybox AS build
+RUN rm -f /bin/paste
+USER 1001
+COPY --from=docker.io/library/busybox /bin/paste /test/
diff --git a/tests/bud/copy-from/Dockerfile4 b/tests/bud/copy-from/Dockerfile4
new file mode 100644
index 0000000..6190a61
--- /dev/null
+++ b/tests/bud/copy-from/Dockerfile4
@@ -0,0 +1,4 @@
+FROM docker.io/library/busybox AS test
+RUN rm -f /bin/nl
+FROM docker.io/library/alpine AS final
+COPY --from=docker.io/library/busybox /bin/nl /test/
diff --git a/tests/bud/copy-globs/Containerfile b/tests/bud/copy-globs/Containerfile
new file mode 100644
index 0000000..06b6f78
--- /dev/null
+++ b/tests/bud/copy-globs/Containerfile
@@ -0,0 +1,3 @@
+FROM scratch
+# *txt exists so should succeed
+COPY *.txt /testdir
diff --git a/tests/bud/copy-globs/Containerfile.bad b/tests/bud/copy-globs/Containerfile.bad
new file mode 100644
index 0000000..9645c02
--- /dev/null
+++ b/tests/bud/copy-globs/Containerfile.bad
@@ -0,0 +1,3 @@
+FROM scratch
+# No match so should fail
+COPY *foo /testdir
diff --git a/tests/bud/copy-globs/Containerfile.missing b/tests/bud/copy-globs/Containerfile.missing
new file mode 100644
index 0000000..5185cd4
--- /dev/null
+++ b/tests/bud/copy-globs/Containerfile.missing
@@ -0,0 +1,3 @@
+FROM scratch
+# No match for *foo, but *txt exists so should succeed
+COPY *foo *.txt /testdir
diff --git a/tests/bud/copy-globs/Dockerfile b/tests/bud/copy-globs/Dockerfile
new file mode 100644
index 0000000..6f615a0
--- /dev/null
+++ b/tests/bud/copy-globs/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+COPY --from=alpine /we/never/get/here /nor/here/either
diff --git a/tests/bud/copy-globs/test1.txt b/tests/bud/copy-globs/test1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/copy-globs/test1.txt
diff --git a/tests/bud/copy-globs/test2.txt b/tests/bud/copy-globs/test2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/copy-globs/test2.txt
diff --git a/tests/bud/copy-multiple-files/Dockerfile b/tests/bud/copy-multiple-files/Dockerfile
new file mode 100644
index 0000000..9dcb367
--- /dev/null
+++ b/tests/bud/copy-multiple-files/Dockerfile
@@ -0,0 +1,3 @@
+FROM ubuntu
+COPY file file2 /var/www/
+ADD file file2 /var/html/
diff --git a/tests/bud/copy-multiple-files/file b/tests/bud/copy-multiple-files/file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/copy-multiple-files/file
diff --git a/tests/bud/copy-multiple-files/file2 b/tests/bud/copy-multiple-files/file2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/copy-multiple-files/file2
diff --git a/tests/bud/copy-multistage-paths/Dockerfile.absolute b/tests/bud/copy-multistage-paths/Dockerfile.absolute
new file mode 100644
index 0000000..3764a63
--- /dev/null
+++ b/tests/bud/copy-multistage-paths/Dockerfile.absolute
@@ -0,0 +1,4 @@
+FROM ubuntu as builder
+FROM ubuntu
+COPY --from=builder /bin/bash /my/bin/bash
+RUN stat -c "permissions=%a" /my/bin
diff --git a/tests/bud/copy-multistage-paths/Dockerfile.invalid_from b/tests/bud/copy-multistage-paths/Dockerfile.invalid_from
new file mode 100644
index 0000000..72c56f8
--- /dev/null
+++ b/tests/bud/copy-multistage-paths/Dockerfile.invalid_from
@@ -0,0 +1,4 @@
+FROM ubuntu as builder
+FROM ubuntu
+COPY --from builder /bin/bash /my/bin/bash
+RUN stat -c "permissions=%a" /my/bin
diff --git a/tests/bud/copy-multistage-paths/Dockerfile.relative b/tests/bud/copy-multistage-paths/Dockerfile.relative
new file mode 100644
index 0000000..9e1c84d
--- /dev/null
+++ b/tests/bud/copy-multistage-paths/Dockerfile.relative
@@ -0,0 +1,4 @@
+FROM ubuntu as builder
+FROM ubuntu
+COPY --from=builder /bin/bash my/bin/bash
+RUN stat -c "permissions=%a" my/bin
diff --git a/tests/bud/copy-root/Dockerfile b/tests/bud/copy-root/Dockerfile
new file mode 100644
index 0000000..4f3d1f7
--- /dev/null
+++ b/tests/bud/copy-root/Dockerfile
@@ -0,0 +1,2 @@
+FROM ubuntu
+COPY distutils.cfg /
diff --git a/tests/bud/copy-root/distutils.cfg b/tests/bud/copy-root/distutils.cfg
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/copy-root/distutils.cfg
diff --git a/tests/bud/copy-workdir/Dockerfile b/tests/bud/copy-workdir/Dockerfile
new file mode 100644
index 0000000..e46e017
--- /dev/null
+++ b/tests/bud/copy-workdir/Dockerfile
@@ -0,0 +1,4 @@
+FROM scratch
+WORKDIR /subdir
+COPY file1.txt /
+COPY file2.txt /subdir
diff --git a/tests/bud/copy-workdir/Dockerfile.2 b/tests/bud/copy-workdir/Dockerfile.2
new file mode 100644
index 0000000..e156e78
--- /dev/null
+++ b/tests/bud/copy-workdir/Dockerfile.2
@@ -0,0 +1,4 @@
+FROM alpine
+WORKDIR /subdir
+COPY file1.txt /subdir
+RUN ls
diff --git a/tests/bud/copy-workdir/file1.txt b/tests/bud/copy-workdir/file1.txt
new file mode 100644
index 0000000..e212970
--- /dev/null
+++ b/tests/bud/copy-workdir/file1.txt
@@ -0,0 +1 @@
+file1
diff --git a/tests/bud/copy-workdir/file2.txt b/tests/bud/copy-workdir/file2.txt
new file mode 100644
index 0000000..6c493ff
--- /dev/null
+++ b/tests/bud/copy-workdir/file2.txt
@@ -0,0 +1 @@
+file2
diff --git a/tests/bud/dest-final-slash/Dockerfile b/tests/bud/dest-final-slash/Dockerfile
new file mode 100644
index 0000000..a7ec735
--- /dev/null
+++ b/tests/bud/dest-final-slash/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox AS base
+FROM scratch
+COPY --from=base /bin/ls /test/
+COPY --from=base /bin/sh /bin/
+RUN /test/ls -lR /test/ls
diff --git a/tests/bud/dest-symlink-dangling/Dockerfile b/tests/bud/dest-symlink-dangling/Dockerfile
new file mode 100644
index 0000000..612e6a7
--- /dev/null
+++ b/tests/bud/dest-symlink-dangling/Dockerfile
@@ -0,0 +1,6 @@
+FROM ubuntu
+
+RUN mkdir /symlink
+RUN ln -s /symlink /src && rm -rf /symlink
+COPY Dockerfile /src/
+RUN test -s /symlink/Dockerfile
diff --git a/tests/bud/dest-symlink/Dockerfile b/tests/bud/dest-symlink/Dockerfile
new file mode 100644
index 0000000..d43e930
--- /dev/null
+++ b/tests/bud/dest-symlink/Dockerfile
@@ -0,0 +1,9 @@
+FROM alpine
+
+ENV HBASE_HOME="/usr/local/hbase"
+ENV HBASE_CONF_DIR="/etc/hbase"
+
+RUN mkdir $HBASE_HOME
+RUN ln -s $HBASE_HOME $HBASE_CONF_DIR
+
+COPY Dockerfile $HBASE_CONF_DIR
diff --git a/tests/bud/device/Dockerfile b/tests/bud/device/Dockerfile
new file mode 100644
index 0000000..02df113
--- /dev/null
+++ b/tests/bud/device/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+RUN ls /dev/fuse
diff --git a/tests/bud/dns/Dockerfile b/tests/bud/dns/Dockerfile
new file mode 100644
index 0000000..1be280c
--- /dev/null
+++ b/tests/bud/dns/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine:latest
+RUN cat /etc/resolv.conf \ No newline at end of file
diff --git a/tests/bud/dockerfile/Dockerfile b/tests/bud/dockerfile/Dockerfile
new file mode 100644
index 0000000..67fd379
--- /dev/null
+++ b/tests/bud/dockerfile/Dockerfile
@@ -0,0 +1 @@
+FROM alpine
diff --git a/tests/bud/dockerignore/.dockerignore b/tests/bud/dockerignore/.dockerignore
new file mode 100644
index 0000000..4cec094
--- /dev/null
+++ b/tests/bud/dockerignore/.dockerignore
@@ -0,0 +1,6 @@
+# comment
+*
+test*
+!test2*
+subdir
+!*/sub1* \ No newline at end of file
diff --git a/tests/bud/dockerignore/Dockerfile b/tests/bud/dockerignore/Dockerfile
new file mode 100644
index 0000000..4d930ef
--- /dev/null
+++ b/tests/bud/dockerignore/Dockerfile
@@ -0,0 +1,4 @@
+FROM alpine
+
+COPY ./ ./
+COPY subdir ./ \ No newline at end of file
diff --git a/tests/bud/dockerignore/Dockerfile.succeed b/tests/bud/dockerignore/Dockerfile.succeed
new file mode 100644
index 0000000..a164936
--- /dev/null
+++ b/tests/bud/dockerignore/Dockerfile.succeed
@@ -0,0 +1,3 @@
+FROM alpine
+
+COPY ./ ./
diff --git a/tests/bud/dockerignore/subdir/sub1.txt b/tests/bud/dockerignore/subdir/sub1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore/subdir/sub1.txt
diff --git a/tests/bud/dockerignore/subdir/sub2.txt b/tests/bud/dockerignore/subdir/sub2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore/subdir/sub2.txt
diff --git a/tests/bud/dockerignore/test1.txt b/tests/bud/dockerignore/test1.txt
new file mode 100644
index 0000000..745eda7
--- /dev/null
+++ b/tests/bud/dockerignore/test1.txt
@@ -0,0 +1 @@
+test1 failed \ No newline at end of file
diff --git a/tests/bud/dockerignore/test2.txt b/tests/bud/dockerignore/test2.txt
new file mode 100644
index 0000000..4e4d75d
--- /dev/null
+++ b/tests/bud/dockerignore/test2.txt
@@ -0,0 +1 @@
+test2 failed \ No newline at end of file
diff --git a/tests/bud/dockerignore2/.dockerignore b/tests/bud/dockerignore2/.dockerignore
new file mode 100644
index 0000000..7cf9e4b
--- /dev/null
+++ b/tests/bud/dockerignore2/.dockerignore
@@ -0,0 +1 @@
+unmatched
diff --git a/tests/bud/dockerignore2/Dockerfile b/tests/bud/dockerignore2/Dockerfile
new file mode 100644
index 0000000..04fdc27
--- /dev/null
+++ b/tests/bud/dockerignore2/Dockerfile
@@ -0,0 +1,2 @@
+FROM scratch
+COPY . .
diff --git a/tests/bud/dockerignore2/subdir/sub1.txt b/tests/bud/dockerignore2/subdir/sub1.txt
new file mode 100644
index 0000000..6b2fffa
--- /dev/null
+++ b/tests/bud/dockerignore2/subdir/sub1.txt
@@ -0,0 +1 @@
+sub1 \ No newline at end of file
diff --git a/tests/bud/dockerignore2/subdir/subsubdir/subsub1.txt b/tests/bud/dockerignore2/subdir/subsubdir/subsub1.txt
new file mode 100644
index 0000000..652f1b6
--- /dev/null
+++ b/tests/bud/dockerignore2/subdir/subsubdir/subsub1.txt
@@ -0,0 +1 @@
+subsub1
diff --git a/tests/bud/dockerignore3/.dockerignore b/tests/bud/dockerignore3/.dockerignore
new file mode 100644
index 0000000..5bfce4d
--- /dev/null
+++ b/tests/bud/dockerignore3/.dockerignore
@@ -0,0 +1,10 @@
+# comment
+*
+!test*
+!src
+**/*.in
+src/etc
+*.md
+!README*.md
+README-secret.md
+test1.txt
diff --git a/tests/bud/dockerignore3/BUILD.md b/tests/bud/dockerignore3/BUILD.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/BUILD.md
diff --git a/tests/bud/dockerignore3/COPYRIGHT b/tests/bud/dockerignore3/COPYRIGHT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/COPYRIGHT
diff --git a/tests/bud/dockerignore3/Dockerfile b/tests/bud/dockerignore3/Dockerfile
new file mode 100644
index 0000000..48a8a79
--- /dev/null
+++ b/tests/bud/dockerignore3/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox
+COPY . /upload/
+COPY src /upload/src2/
+COPY test1.txt /upload/test1.txt
+RUN echo "CUT HERE"; /bin/find /upload | LANG=en_US.UTF-8 sort; echo "CUT HERE"
diff --git a/tests/bud/dockerignore3/LICENSE b/tests/bud/dockerignore3/LICENSE
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/LICENSE
diff --git a/tests/bud/dockerignore3/README-secret.md b/tests/bud/dockerignore3/README-secret.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/README-secret.md
diff --git a/tests/bud/dockerignore3/README.md b/tests/bud/dockerignore3/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/README.md
diff --git a/tests/bud/dockerignore3/manifest b/tests/bud/dockerignore3/manifest
new file mode 100644
index 0000000..0ada42d
--- /dev/null
+++ b/tests/bud/dockerignore3/manifest
@@ -0,0 +1,16 @@
+/upload
+/upload/README.md
+/upload/src
+/upload/src/Makefile
+/upload/src/cmd
+/upload/src/cmd/Makefile
+/upload/src/lib
+/upload/src/lib/Makefile
+/upload/src2
+/upload/src2/Makefile
+/upload/src2/cmd
+/upload/src2/cmd/Makefile
+/upload/src2/lib
+/upload/src2/lib/Makefile
+/upload/test2.txt
+/upload/test3.txt
diff --git a/tests/bud/dockerignore3/src/Makefile b/tests/bud/dockerignore3/src/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/src/Makefile
diff --git a/tests/bud/dockerignore3/src/cmd/Makefile b/tests/bud/dockerignore3/src/cmd/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/src/cmd/Makefile
diff --git a/tests/bud/dockerignore3/src/cmd/main.in b/tests/bud/dockerignore3/src/cmd/main.in
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/src/cmd/main.in
diff --git a/tests/bud/dockerignore3/src/etc/foo.conf b/tests/bud/dockerignore3/src/etc/foo.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/src/etc/foo.conf
diff --git a/tests/bud/dockerignore3/src/etc/foo.conf.d/dropin.conf b/tests/bud/dockerignore3/src/etc/foo.conf.d/dropin.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/src/etc/foo.conf.d/dropin.conf
diff --git a/tests/bud/dockerignore3/src/lib/Makefile b/tests/bud/dockerignore3/src/lib/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/src/lib/Makefile
diff --git a/tests/bud/dockerignore3/src/lib/framework.in b/tests/bud/dockerignore3/src/lib/framework.in
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/src/lib/framework.in
diff --git a/tests/bud/dockerignore3/test1.txt b/tests/bud/dockerignore3/test1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/test1.txt
diff --git a/tests/bud/dockerignore3/test2.txt b/tests/bud/dockerignore3/test2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/test2.txt
diff --git a/tests/bud/dockerignore3/test3.txt b/tests/bud/dockerignore3/test3.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore3/test3.txt
diff --git a/tests/bud/dockerignore4/BUILD.md b/tests/bud/dockerignore4/BUILD.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/BUILD.md
diff --git a/tests/bud/dockerignore4/COPYRIGHT b/tests/bud/dockerignore4/COPYRIGHT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/COPYRIGHT
diff --git a/tests/bud/dockerignore4/Dockerfile.test b/tests/bud/dockerignore4/Dockerfile.test
new file mode 100644
index 0000000..48a8a79
--- /dev/null
+++ b/tests/bud/dockerignore4/Dockerfile.test
@@ -0,0 +1,5 @@
+FROM busybox
+COPY . /upload/
+COPY src /upload/src2/
+COPY test1.txt /upload/test1.txt
+RUN echo "CUT HERE"; /bin/find /upload | LANG=en_US.UTF-8 sort; echo "CUT HERE"
diff --git a/tests/bud/dockerignore4/Dockerfile.test.dockerignore b/tests/bud/dockerignore4/Dockerfile.test.dockerignore
new file mode 100644
index 0000000..5bfce4d
--- /dev/null
+++ b/tests/bud/dockerignore4/Dockerfile.test.dockerignore
@@ -0,0 +1,10 @@
+# comment
+*
+!test*
+!src
+**/*.in
+src/etc
+*.md
+!README*.md
+README-secret.md
+test1.txt
diff --git a/tests/bud/dockerignore4/LICENSE b/tests/bud/dockerignore4/LICENSE
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/LICENSE
diff --git a/tests/bud/dockerignore4/README-secret.md b/tests/bud/dockerignore4/README-secret.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/README-secret.md
diff --git a/tests/bud/dockerignore4/README.md b/tests/bud/dockerignore4/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/README.md
diff --git a/tests/bud/dockerignore4/manifest b/tests/bud/dockerignore4/manifest
new file mode 100644
index 0000000..0ada42d
--- /dev/null
+++ b/tests/bud/dockerignore4/manifest
@@ -0,0 +1,16 @@
+/upload
+/upload/README.md
+/upload/src
+/upload/src/Makefile
+/upload/src/cmd
+/upload/src/cmd/Makefile
+/upload/src/lib
+/upload/src/lib/Makefile
+/upload/src2
+/upload/src2/Makefile
+/upload/src2/cmd
+/upload/src2/cmd/Makefile
+/upload/src2/lib
+/upload/src2/lib/Makefile
+/upload/test2.txt
+/upload/test3.txt
diff --git a/tests/bud/dockerignore4/src/Makefile b/tests/bud/dockerignore4/src/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/src/Makefile
diff --git a/tests/bud/dockerignore4/src/cmd/Makefile b/tests/bud/dockerignore4/src/cmd/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/src/cmd/Makefile
diff --git a/tests/bud/dockerignore4/src/cmd/main.in b/tests/bud/dockerignore4/src/cmd/main.in
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/src/cmd/main.in
diff --git a/tests/bud/dockerignore4/src/etc/foo.conf b/tests/bud/dockerignore4/src/etc/foo.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/src/etc/foo.conf
diff --git a/tests/bud/dockerignore4/src/etc/foo.conf.d/dropin.conf b/tests/bud/dockerignore4/src/etc/foo.conf.d/dropin.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/src/etc/foo.conf.d/dropin.conf
diff --git a/tests/bud/dockerignore4/src/lib/Makefile b/tests/bud/dockerignore4/src/lib/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/src/lib/Makefile
diff --git a/tests/bud/dockerignore4/src/lib/framework.in b/tests/bud/dockerignore4/src/lib/framework.in
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/src/lib/framework.in
diff --git a/tests/bud/dockerignore4/test1.txt b/tests/bud/dockerignore4/test1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/test1.txt
diff --git a/tests/bud/dockerignore4/test2.txt b/tests/bud/dockerignore4/test2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/test2.txt
diff --git a/tests/bud/dockerignore4/test3.txt b/tests/bud/dockerignore4/test3.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore4/test3.txt
diff --git a/tests/bud/dockerignore6/Dockerfile b/tests/bud/dockerignore6/Dockerfile
new file mode 100644
index 0000000..4d930ef
--- /dev/null
+++ b/tests/bud/dockerignore6/Dockerfile
@@ -0,0 +1,4 @@
+FROM alpine
+
+COPY ./ ./
+COPY subdir ./ \ No newline at end of file
diff --git a/tests/bud/dockerignore6/Dockerfile.dockerignore b/tests/bud/dockerignore6/Dockerfile.dockerignore
new file mode 100644
index 0000000..4cec094
--- /dev/null
+++ b/tests/bud/dockerignore6/Dockerfile.dockerignore
@@ -0,0 +1,6 @@
+# comment
+*
+test*
+!test2*
+subdir
+!*/sub1* \ No newline at end of file
diff --git a/tests/bud/dockerignore6/Dockerfile.succeed b/tests/bud/dockerignore6/Dockerfile.succeed
new file mode 100644
index 0000000..a164936
--- /dev/null
+++ b/tests/bud/dockerignore6/Dockerfile.succeed
@@ -0,0 +1,3 @@
+FROM alpine
+
+COPY ./ ./
diff --git a/tests/bud/dockerignore6/Dockerfile.succeed.dockerignore b/tests/bud/dockerignore6/Dockerfile.succeed.dockerignore
new file mode 100644
index 0000000..4cec094
--- /dev/null
+++ b/tests/bud/dockerignore6/Dockerfile.succeed.dockerignore
@@ -0,0 +1,6 @@
+# comment
+*
+test*
+!test2*
+subdir
+!*/sub1* \ No newline at end of file
diff --git a/tests/bud/dockerignore6/subdir/sub1.txt b/tests/bud/dockerignore6/subdir/sub1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore6/subdir/sub1.txt
diff --git a/tests/bud/dockerignore6/subdir/sub2.txt b/tests/bud/dockerignore6/subdir/sub2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/bud/dockerignore6/subdir/sub2.txt
diff --git a/tests/bud/dockerignore6/test1.txt b/tests/bud/dockerignore6/test1.txt
new file mode 100644
index 0000000..745eda7
--- /dev/null
+++ b/tests/bud/dockerignore6/test1.txt
@@ -0,0 +1 @@
+test1 failed \ No newline at end of file
diff --git a/tests/bud/dockerignore6/test2.txt b/tests/bud/dockerignore6/test2.txt
new file mode 100644
index 0000000..4e4d75d
--- /dev/null
+++ b/tests/bud/dockerignore6/test2.txt
@@ -0,0 +1 @@
+test2 failed \ No newline at end of file
diff --git a/tests/bud/dupe-arg-env-name/Containerfile b/tests/bud/dupe-arg-env-name/Containerfile
new file mode 100644
index 0000000..62bbf12
--- /dev/null
+++ b/tests/bud/dupe-arg-env-name/Containerfile
@@ -0,0 +1,7 @@
+FROM centos:8
+ARG FOO=bar
+ARG WEBROOT=https://example.org/
+
+ENV WEBROOT="$WEBROOT$FOO"
+
+RUN echo "${WEBROOT}"
diff --git a/tests/bud/env/Dockerfile.check-env b/tests/bud/env/Dockerfile.check-env
new file mode 100644
index 0000000..546acd9
--- /dev/null
+++ b/tests/bud/env/Dockerfile.check-env
@@ -0,0 +1,2 @@
+FROM alpine
+ENV foo=bar
diff --git a/tests/bud/env/Dockerfile.env-from-image b/tests/bud/env/Dockerfile.env-from-image
new file mode 100644
index 0000000..928cdf3
--- /dev/null
+++ b/tests/bud/env/Dockerfile.env-from-image
@@ -0,0 +1,2 @@
+FROM env-from-image
+RUN echo "@${envcheck}@"
diff --git a/tests/bud/env/Dockerfile.env-precedence b/tests/bud/env/Dockerfile.env-precedence
new file mode 100644
index 0000000..d8824b8
--- /dev/null
+++ b/tests/bud/env/Dockerfile.env-precedence
@@ -0,0 +1,17 @@
+FROM alpine
+ENV a=b
+ENV c=d
+# E and G are passed in on the command-line, and we haven't overridden them yet, so the command will get the CLI values.
+RUN echo a=$a c=$c E=$E G=$G
+ENV E=E G=G
+# We just set E and G, and that will override values passed at the command line thanks to imagebuilder's handling of ENV instructions.
+RUN echo a=$a c=$c E=$E G=$G
+
+FROM 0
+ENV w=x
+ENV y=z
+# I and K are passed in on the command-line, and we haven't overridden them yet, so the command will get the CLI values.
+RUN echo w=$w y=$y I=$I K=$K
+ENV I=I K=K
+# We just set I and K, and that will override values passed at the command line thanks to imagebuilder's handling of ENV instructions.
+RUN echo w=$w y=$y I=$I K=$K
diff --git a/tests/bud/env/Dockerfile.env-same-file b/tests/bud/env/Dockerfile.env-same-file
new file mode 100644
index 0000000..62c6cf6
--- /dev/null
+++ b/tests/bud/env/Dockerfile.env-same-file
@@ -0,0 +1,3 @@
+FROM alpine
+ENV envcheck "unique.test.string"
+RUN echo ":${envcheck}:"
diff --git a/tests/bud/env/Dockerfile.special-chars b/tests/bud/env/Dockerfile.special-chars
new file mode 100644
index 0000000..01a2d13
--- /dev/null
+++ b/tests/bud/env/Dockerfile.special-chars
@@ -0,0 +1,3 @@
+FROM docker.io/ubuntu
+ENV LIB="$(PREFIX)/lib"
+
diff --git a/tests/bud/exit42/Containerfile b/tests/bud/exit42/Containerfile
new file mode 100644
index 0000000..1647012
--- /dev/null
+++ b/tests/bud/exit42/Containerfile
@@ -0,0 +1,2 @@
+FROM alpine
+RUN sh -c "exit 42"
diff --git a/tests/bud/from-as/Dockerfile b/tests/bud/from-as/Dockerfile
new file mode 100644
index 0000000..5b0e36e
--- /dev/null
+++ b/tests/bud/from-as/Dockerfile
@@ -0,0 +1,7 @@
+FROM alpine AS base
+RUN touch /1
+ENV LOCAL=/1
+RUN find $LOCAL
+
+FROM base
+RUN find $LOCAL
diff --git a/tests/bud/from-as/Dockerfile.skip b/tests/bud/from-as/Dockerfile.skip
new file mode 100644
index 0000000..e292d90
--- /dev/null
+++ b/tests/bud/from-as/Dockerfile.skip
@@ -0,0 +1,19 @@
+FROM alpine AS base
+RUN touch /1
+ENV LOCAL=/1
+RUN find $LOCAL
+RUN touch hello
+
+FROM base
+RUN find $LOCAL
+RUN touch /2
+ENV LOCAL2=/2
+RUN find $LOCAL2
+# so we don't end up skipping stage: 0
+COPY --from=0 hello .
+
+FROM base
+RUN find $LOCAL
+RUN ls /
+# so we don't end up skipping stage: 1
+COPY --from=1 hello .
diff --git a/tests/bud/from-invalid-registry/Containerfile b/tests/bud/from-invalid-registry/Containerfile
new file mode 100644
index 0000000..00dc308
--- /dev/null
+++ b/tests/bud/from-invalid-registry/Containerfile
@@ -0,0 +1,3 @@
+FROM alpine as build
+# Invalid registry and image
+FROM myrepository.example/image:tag
diff --git a/tests/bud/from-multiple-files/Dockerfile1.alpine b/tests/bud/from-multiple-files/Dockerfile1.alpine
new file mode 100644
index 0000000..c6e3fc4
--- /dev/null
+++ b/tests/bud/from-multiple-files/Dockerfile1.alpine
@@ -0,0 +1,2 @@
+FROM alpine
+COPY Dockerfile1.alpine /Dockerfile1
diff --git a/tests/bud/from-multiple-files/Dockerfile1.scratch b/tests/bud/from-multiple-files/Dockerfile1.scratch
new file mode 100644
index 0000000..4f9ab8a
--- /dev/null
+++ b/tests/bud/from-multiple-files/Dockerfile1.scratch
@@ -0,0 +1,2 @@
+FROM scratch
+COPY Dockerfile1.scratch /Dockerfile1
diff --git a/tests/bud/from-multiple-files/Dockerfile2.glob b/tests/bud/from-multiple-files/Dockerfile2.glob
new file mode 100644
index 0000000..1d843ba
--- /dev/null
+++ b/tests/bud/from-multiple-files/Dockerfile2.glob
@@ -0,0 +1,2 @@
+FROM alpine
+COPY Dockerfile* /
diff --git a/tests/bud/from-multiple-files/Dockerfile2.nofrom b/tests/bud/from-multiple-files/Dockerfile2.nofrom
new file mode 100644
index 0000000..0473c91
--- /dev/null
+++ b/tests/bud/from-multiple-files/Dockerfile2.nofrom
@@ -0,0 +1 @@
+COPY Dockerfile2.nofrom /
diff --git a/tests/bud/from-multiple-files/Dockerfile2.withfrom b/tests/bud/from-multiple-files/Dockerfile2.withfrom
new file mode 100644
index 0000000..fa3b969
--- /dev/null
+++ b/tests/bud/from-multiple-files/Dockerfile2.withfrom
@@ -0,0 +1,2 @@
+FROM alpine
+COPY Dockerfile2.withfrom /
diff --git a/tests/bud/from-scratch/Containerfile b/tests/bud/from-scratch/Containerfile
new file mode 100644
index 0000000..c35f1b5
--- /dev/null
+++ b/tests/bud/from-scratch/Containerfile
@@ -0,0 +1 @@
+FROM scratch
diff --git a/tests/bud/from-scratch/Containerfile2 b/tests/bud/from-scratch/Containerfile2
new file mode 100644
index 0000000..0b69d69
--- /dev/null
+++ b/tests/bud/from-scratch/Containerfile2
@@ -0,0 +1,4 @@
+
+FROM scratch
+USER 1001
+
diff --git a/tests/bud/from-scratch/Dockerfile b/tests/bud/from-scratch/Dockerfile
new file mode 100644
index 0000000..c35f1b5
--- /dev/null
+++ b/tests/bud/from-scratch/Dockerfile
@@ -0,0 +1 @@
+FROM scratch
diff --git a/tests/bud/from-with-arg/Containerfile b/tests/bud/from-with-arg/Containerfile
new file mode 100644
index 0000000..c68e9af
--- /dev/null
+++ b/tests/bud/from-with-arg/Containerfile
@@ -0,0 +1,7 @@
+ARG base
+FROM ${base}
+
+ARG toolchainname
+ARG destinationpath
+
+COPY --from=${toolchainname} / ${destinationpath}
diff --git a/tests/bud/group/Containerfile b/tests/bud/group/Containerfile
new file mode 100644
index 0000000..033c54d
--- /dev/null
+++ b/tests/bud/group/Containerfile
@@ -0,0 +1,11 @@
+FROM alpine
+
+RUN adduser -D -g 'Susan' susan \
+ && addgroup cool_kids \
+ && addgroup susan cool_kids \
+ && addgroup good_kids \
+ && addgroup susan good_kids
+
+USER susan
+
+RUN groups | grep cool_kids
diff --git a/tests/bud/hardlink/Dockerfile b/tests/bud/hardlink/Dockerfile
new file mode 100644
index 0000000..a64709c
--- /dev/null
+++ b/tests/bud/hardlink/Dockerfile
@@ -0,0 +1,3 @@
+FROM scratch
+COPY . .
+COPY . .
diff --git a/tests/bud/healthcheck/Dockerfile b/tests/bud/healthcheck/Dockerfile
new file mode 100644
index 0000000..3fe3644
--- /dev/null
+++ b/tests/bud/healthcheck/Dockerfile
@@ -0,0 +1,3 @@
+FROM alpine
+HEALTHCHECK --start-period=10m --interval=5m --timeout=3s --retries=4 \
+ CMD curl -f http://localhost/ || exit 1
diff --git a/tests/bud/heredoc/Containerfile b/tests/bud/heredoc/Containerfile
new file mode 100644
index 0000000..878ba09
--- /dev/null
+++ b/tests/bud/heredoc/Containerfile
@@ -0,0 +1,59 @@
+FROM fedora
+
+RUN <<EOF
+echo "print first line from heredoc"
+echo "print second line from heredoc"
+EOF
+
+RUN <<EOF
+echo "Heredoc writing first file" >> /file1
+echo "some text of first file" >> /file1
+EOF
+
+RUN cat file1
+
+RUN python3 <<EOF
+with open("/file2", "w") as f:
+ print("file2 from python", file=f)
+EOF
+
+RUN cat file2
+
+ADD <<EOF /index.html
+ (your index page goes here)
+EOF
+
+RUN cat index.html
+
+COPY <<robots.txt <<humans.txt /test/
+(robots content)
+robots.txt
+(humans content)
+humans.txt
+
+RUN cat /proc/self/fd/5 /proc/self/fd/6 5<<FILE1 6<<FILE2 > test6.txt
+this is the output of test6 part1
+FILE1
+this is the output of test6 part2
+FILE2
+
+RUN 5<<file cat /proc/self/fd/5 /proc/self/fd/6 6<<FILE | cat /dev/stdin /proc/self/fd/6 6<<File > test7.txt
+this is the output of test7 part1
+file
+this is the output of test7 part2
+FILE
+this is the output of test7 part3
+File
+
+RUN <<FILE1 cat > test8.1 && <<FILE2 cat > test8.2
+this is the output of test8 part1
+FILE1
+this is the output of test8 part2
+FILE2
+
+RUN cat /test/robots.txt
+RUN cat /test/humans.txt
+RUN cat test6.txt
+RUN cat test7.txt
+RUN cat test8.1
+RUN cat test8.2
diff --git a/tests/bud/heredoc/Containerfile.bash_file b/tests/bud/heredoc/Containerfile.bash_file
new file mode 100644
index 0000000..89032a1
--- /dev/null
+++ b/tests/bud/heredoc/Containerfile.bash_file
@@ -0,0 +1,15 @@
+FROM busybox
+RUN <<EOF
+#!/bin/sh
+echo "
+ this is the output of test9" > test9.txt
+EOF
+
+RUN <<-EOF
+#!/bin/sh
+echo "
+ this is the output of test10" > test10.txt
+EOF
+
+RUN cat test9.txt
+RUN cat test10.txt
diff --git a/tests/bud/heredoc/Containerfile.verify_mount_leak b/tests/bud/heredoc/Containerfile.verify_mount_leak
new file mode 100644
index 0000000..753fd89
--- /dev/null
+++ b/tests/bud/heredoc/Containerfile.verify_mount_leak
@@ -0,0 +1,17 @@
+FROM alpine
+
+RUN <<EOF
+#!/bin/sh
+echo "
+ this is the output of test" > test.txt
+# Mount of this file must exists till this run step
+# so this `ls` command should not fail
+ls -a /dev/pipes/
+EOF
+
+RUN cat test.txt
+
+# This ls command must fail, since mount is removed in this step
+RUN ls -a /dev/pipes
+
+
diff --git a/tests/bud/http-context-containerfile/context.tar b/tests/bud/http-context-containerfile/context.tar
new file mode 100644
index 0000000..743acbd
--- /dev/null
+++ b/tests/bud/http-context-containerfile/context.tar
Binary files differ
diff --git a/tests/bud/http-context-subdir/context.tar b/tests/bud/http-context-subdir/context.tar
new file mode 100644
index 0000000..533ae52
--- /dev/null
+++ b/tests/bud/http-context-subdir/context.tar
Binary files differ
diff --git a/tests/bud/http-context/context.tar b/tests/bud/http-context/context.tar
new file mode 100644
index 0000000..2e5f3a5
--- /dev/null
+++ b/tests/bud/http-context/context.tar
Binary files differ
diff --git a/tests/bud/inline-network/Dockerfile1 b/tests/bud/inline-network/Dockerfile1
new file mode 100644
index 0000000..bcae664
--- /dev/null
+++ b/tests/bud/inline-network/Dockerfile1
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --network=host readlink /proc/self/ns/net
diff --git a/tests/bud/inline-network/Dockerfile2 b/tests/bud/inline-network/Dockerfile2
new file mode 100644
index 0000000..332134c
--- /dev/null
+++ b/tests/bud/inline-network/Dockerfile2
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --network=none wget google.com
diff --git a/tests/bud/inline-network/Dockerfile3 b/tests/bud/inline-network/Dockerfile3
new file mode 100644
index 0000000..a3c33ec
--- /dev/null
+++ b/tests/bud/inline-network/Dockerfile3
@@ -0,0 +1,3 @@
+FROM alpine
+RUN --network=fake wget google.com
+
diff --git a/tests/bud/inline-network/Dockerfile4 b/tests/bud/inline-network/Dockerfile4
new file mode 100644
index 0000000..bb75794
--- /dev/null
+++ b/tests/bud/inline-network/Dockerfile4
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --network=default readlink /proc/self/ns/net
diff --git a/tests/bud/layers-squash/Dockerfile b/tests/bud/layers-squash/Dockerfile
new file mode 100644
index 0000000..faf71cd
--- /dev/null
+++ b/tests/bud/layers-squash/Dockerfile
@@ -0,0 +1,3 @@
+FROM alpine
+USER root
+COPY artifact /artifact
diff --git a/tests/bud/layers-squash/Dockerfile.hardlinks b/tests/bud/layers-squash/Dockerfile.hardlinks
new file mode 100644
index 0000000..fac68e8
--- /dev/null
+++ b/tests/bud/layers-squash/Dockerfile.hardlinks
@@ -0,0 +1,3 @@
+FROM busybox
+COPY artifact /subdir/artifact
+RUN ln -f /subdir/artifact /subdir/artifact-hardlink
diff --git a/tests/bud/layers-squash/Dockerfile.multi-stage b/tests/bud/layers-squash/Dockerfile.multi-stage
new file mode 100644
index 0000000..630bbcf
--- /dev/null
+++ b/tests/bud/layers-squash/Dockerfile.multi-stage
@@ -0,0 +1,9 @@
+# Following stage must be picked from cache
+FROM busybox as one
+RUN echo hello
+RUN echo hello > world
+
+# Following stage must be picked from cache except last instruction
+FROM busybox as two
+RUN echo hello1
+RUN echo helloworld
diff --git a/tests/bud/layers-squash/artifact b/tests/bud/layers-squash/artifact
new file mode 100644
index 0000000..ae5b3c5
--- /dev/null
+++ b/tests/bud/layers-squash/artifact
@@ -0,0 +1 @@
+Aaa
diff --git a/tests/bud/leading-args/Dockerfile b/tests/bud/leading-args/Dockerfile
new file mode 100644
index 0000000..5dc24be
--- /dev/null
+++ b/tests/bud/leading-args/Dockerfile
@@ -0,0 +1,5 @@
+ARG VERSION=latest
+ARG FOO=bar
+FROM busybox:$VERSION
+ENV FOO $FOO
+RUN echo $FOO $VERSION
diff --git a/tests/bud/long-sleep/Dockerfile b/tests/bud/long-sleep/Dockerfile
new file mode 100644
index 0000000..d620feb
--- /dev/null
+++ b/tests/bud/long-sleep/Dockerfile
@@ -0,0 +1,4 @@
+FROM alpine
+# this can be a long long time, since the test should kill it long before this has elapsed
+RUN sleep 300
+RUN echo not fully killed
diff --git a/tests/bud/maintainer/Dockerfile b/tests/bud/maintainer/Dockerfile
new file mode 100644
index 0000000..c217d18
--- /dev/null
+++ b/tests/bud/maintainer/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+MAINTAINER kilroy
diff --git a/tests/bud/mount/Dockerfile b/tests/bud/mount/Dockerfile
new file mode 100644
index 0000000..6248753
--- /dev/null
+++ b/tests/bud/mount/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+RUN mount
diff --git a/tests/bud/multi-stage-builds-small-as/Dockerfile.index b/tests/bud/multi-stage-builds-small-as/Dockerfile.index
new file mode 100644
index 0000000..f3ee8c7
--- /dev/null
+++ b/tests/bud/multi-stage-builds-small-as/Dockerfile.index
@@ -0,0 +1,5 @@
+FROM scratch
+COPY Dockerfile.index /
+
+FROM alpine
+COPY --from=0 /Dockerfile.index /Dockerfile.index
diff --git a/tests/bud/multi-stage-builds-small-as/Dockerfile.mixed b/tests/bud/multi-stage-builds-small-as/Dockerfile.mixed
new file mode 100644
index 0000000..9323c23
--- /dev/null
+++ b/tests/bud/multi-stage-builds-small-as/Dockerfile.mixed
@@ -0,0 +1,13 @@
+FROM scratch as myname
+COPY Dockerfile.name /
+
+FROM scratch as myname2
+COPY Dockerfile.index /
+
+FROM scratch
+COPY Dockerfile.mixed /
+
+FROM scratch
+COPY --from=myname /Dockerfile.name /Dockerfile.name
+COPY --from=1 /Dockerfile.index /Dockerfile.index
+COPY --from=2 /Dockerfile.mixed /Dockerfile.mixed
diff --git a/tests/bud/multi-stage-builds-small-as/Dockerfile.name b/tests/bud/multi-stage-builds-small-as/Dockerfile.name
new file mode 100644
index 0000000..8908f5c
--- /dev/null
+++ b/tests/bud/multi-stage-builds-small-as/Dockerfile.name
@@ -0,0 +1,5 @@
+FROM alpine as myname
+COPY Dockerfile.name /
+
+FROM scratch
+COPY --from=myname /Dockerfile.name /Dockerfile.name
diff --git a/tests/bud/multi-stage-builds/Dockerfile.arg b/tests/bud/multi-stage-builds/Dockerfile.arg
new file mode 100644
index 0000000..2a4992b
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.arg
@@ -0,0 +1,6 @@
+FROM alpine
+ARG SECRET
+RUN echo $SECRET
+
+FROM alpine
+RUN echo "$SECRET" > test_file
diff --git a/tests/bud/multi-stage-builds/Dockerfile.arg_in_copy b/tests/bud/multi-stage-builds/Dockerfile.arg_in_copy
new file mode 100644
index 0000000..e46c430
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.arg_in_copy
@@ -0,0 +1,8 @@
+ARG my_env=a
+
+FROM alpine as stage_a
+RUN /bin/true
+
+FROM alpine
+ARG my_env
+COPY --from=stage_${my_env} /bin/true /bin/true_copy
diff --git a/tests/bud/multi-stage-builds/Dockerfile.arg_in_stage b/tests/bud/multi-stage-builds/Dockerfile.arg_in_stage
new file mode 100644
index 0000000..3924b16
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.arg_in_stage
@@ -0,0 +1,7 @@
+ARG my_env=a
+
+FROM alpine as stage_a
+RUN /bin/true
+
+FROM stage_${my_env} as stage_b
+RUN /bin/true
diff --git a/tests/bud/multi-stage-builds/Dockerfile.extended b/tests/bud/multi-stage-builds/Dockerfile.extended
new file mode 100644
index 0000000..0debcf8
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.extended
@@ -0,0 +1,13 @@
+FROM busybox:latest AS builder
+ENV "BUILD_LOGLEVEL"="5"
+RUN touch /tmp/preCommit
+ENTRYPOINT /bin/sleep 600
+ENV "OPENSHIFT_BUILD_NAME"="mydockertest-1" "OPENSHIFT_BUILD_NAMESPACE"="default"
+LABEL "io.openshift.build.name"="mydockertest-1" "io.openshift.build.namespace"="default"
+
+FROM builder
+ENV "BUILD_LOGLEVEL"="5"
+RUN touch /tmp/postCommit
+
+FROM builder
+ENV "BUILD_LOGLEVEL"="5"
diff --git a/tests/bud/multi-stage-builds/Dockerfile.index b/tests/bud/multi-stage-builds/Dockerfile.index
new file mode 100644
index 0000000..f3ee8c7
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.index
@@ -0,0 +1,5 @@
+FROM scratch
+COPY Dockerfile.index /
+
+FROM alpine
+COPY --from=0 /Dockerfile.index /Dockerfile.index
diff --git a/tests/bud/multi-stage-builds/Dockerfile.mixed b/tests/bud/multi-stage-builds/Dockerfile.mixed
new file mode 100644
index 0000000..b436a50
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.mixed
@@ -0,0 +1,13 @@
+FROM scratch AS myname
+COPY Dockerfile.name /
+
+FROM scratch AS myname2
+COPY Dockerfile.index /
+
+FROM scratch
+COPY Dockerfile.mixed /
+
+FROM scratch
+COPY --from=myname /Dockerfile.name /Dockerfile.name
+COPY --from=1 /Dockerfile.index /Dockerfile.index
+COPY --from=2 /Dockerfile.mixed /Dockerfile.mixed
diff --git a/tests/bud/multi-stage-builds/Dockerfile.name b/tests/bud/multi-stage-builds/Dockerfile.name
new file mode 100644
index 0000000..ab22ce9
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.name
@@ -0,0 +1,5 @@
+FROM alpine AS myname
+COPY Dockerfile.name /
+
+FROM scratch
+COPY --from=myname /Dockerfile.name /Dockerfile.name
diff --git a/tests/bud/multi-stage-builds/Dockerfile.rebase b/tests/bud/multi-stage-builds/Dockerfile.rebase
new file mode 100644
index 0000000..5addf37
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.rebase
@@ -0,0 +1,8 @@
+FROM alpine AS myname
+COPY Dockerfile.name /
+
+FROM myname
+RUN pwd
+
+FROM myname
+RUN id
diff --git a/tests/bud/multi-stage-builds/Dockerfile.reused b/tests/bud/multi-stage-builds/Dockerfile.reused
new file mode 100644
index 0000000..0aad24e
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.reused
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN pwd
+FROM base
+RUN pwd
+FROM base
diff --git a/tests/bud/multi-stage-builds/Dockerfile.reused2 b/tests/bud/multi-stage-builds/Dockerfile.reused2
new file mode 100644
index 0000000..2cac548
--- /dev/null
+++ b/tests/bud/multi-stage-builds/Dockerfile.reused2
@@ -0,0 +1,6 @@
+FROM busybox as base
+RUN pwd
+FROM base
+RUN pwd
+FROM base
+RUN pwd
diff --git a/tests/bud/multi-stage-only-base/Containerfile1 b/tests/bud/multi-stage-only-base/Containerfile1
new file mode 100644
index 0000000..1d61b0b
--- /dev/null
+++ b/tests/bud/multi-stage-only-base/Containerfile1
@@ -0,0 +1,2 @@
+FROM alpine
+RUN echo "parent-one" > parent-one
diff --git a/tests/bud/multi-stage-only-base/Containerfile2 b/tests/bud/multi-stage-only-base/Containerfile2
new file mode 100644
index 0000000..74a3966
--- /dev/null
+++ b/tests/bud/multi-stage-only-base/Containerfile2
@@ -0,0 +1,2 @@
+FROM alpine
+RUN echo "parent-two" > parent-two
diff --git a/tests/bud/multi-stage-only-base/Containerfile3 b/tests/bud/multi-stage-only-base/Containerfile3
new file mode 100644
index 0000000..38c84d0
--- /dev/null
+++ b/tests/bud/multi-stage-only-base/Containerfile3
@@ -0,0 +1,3 @@
+FROM localhost/parent-one as p1
+FROM localhost/parent-two
+COPY --from=p1 parent-one .
diff --git a/tests/bud/multiarch/Dockerfile b/tests/bud/multiarch/Dockerfile
new file mode 100644
index 0000000..9f0d863
--- /dev/null
+++ b/tests/bud/multiarch/Dockerfile
@@ -0,0 +1,10 @@
+FROM alpine AS base
+RUN cp /etc/apk/arch /root/arch-base
+
+FROM alpine
+# Make sure that non-default arch doesn't mess with copying from previous stages.
+COPY --from=base /root/arch-base /root/
+# Make sure that COPY --from=image uses the image for the preferred architecture.
+COPY --from=alpine /etc/apk/arch /root/
+RUN cmp /etc/apk/arch /root/arch
+RUN cmp /etc/apk/arch /root/arch-base
diff --git a/tests/bud/multiarch/Dockerfile.built-in-args b/tests/bud/multiarch/Dockerfile.built-in-args
new file mode 100644
index 0000000..3718a61
--- /dev/null
+++ b/tests/bud/multiarch/Dockerfile.built-in-args
@@ -0,0 +1,6 @@
+FROM --platform=$BUILDPLATFORM alpine
+ARG TARGETPLATFORM
+ARG TARGETOS
+ARG TARGETARCH
+ARG BUILDPLATFORM
+RUN echo "I'm compiling for $TARGETPLATFORM on $BUILDPLATFORM and tagging for $TARGETPLATFORM and OS $TARGETOS and ARCH $TARGETARCH"
diff --git a/tests/bud/multiarch/Dockerfile.fail b/tests/bud/multiarch/Dockerfile.fail
new file mode 100644
index 0000000..d1cfc39
--- /dev/null
+++ b/tests/bud/multiarch/Dockerfile.fail
@@ -0,0 +1,5 @@
+# This build should fail if we're building with at least one non-amd64 platform
+# either because we can't execute this test binary, or because it executed fine
+# but returned an error
+FROM alpine
+RUN test `arch` = x86_64
diff --git a/tests/bud/multiarch/Dockerfile.fail-multistage b/tests/bud/multiarch/Dockerfile.fail-multistage
new file mode 100644
index 0000000..6e07f83
--- /dev/null
+++ b/tests/bud/multiarch/Dockerfile.fail-multistage
@@ -0,0 +1,19 @@
+ARG SAFEIMAGE
+FROM $SAFEIMAGE
+RUN touch -r /etc/os-release /timestamped
+RUN sleep 0
+FROM $SAFEIMAGE
+COPY --from=0 /timestamped /timestamped
+RUN sleep 0
+FROM $SAFEIMAGE
+COPY --from=1 /timestamped /timestamped
+RUN sleep 0
+FROM $SAFEIMAGE
+COPY --from=2 /timestamped /timestamped
+RUN false
+FROM $SAFEIMAGE
+COPY --from=3 /timestamped /timestamped
+RUN sleep 0
+FROM $SAFEIMAGE
+COPY --from=4 /timestamped /timestamped
+RUN sleep 0
diff --git a/tests/bud/multiarch/Dockerfile.no-run b/tests/bud/multiarch/Dockerfile.no-run
new file mode 100644
index 0000000..ef89ae6
--- /dev/null
+++ b/tests/bud/multiarch/Dockerfile.no-run
@@ -0,0 +1,8 @@
+# A base image that is known to be a manifest list.
+FROM docker.io/library/alpine
+COPY Dockerfile.no-run /root/
+# A different base image that is known to be a manifest list, supporting a
+# different but partially-overlapping set of platforms.
+ARG SAFEIMAGE
+FROM $SAFEIMAGE
+COPY --from=0 /root/Dockerfile.no-run /root/
diff --git a/tests/bud/namespaces/Containerfile b/tests/bud/namespaces/Containerfile
new file mode 100644
index 0000000..24ffb8b
--- /dev/null
+++ b/tests/bud/namespaces/Containerfile
@@ -0,0 +1,12 @@
+FROM alpine
+RUN echo "ReadlinkResult" && readlink /proc/self/ns/user
+RUN echo "UidMapResult" && cat /proc/self/uid_map
+RUN echo "GidMapResult" && cat /proc/self/gid_map
+COPY --chown=1:1 somefile /
+RUN echo "StatSomefileResult" && stat -c '%u:%g' /somefile
+COPY somedir /somedir
+RUN echo "StatSomedirResult" && stat -c '%u:%g' /somedir
+RUN echo "StatSomeotherfileResult" && stat -c '%u:%g %a' /somedir/someotherfile
+USER guest
+WORKDIR /new-workdir
+RUN echo "StatNewWorkdir" && stat -c '%U:%G' $PWD
diff --git a/tests/bud/network/Containerfile b/tests/bud/network/Containerfile
new file mode 100644
index 0000000..76dfa07
--- /dev/null
+++ b/tests/bud/network/Containerfile
@@ -0,0 +1,2 @@
+FROM alpine
+RUN ip addr
diff --git a/tests/bud/no-change/Dockerfile b/tests/bud/no-change/Dockerfile
new file mode 100644
index 0000000..67fd379
--- /dev/null
+++ b/tests/bud/no-change/Dockerfile
@@ -0,0 +1 @@
+FROM alpine
diff --git a/tests/bud/no-hostname/Containerfile b/tests/bud/no-hostname/Containerfile
new file mode 100644
index 0000000..3fe1bb6
--- /dev/null
+++ b/tests/bud/no-hostname/Containerfile
@@ -0,0 +1,3 @@
+from alpine
+run cat /etc/hostname
+
diff --git a/tests/bud/no-hostname/Containerfile.noetc b/tests/bud/no-hostname/Containerfile.noetc
new file mode 100644
index 0000000..5df7822
--- /dev/null
+++ b/tests/bud/no-hostname/Containerfile.noetc
@@ -0,0 +1,3 @@
+from alpine
+RUN mv /etc /usr/
+RUN ls -l /etc
diff --git a/tests/bud/non-directory-in-path/non-directory b/tests/bud/non-directory-in-path/non-directory
new file mode 100644
index 0000000..71fbd69
--- /dev/null
+++ b/tests/bud/non-directory-in-path/non-directory
@@ -0,0 +1 @@
+A dummy file that is not a directory.
diff --git a/tests/bud/onbuild/Dockerfile b/tests/bud/onbuild/Dockerfile
new file mode 100644
index 0000000..2b25c59
--- /dev/null
+++ b/tests/bud/onbuild/Dockerfile
@@ -0,0 +1,4 @@
+FROM alpine
+
+ONBUILD RUN touch /onbuild1
+ONBUILD RUN touch /onbuild2
diff --git a/tests/bud/onbuild/Dockerfile1 b/tests/bud/onbuild/Dockerfile1
new file mode 100644
index 0000000..e6c0e6b
--- /dev/null
+++ b/tests/bud/onbuild/Dockerfile1
@@ -0,0 +1,3 @@
+FROM onbuild
+
+ONBUILD RUN touch /onbuild3
diff --git a/tests/bud/onbuild/Dockerfile2 b/tests/bud/onbuild/Dockerfile2
new file mode 100644
index 0000000..86aed3a
--- /dev/null
+++ b/tests/bud/onbuild/Dockerfile2
@@ -0,0 +1,6 @@
+FROM alpine
+
+ONBUILD RUN touch /onbuild1
+ONBUILD RUN touch /onbuild2
+RUN touch /firstfile
+RUN touch /secondfile
diff --git a/tests/bud/platform-sets-args/Containerfile b/tests/bud/platform-sets-args/Containerfile
new file mode 100644
index 0000000..dce2ec0
--- /dev/null
+++ b/tests/bud/platform-sets-args/Containerfile
@@ -0,0 +1,6 @@
+FROM alpine
+ARG TARGETARCH
+ARG TARGETOS
+ARG TARGETPLATFORM
+ARG TARGETVARIANT
+ENV nothing="multiarch-safe statement that will result in a built image"
diff --git a/tests/bud/preprocess/Decomposed.in b/tests/bud/preprocess/Decomposed.in
new file mode 100644
index 0000000..a532176
--- /dev/null
+++ b/tests/bud/preprocess/Decomposed.in
@@ -0,0 +1,7 @@
+FROM alpine:latest
+
+#include "common"
+
+RUNHELLO
+
+#include "install-base"
diff --git a/tests/bud/preprocess/Error.in b/tests/bud/preprocess/Error.in
new file mode 100644
index 0000000..282ae5f
--- /dev/null
+++ b/tests/bud/preprocess/Error.in
@@ -0,0 +1,5 @@
+FROM alpine:latest
+
+#include "common"
+
+#error
diff --git a/tests/bud/preprocess/common b/tests/bud/preprocess/common
new file mode 100644
index 0000000..15c34f8
--- /dev/null
+++ b/tests/bud/preprocess/common
@@ -0,0 +1,3 @@
+#define RUNHELLO RUN echo "Hello world!"
+
+RUN touch /etc/hello-world.txt
diff --git a/tests/bud/preprocess/install-base b/tests/bud/preprocess/install-base
new file mode 100644
index 0000000..105a323
--- /dev/null
+++ b/tests/bud/preprocess/install-base
@@ -0,0 +1,3 @@
+RUN apk update
+
+RUN apk add git curl
diff --git a/tests/bud/preserve-volumes/Dockerfile b/tests/bud/preserve-volumes/Dockerfile
new file mode 100644
index 0000000..df95908
--- /dev/null
+++ b/tests/bud/preserve-volumes/Dockerfile
@@ -0,0 +1,24 @@
+FROM alpine
+RUN mkdir -p /vol/subvol/subsubvol
+RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subsubvol/subsubvolfile
+VOLUME /vol/subvol
+# At this point, the contents below /vol/subvol should be frozen.
+RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subvolfile
+# In particular, /vol/subvol/subvolfile should be wiped out.
+RUN dd if=/dev/zero bs=512 count=1 of=/vol/volfile
+# However, /vol/volfile should exist.
+VOLUME /vol
+# And this should be redundant.
+VOLUME /vol/subvol
+# And now we've frozen /vol.
+RUN dd if=/dev/zero bs=512 count=1 of=/vol/anothervolfile
+# Which means that in the image we're about to commit, /vol/anothervolfile
+# shouldn't exist, either.
+
+# ADD files which should persist.
+ADD Dockerfile /vol/Dockerfile
+RUN stat /vol/Dockerfile
+ADD Dockerfile /vol/Dockerfile2
+RUN stat /vol/Dockerfile2
+# We should still be saving and restoring volume caches.
+RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subvolfile
diff --git a/tests/bud/pull/Containerfile b/tests/bud/pull/Containerfile
new file mode 100644
index 0000000..24a79d0
--- /dev/null
+++ b/tests/bud/pull/Containerfile
@@ -0,0 +1 @@
+FROM busybox
diff --git a/tests/bud/recurse/Dockerfile b/tests/bud/recurse/Dockerfile
new file mode 100644
index 0000000..a64709c
--- /dev/null
+++ b/tests/bud/recurse/Dockerfile
@@ -0,0 +1,3 @@
+FROM scratch
+COPY . .
+COPY . .
diff --git a/tests/bud/run-mounts/Dockerfile.secret b/tests/bud/run-mounts/Dockerfile.secret
new file mode 100644
index 0000000..920663a
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.secret
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret
diff --git a/tests/bud/run-mounts/Dockerfile.secret-access b/tests/bud/run-mounts/Dockerfile.secret-access
new file mode 100644
index 0000000..a0e5f8c
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.secret-access
@@ -0,0 +1,3 @@
+FROM alpine
+RUN --mount=type=secret,id=mysecret,target=mysecret cat /mysecret
+RUN cat /mysecret
diff --git a/tests/bud/run-mounts/Dockerfile.secret-mode b/tests/bud/run-mounts/Dockerfile.secret-mode
new file mode 100644
index 0000000..2fa3576
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.secret-mode
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --mount=type=secret,id=mysecret stat -c "%a" /run/secrets/mysecret
diff --git a/tests/bud/run-mounts/Dockerfile.secret-not-required b/tests/bud/run-mounts/Dockerfile.secret-not-required
new file mode 100644
index 0000000..8c80d92
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.secret-not-required
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --mount=type=secret,id=mysecret echo "hello"
diff --git a/tests/bud/run-mounts/Dockerfile.secret-options b/tests/bud/run-mounts/Dockerfile.secret-options
new file mode 100644
index 0000000..e2abcd9
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.secret-options
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --mount=type=secret,id=mysecret,dst=/mysecret,uid=1000,gid=1001,mode=0444 stat -c "%a" /mysecret ; ls -n /mysecret
diff --git a/tests/bud/run-mounts/Dockerfile.secret-required b/tests/bud/run-mounts/Dockerfile.secret-required
new file mode 100644
index 0000000..10070e2
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.secret-required
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --mount=type=secret,id=mysecret,required=true cat /run/secrets/mysecret
diff --git a/tests/bud/run-mounts/Dockerfile.secret-required-false b/tests/bud/run-mounts/Dockerfile.secret-required-false
new file mode 100644
index 0000000..7f1f060
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.secret-required-false
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --mount=type=secret,id=mysecret,required=false cat /run/secrets/mysecret
diff --git a/tests/bud/run-mounts/Dockerfile.secret-required-wo-value b/tests/bud/run-mounts/Dockerfile.secret-required-wo-value
new file mode 100644
index 0000000..7d04b70
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.secret-required-wo-value
@@ -0,0 +1,2 @@
+FROM alpine
+RUN --mount=type=secret,id=mysecret,required cat /run/secrets/mysecret
diff --git a/tests/bud/run-mounts/Dockerfile.ssh b/tests/bud/run-mounts/Dockerfile.ssh
new file mode 100644
index 0000000..a0377d7
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.ssh
@@ -0,0 +1,5 @@
+FROM alpine
+
+RUN apk add openssh-client
+
+RUN --mount=type=ssh,id=default ssh-add -l -E md5
diff --git a/tests/bud/run-mounts/Dockerfile.ssh_access b/tests/bud/run-mounts/Dockerfile.ssh_access
new file mode 100644
index 0000000..7296be5
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.ssh_access
@@ -0,0 +1,7 @@
+FROM alpine
+
+RUN apk add openssh-client
+
+RUN --mount=type=ssh,id=default ssh-add -l -E md5
+RUN ssh-add -l -E md5
+RUN cat /run/buildkit/ssh_agent.0
diff --git a/tests/bud/run-mounts/Dockerfile.ssh_options b/tests/bud/run-mounts/Dockerfile.ssh_options
new file mode 100644
index 0000000..4cd4f5b
--- /dev/null
+++ b/tests/bud/run-mounts/Dockerfile.ssh_options
@@ -0,0 +1,3 @@
+FROM alpine
+
+RUN --mount=type=ssh,id=default,dst=/dstsock,uid=1000,gid=1001,mode=0444 stat -c "%a" /dstsock; ls -n /dstsock
diff --git a/tests/bud/run-privd/Dockerfile b/tests/bud/run-privd/Dockerfile
new file mode 100644
index 0000000..c78de1d
--- /dev/null
+++ b/tests/bud/run-privd/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+RUN apk add nginx
diff --git a/tests/bud/run-scenarios/Dockerfile.args b/tests/bud/run-scenarios/Dockerfile.args
new file mode 100644
index 0000000..22de9b6
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.args
@@ -0,0 +1,4 @@
+FROM alpine
+ARG arg="arg_value"
+RUN echo ${arg}
+
diff --git a/tests/bud/run-scenarios/Dockerfile.cmd-empty-run b/tests/bud/run-scenarios/Dockerfile.cmd-empty-run
new file mode 100644
index 0000000..7b51ae5
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.cmd-empty-run
@@ -0,0 +1,3 @@
+FROM alpine
+CMD [ "pwd" ]
+RUN
diff --git a/tests/bud/run-scenarios/Dockerfile.cmd-run b/tests/bud/run-scenarios/Dockerfile.cmd-run
new file mode 100644
index 0000000..f064024
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.cmd-run
@@ -0,0 +1,3 @@
+FROM alpine
+CMD [ "/invalid/cmd" ]
+RUN echo "unique.test.string"
diff --git a/tests/bud/run-scenarios/Dockerfile.entrypoint-cmd-empty-run b/tests/bud/run-scenarios/Dockerfile.entrypoint-cmd-empty-run
new file mode 100644
index 0000000..2afeba0
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.entrypoint-cmd-empty-run
@@ -0,0 +1,4 @@
+FROM alpine
+ENTRYPOINT [ "pwd" ]
+CMD [ "whoami" ]
+RUN
diff --git a/tests/bud/run-scenarios/Dockerfile.entrypoint-cmd-run b/tests/bud/run-scenarios/Dockerfile.entrypoint-cmd-run
new file mode 100644
index 0000000..499fa11
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.entrypoint-cmd-run
@@ -0,0 +1,4 @@
+FROM alpine
+ENTRYPOINT [ "/invalid/entrypoint" ]
+CMD [ "/invalid/cmd" ]
+RUN echo "unique.test.string"
diff --git a/tests/bud/run-scenarios/Dockerfile.entrypoint-empty-run b/tests/bud/run-scenarios/Dockerfile.entrypoint-empty-run
new file mode 100644
index 0000000..ce64242
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.entrypoint-empty-run
@@ -0,0 +1,3 @@
+FROM alpine
+ENTRYPOINT [ "pwd" ]
+RUN
diff --git a/tests/bud/run-scenarios/Dockerfile.entrypoint-run b/tests/bud/run-scenarios/Dockerfile.entrypoint-run
new file mode 100644
index 0000000..94b3e1d
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.entrypoint-run
@@ -0,0 +1,3 @@
+FROM alpine
+ENTRYPOINT [ "/invalid/entrypoint" ]
+RUN echo "unique.test.string"
diff --git a/tests/bud/run-scenarios/Dockerfile.multi-args b/tests/bud/run-scenarios/Dockerfile.multi-args
new file mode 100644
index 0000000..456c71e
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.multi-args
@@ -0,0 +1,5 @@
+FROM alpine
+ARG USED_ARG="used_value"
+RUN echo ${USED_ARG}
+FROM scratch
+COPY --from=0 /etc/passwd /root/passwd-file
diff --git a/tests/bud/run-scenarios/Dockerfile.noop-flags b/tests/bud/run-scenarios/Dockerfile.noop-flags
new file mode 100644
index 0000000..c35f1b5
--- /dev/null
+++ b/tests/bud/run-scenarios/Dockerfile.noop-flags
@@ -0,0 +1 @@
+FROM scratch
diff --git a/tests/bud/secret-relative/Dockerfile b/tests/bud/secret-relative/Dockerfile
new file mode 100644
index 0000000..15f1623
--- /dev/null
+++ b/tests/bud/secret-relative/Dockerfile
@@ -0,0 +1,5 @@
+FROM alpine
+RUN mkdir test
+WORKDIR test
+RUN --mount=type=secret,id=secret-foo,dst=secret1.txt --mount=type=secret,id=secret-bar,dst=secret2.txt \
+ cat /test/secret1.txt && cat /test/secret2.txt
diff --git a/tests/bud/secret-relative/secret1.txt b/tests/bud/secret-relative/secret1.txt
new file mode 100644
index 0000000..3ef745d
--- /dev/null
+++ b/tests/bud/secret-relative/secret1.txt
@@ -0,0 +1 @@
+secret:foo
diff --git a/tests/bud/secret-relative/secret2.txt b/tests/bud/secret-relative/secret2.txt
new file mode 100644
index 0000000..a57be75
--- /dev/null
+++ b/tests/bud/secret-relative/secret2.txt
@@ -0,0 +1 @@
+secret:bar
diff --git a/tests/bud/shell/Dockerfile b/tests/bud/shell/Dockerfile
new file mode 100644
index 0000000..520b9ca
--- /dev/null
+++ b/tests/bud/shell/Dockerfile
@@ -0,0 +1,3 @@
+FROM alpine
+SHELL [ "/bin/sh", "-c" ]
+
diff --git a/tests/bud/shell/Dockerfile.build-shell-custom b/tests/bud/shell/Dockerfile.build-shell-custom
new file mode 100644
index 0000000..653cbf7
--- /dev/null
+++ b/tests/bud/shell/Dockerfile.build-shell-custom
@@ -0,0 +1,3 @@
+FROM ubuntu
+SHELL [ "/bin/bash", "-c" ]
+RUN echo "SHELL=$0"
diff --git a/tests/bud/shell/Dockerfile.build-shell-default b/tests/bud/shell/Dockerfile.build-shell-default
new file mode 100644
index 0000000..fe83ef5
--- /dev/null
+++ b/tests/bud/shell/Dockerfile.build-shell-default
@@ -0,0 +1,2 @@
+FROM alpine
+RUN echo "SHELL=$0"
diff --git a/tests/bud/simple-multi-step/Containerfile b/tests/bud/simple-multi-step/Containerfile
new file mode 100644
index 0000000..0229457
--- /dev/null
+++ b/tests/bud/simple-multi-step/Containerfile
@@ -0,0 +1,4 @@
+FROM alpine
+RUN echo helloworld
+RUN echo helloworld2
+RUN echo helloworld3
diff --git a/tests/bud/stdio/Dockerfile b/tests/bud/stdio/Dockerfile
new file mode 100644
index 0000000..a7efc99
--- /dev/null
+++ b/tests/bud/stdio/Dockerfile
@@ -0,0 +1,7 @@
+FROM alpine
+# Will stall if this is connected to a terminal, or fail if it's not readable
+RUN cat /dev/stdin
+# Will fail if it's not writable
+RUN echo foo > /dev/stdout
+# Will fail if it's not writable
+RUN echo foo > /dev/stderr
diff --git a/tests/bud/supplemental-groups/Dockerfile b/tests/bud/supplemental-groups/Dockerfile
new file mode 100644
index 0000000..462d9ea
--- /dev/null
+++ b/tests/bud/supplemental-groups/Dockerfile
@@ -0,0 +1,3 @@
+FROM alpine
+USER 1000:1000
+RUN cat /proc/$$/status
diff --git a/tests/bud/symlink/Containerfile.add-tar-gz-with-link b/tests/bud/symlink/Containerfile.add-tar-gz-with-link
new file mode 100644
index 0000000..cd069c3
--- /dev/null
+++ b/tests/bud/symlink/Containerfile.add-tar-gz-with-link
@@ -0,0 +1,3 @@
+FROM alpine
+WORKDIR /
+ADD tarball_latest.tar.gz /tmp/
diff --git a/tests/bud/symlink/Containerfile.add-tar-with-link b/tests/bud/symlink/Containerfile.add-tar-with-link
new file mode 100644
index 0000000..690ca0a
--- /dev/null
+++ b/tests/bud/symlink/Containerfile.add-tar-with-link
@@ -0,0 +1,3 @@
+FROM alpine
+WORKDIR /
+ADD tarball_latest.tar /tmp/
diff --git a/tests/bud/symlink/Dockerfile b/tests/bud/symlink/Dockerfile
new file mode 100644
index 0000000..aba85be
--- /dev/null
+++ b/tests/bud/symlink/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+RUN mkdir -p /data
+RUN ln -s /test-log /blah
+RUN ln -s /data/log /test-log
+VOLUME [ "/test-log/test" ]
+RUN echo "hello" > /data/log/blah.txt
diff --git a/tests/bud/symlink/Dockerfile.absolute-dir-symlink b/tests/bud/symlink/Dockerfile.absolute-dir-symlink
new file mode 100644
index 0000000..6e02ed5
--- /dev/null
+++ b/tests/bud/symlink/Dockerfile.absolute-dir-symlink
@@ -0,0 +1,6 @@
+FROM ubuntu as builder
+RUN mkdir -p /my/data && touch /my/data/myexe && ln -s /my/data /data
+
+FROM ubuntu
+COPY --from=builder /data /data
+VOLUME [ "/data" ]
diff --git a/tests/bud/symlink/Dockerfile.absolute-symlink b/tests/bud/symlink/Dockerfile.absolute-symlink
new file mode 100644
index 0000000..49a1b9d
--- /dev/null
+++ b/tests/bud/symlink/Dockerfile.absolute-symlink
@@ -0,0 +1,6 @@
+FROM ubuntu as builder
+RUN echo "symlink-test" > /bin/myexe.1 && ln -s /bin/myexe.1 /bin/myexe
+
+FROM ubuntu
+COPY --from=builder /bin/myexe /bin/
+VOLUME [ "/bin" ]
diff --git a/tests/bud/symlink/Dockerfile.multiple-symlinks b/tests/bud/symlink/Dockerfile.multiple-symlinks
new file mode 100644
index 0000000..80e44f5
--- /dev/null
+++ b/tests/bud/symlink/Dockerfile.multiple-symlinks
@@ -0,0 +1,10 @@
+FROM alpine
+RUN mkdir -p /data
+RUN mkdir -p /test
+RUN mkdir -p /test-log
+RUN mkdir -p /myuser
+RUN ln -s /test /myuser/log
+RUN ln -s /test-log /test/bar
+RUN ln -s /data/log /test-log/foo
+VOLUME [ "/myuser/log/bar/foo/bin" ]
+RUN echo "hello" > /data/log/blah.txt
diff --git a/tests/bud/symlink/Dockerfile.relative-symlink b/tests/bud/symlink/Dockerfile.relative-symlink
new file mode 100644
index 0000000..4fb5473
--- /dev/null
+++ b/tests/bud/symlink/Dockerfile.relative-symlink
@@ -0,0 +1,8 @@
+FROM alpine
+RUN mkdir -p /data
+RUN ln -s ../log /test-log
+VOLUME [ "/test-log/test" ]
+RUN ln -s ../data /var/data
+RUN touch /data/empty
+VOLUME [ "/var/data" ]
+RUN pwd
diff --git a/tests/bud/symlink/Dockerfile.replace-symlink b/tests/bud/symlink/Dockerfile.replace-symlink
new file mode 100644
index 0000000..c208326
--- /dev/null
+++ b/tests/bud/symlink/Dockerfile.replace-symlink
@@ -0,0 +1,3 @@
+FROM scratch
+COPY ./ ./
+COPY ./ ./
diff --git a/tests/bud/symlink/Dockerfile.symlink-points-to-itself b/tests/bud/symlink/Dockerfile.symlink-points-to-itself
new file mode 100644
index 0000000..4cb41b9
--- /dev/null
+++ b/tests/bud/symlink/Dockerfile.symlink-points-to-itself
@@ -0,0 +1,3 @@
+FROM alpine
+RUN ln -s /test-log /test-log
+VOLUME [ "/test-log/test" ]
diff --git a/tests/bud/symlink/tarball.tar b/tests/bud/symlink/tarball.tar
new file mode 100644
index 0000000..f830935
--- /dev/null
+++ b/tests/bud/symlink/tarball.tar
Binary files differ
diff --git a/tests/bud/symlink/tarball.tar.gz b/tests/bud/symlink/tarball.tar.gz
new file mode 100644
index 0000000..8f8cd1a
--- /dev/null
+++ b/tests/bud/symlink/tarball.tar.gz
Binary files differ
diff --git a/tests/bud/symlink/tarball_latest.tar b/tests/bud/symlink/tarball_latest.tar
new file mode 120000
index 0000000..938a760
--- /dev/null
+++ b/tests/bud/symlink/tarball_latest.tar
@@ -0,0 +1 @@
+tarball.tar \ No newline at end of file
diff --git a/tests/bud/symlink/tarball_latest.tar.gz b/tests/bud/symlink/tarball_latest.tar.gz
new file mode 120000
index 0000000..43d6716
--- /dev/null
+++ b/tests/bud/symlink/tarball_latest.tar.gz
@@ -0,0 +1 @@
+tarball.tar.gz \ No newline at end of file
diff --git a/tests/bud/target/Dockerfile b/tests/bud/target/Dockerfile
new file mode 100644
index 0000000..47e8727
--- /dev/null
+++ b/tests/bud/target/Dockerfile
@@ -0,0 +1,13 @@
+FROM ubuntu:latest
+RUN touch /1
+RUN touch hello
+
+FROM alpine:latest AS mytarget
+RUN touch /2
+# Just add a copy so we don't skip stage:0
+COPY --from=0 hello .
+
+FROM busybox:latest AS mytarget2
+RUN touch /3
+# Just add a copy so we don't skip stage:1
+COPY --from=1 hello .
diff --git a/tests/bud/targetarch/Dockerfile b/tests/bud/targetarch/Dockerfile
new file mode 100644
index 0000000..a73e170
--- /dev/null
+++ b/tests/bud/targetarch/Dockerfile
@@ -0,0 +1,3 @@
+FROM alpine
+ARG TARGETARCH
+ARG TARGETOS
diff --git a/tests/bud/terminal/Dockerfile b/tests/bud/terminal/Dockerfile
new file mode 100644
index 0000000..cc7cfc9
--- /dev/null
+++ b/tests/bud/terminal/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+RUN ! tty
diff --git a/tests/bud/unrecognized/Dockerfile b/tests/bud/unrecognized/Dockerfile
new file mode 100644
index 0000000..614d460
--- /dev/null
+++ b/tests/bud/unrecognized/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+BOGUS nope-nope-nope
diff --git a/tests/bud/use-args/Containerfile b/tests/bud/use-args/Containerfile
new file mode 100644
index 0000000..ad9784f
--- /dev/null
+++ b/tests/bud/use-args/Containerfile
@@ -0,0 +1,4 @@
+FROM alpine
+ARG testArg
+RUN echo ${testArg}
+COPY ${testArg} .
diff --git a/tests/bud/use-args/Containerfile.dest_nobrace b/tests/bud/use-args/Containerfile.dest_nobrace
new file mode 100644
index 0000000..57ec703
--- /dev/null
+++ b/tests/bud/use-args/Containerfile.dest_nobrace
@@ -0,0 +1,6 @@
+FROM alpine
+ARG testArg
+ARG destination
+RUN echo $testArg
+RUN echo $destination
+COPY $testArg $destination
diff --git a/tests/bud/use-args/Containerfile.destination b/tests/bud/use-args/Containerfile.destination
new file mode 100644
index 0000000..c94ea89
--- /dev/null
+++ b/tests/bud/use-args/Containerfile.destination
@@ -0,0 +1,6 @@
+FROM alpine
+ARG testArg
+ARG destination
+RUN echo ${testArg}
+RUN echo ${destination}
+COPY ${testArg} ${destination}
diff --git a/tests/bud/use-layers/Dockerfile b/tests/bud/use-layers/Dockerfile
new file mode 100644
index 0000000..3c83723
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile
@@ -0,0 +1,7 @@
+FROM alpine
+RUN mkdir /hello
+VOLUME /var/lib/testdata
+RUN touch file.txt
+EXPOSE 8080
+ADD https://github.com/containers/buildah/blob/main/README.md /tmp/
+ENV foo=bar
diff --git a/tests/bud/use-layers/Dockerfile.2 b/tests/bud/use-layers/Dockerfile.2
new file mode 100644
index 0000000..5b9ab0f
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.2
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir /hello
+RUN touch blah.txt
+ENV foo=bar
diff --git a/tests/bud/use-layers/Dockerfile.3 b/tests/bud/use-layers/Dockerfile.3
new file mode 100644
index 0000000..a86ba55
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.3
@@ -0,0 +1,5 @@
+FROM alpine
+RUN mkdir /hello
+RUN touch blah.txt
+COPY mount world/
+ENV foo=bar
diff --git a/tests/bud/use-layers/Dockerfile.4 b/tests/bud/use-layers/Dockerfile.4
new file mode 100644
index 0000000..7964490
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.4
@@ -0,0 +1,3 @@
+FROM alpine
+COPY hello_world.sh /tmp/
+CMD bash /tmp/hello_world.sh
diff --git a/tests/bud/use-layers/Dockerfile.5 b/tests/bud/use-layers/Dockerfile.5
new file mode 100644
index 0000000..4b54246
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.5
@@ -0,0 +1,2 @@
+FROM alpine
+RUN touch /home/blah \ No newline at end of file
diff --git a/tests/bud/use-layers/Dockerfile.6 b/tests/bud/use-layers/Dockerfile.6
new file mode 100644
index 0000000..c3c78df
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.6
@@ -0,0 +1 @@
+FROM alpine \ No newline at end of file
diff --git a/tests/bud/use-layers/Dockerfile.7 b/tests/bud/use-layers/Dockerfile.7
new file mode 100644
index 0000000..481d194
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.7
@@ -0,0 +1,2 @@
+FROM alpine
+COPY . world/
diff --git a/tests/bud/use-layers/Dockerfile.build-args b/tests/bud/use-layers/Dockerfile.build-args
new file mode 100644
index 0000000..8622edb
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.build-args
@@ -0,0 +1,4 @@
+FROM alpine
+ARG user
+RUN echo $user | base64
+RUN touch /tmp/hello
diff --git a/tests/bud/use-layers/Dockerfile.dangling-symlink b/tests/bud/use-layers/Dockerfile.dangling-symlink
new file mode 100644
index 0000000..752e7f8
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.dangling-symlink
@@ -0,0 +1,2 @@
+FROM alpine
+COPY blah /tmp/
diff --git a/tests/bud/use-layers/Dockerfile.fail-case b/tests/bud/use-layers/Dockerfile.fail-case
new file mode 100644
index 0000000..6b774c0
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.fail-case
@@ -0,0 +1,5 @@
+FROM alpine
+RUN mkdir /hello
+RUN touch blah.txt
+COPY non-existent world/
+ENV foo=bar
diff --git a/tests/bud/use-layers/Dockerfile.multistage-copy b/tests/bud/use-layers/Dockerfile.multistage-copy
new file mode 100644
index 0000000..d97376f
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.multistage-copy
@@ -0,0 +1,9 @@
+FROM alpine AS uuid
+COPY uuid /src
+
+FROM alpine AS date
+COPY date /src
+
+FROM alpine
+COPY --from=uuid /src/data /uuid
+COPY --from=date /src/data /date
diff --git a/tests/bud/use-layers/Dockerfile.non-existent-registry b/tests/bud/use-layers/Dockerfile.non-existent-registry
new file mode 100644
index 0000000..e53864a
--- /dev/null
+++ b/tests/bud/use-layers/Dockerfile.non-existent-registry
@@ -0,0 +1,5 @@
+FROM non-existent-registry.com/alpine
+RUN mkdir /hello
+RUN touch blah.txt
+COPY non-existent world/
+ENV foo=bar
diff --git a/tests/bud/verify-cleanup/Dockerfile b/tests/bud/verify-cleanup/Dockerfile
new file mode 100644
index 0000000..dc63d85
--- /dev/null
+++ b/tests/bud/verify-cleanup/Dockerfile
@@ -0,0 +1,24 @@
+FROM alpine as builder
+RUN mkdir subdir
+COPY hey .
+
+FROM debian
+RUN --mount=type=bind,source=.,dst=/tmp,z \
+ --mount=type=tmpfs,dst=/var/tmp \
+ cat /tmp/hey
+RUN --mount=type=cache,from=builder,target=/cachedir cat /cachedir/hey
+RUN --mount=type=secret,id=secret-foo,dst=secret1.txt cat secret1.txt
+ARG TMP="/tmp"
+ARG VARTMP="/var/tmp"
+ARG CACHEDIR="/cachedir"
+ARG TESTDIR="/testdir"
+ARG SECRETFILE="secret1.txt"
+RUN [ -d "/tmp" ] && echo "Directory $TMP exists."
+RUN [ -d "/var/tmp" ] && echo "Directory $VARTMP exists."
+
+#Following path should not exists after the --mount step
+RUN [ ! -d "/testdir" ] && echo "Directory $TESTDIR DOES NOT exists."
+RUN [ ! -d "/cachedir" ] && echo "Cache Directory $CACHEDIR DOES NOT exists."
+RUN [ ! -f "secret1.txt" ] && echo "Secret File $SECRETFILE DOES NOT exists."
+# This should fail
+RUN cat /tmp/hey
diff --git a/tests/bud/verify-cleanup/hey b/tests/bud/verify-cleanup/hey
new file mode 100644
index 0000000..ce01362
--- /dev/null
+++ b/tests/bud/verify-cleanup/hey
@@ -0,0 +1 @@
+hello
diff --git a/tests/bud/verify-cleanup/secret1.txt b/tests/bud/verify-cleanup/secret1.txt
new file mode 100644
index 0000000..e05f366
--- /dev/null
+++ b/tests/bud/verify-cleanup/secret1.txt
@@ -0,0 +1 @@
+secrettext
diff --git a/tests/bud/volume-ownership/Dockerfile b/tests/bud/volume-ownership/Dockerfile
new file mode 100644
index 0000000..fe0ee86
--- /dev/null
+++ b/tests/bud/volume-ownership/Dockerfile
@@ -0,0 +1,10 @@
+FROM alpine
+RUN adduser -D -H testuser && addgroup testgroup
+RUN mkdir -p /vol/subvol
+RUN chown testuser:testgroup /vol/subvol
+VOLUME /vol/subvol
+
+# Run some command after VOLUME to ensure that the volume cache behavior is invoked
+# See https://github.com/containers/buildah/blob/843d15de3e797bd912607d27324d13a9d5c27dfb/imagebuildah/stage_executor.go#L61-L72 and
+# for more details
+RUN touch /test
diff --git a/tests/bud/volume-perms/Dockerfile b/tests/bud/volume-perms/Dockerfile
new file mode 100644
index 0000000..4dced77
--- /dev/null
+++ b/tests/bud/volume-perms/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+VOLUME /vol/subvol
+# At this point, the directory should exist, with default permissions 0755, the
+# contents below /vol/subvol should be frozen, and we shouldn't get an error
+# from trying to write to it because we it was created automatically.
+RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subvolfile
diff --git a/tests/bud/volume-symlink/Dockerfile b/tests/bud/volume-symlink/Dockerfile
new file mode 100644
index 0000000..95c90b3
--- /dev/null
+++ b/tests/bud/volume-symlink/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+# Create symbolic links to simplify mounting
+RUN mkdir -p /home/app/myvolume \
+&& touch /home/app/myvolume/foo.txt \
+&& ln -s /home/app/myvolume /config
+VOLUME ["/config"]
diff --git a/tests/bud/volume-symlink/Dockerfile.no-symlink b/tests/bud/volume-symlink/Dockerfile.no-symlink
new file mode 100644
index 0000000..ee343a7
--- /dev/null
+++ b/tests/bud/volume-symlink/Dockerfile.no-symlink
@@ -0,0 +1,4 @@
+FROM alpine
+RUN mkdir -p /home/app/myvolume \
+&& touch /home/app/myvolume/foo.txt
+VOLUME ["/home/app/myvolume"]
diff --git a/tests/bud/with-arg/Dockerfile b/tests/bud/with-arg/Dockerfile
new file mode 100644
index 0000000..3f747dd
--- /dev/null
+++ b/tests/bud/with-arg/Dockerfile
@@ -0,0 +1,4 @@
+FROM alpine
+ARG FOO
+ENV FOO=bat
+RUN echo $FOO
diff --git a/tests/bud/with-arg/Dockerfile2 b/tests/bud/with-arg/Dockerfile2
new file mode 100644
index 0000000..70035cf
--- /dev/null
+++ b/tests/bud/with-arg/Dockerfile2
@@ -0,0 +1,4 @@
+FROM alpine
+ARG FOO
+ENV FOO=${FOO}
+RUN echo $FOO
diff --git a/tests/bud/with-arg/Dockerfilefromarg b/tests/bud/with-arg/Dockerfilefromarg
new file mode 100644
index 0000000..0b78d0e
--- /dev/null
+++ b/tests/bud/with-arg/Dockerfilefromarg
@@ -0,0 +1,16 @@
+ARG app_type
+ARG another_app_type
+ARG another_app_type_default=m
+
+FROM alpine as x
+RUN echo hello
+
+FROM ${app_type} as m
+RUN echo world
+
+# Do not supply this in cli, lets use default
+FROM ${another_app_type_default} as final
+RUN echo hello
+
+FROM ${another_app_type} as final
+RUN echo hello
diff --git a/tests/bud/workdir-symlink/Dockerfile b/tests/bud/workdir-symlink/Dockerfile
new file mode 100644
index 0000000..5341fe9
--- /dev/null
+++ b/tests/bud/workdir-symlink/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+RUN mkdir /var/lib/tempest
+RUN ln -sf /var/lib/tempest /tempest
+WORKDIR /tempest
+RUN touch /etc/notareal.conf
+RUN chmod 664 /etc/notareal.conf
diff --git a/tests/bud/workdir-symlink/Dockerfile-2 b/tests/bud/workdir-symlink/Dockerfile-2
new file mode 100644
index 0000000..f6fc1c3
--- /dev/null
+++ b/tests/bud/workdir-symlink/Dockerfile-2
@@ -0,0 +1,7 @@
+# No directory created for the target of the symlink
+FROM alpine
+RUN ln -sf /var/lib/tempest /tempest
+WORKDIR /tempest
+RUN touch /etc/notareal.conf
+RUN chmod 664 /etc/notareal.conf
+COPY Dockerfile-2 ./Dockerfile-2
diff --git a/tests/bud/workdir-symlink/Dockerfile-3 b/tests/bud/workdir-symlink/Dockerfile-3
new file mode 100644
index 0000000..2f21e38
--- /dev/null
+++ b/tests/bud/workdir-symlink/Dockerfile-3
@@ -0,0 +1,10 @@
+# No directory created for the target of the symlink
+FROM alpine
+RUN ln -sf /var/lib/tempest /tempest
+WORKDIR /tempest/lowerdir
+RUN touch /etc/notareal.conf
+RUN chmod 664 /etc/notareal.conf
+RUN mkdir -p /tempest/lowerdir
+COPY Dockerfile-3 ./Dockerfile-3
+COPY Dockerfile-3 /tempest/Dockerfile-3
+COPY Dockerfile-3 /tempest/lowerdir/Dockerfile-3
diff --git a/tests/bud/workdir-user/Dockerfile b/tests/bud/workdir-user/Dockerfile
new file mode 100644
index 0000000..bafca2d
--- /dev/null
+++ b/tests/bud/workdir-user/Dockerfile
@@ -0,0 +1,6 @@
+FROM alpine
+RUN adduser -D http -h /home/http
+USER http
+WORKDIR /home/http/public
+RUN stat -c '%u:%g %n' $PWD
+RUN touch foobar
diff --git a/tests/bud_overlay_leaks.bats b/tests/bud_overlay_leaks.bats
new file mode 100644
index 0000000..f4b1c77
--- /dev/null
+++ b/tests/bud_overlay_leaks.bats
@@ -0,0 +1,18 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "bud overlay storage leaked mount" {
+ if test \! -e /usr/bin/fuse-overlayfs -a "$BUILDAH_ISOLATION" = "rootless"; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION" and no /usr/bin/fuse-overlayfs present
+ fi
+
+ target=pull
+ run_buildah 125 --storage-driver=overlay bud $WITH_POLICY_JSON -t ${target} --pull-never $BUDFILES/pull
+ expect_output --substring "image not known"
+
+ leftover=$(mount | grep $TEST_SCRATCH_DIR | cat)
+ if [ -n "$leftover" ]; then
+ die "buildah leaked a mount on error: $leftover"
+ fi
+}
diff --git a/tests/byid.bats b/tests/byid.bats
new file mode 100644
index 0000000..902c19f
--- /dev/null
+++ b/tests/byid.bats
@@ -0,0 +1,104 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "from-by-id" {
+ image=busybox
+ _prefetch $image
+
+ # Pull down the image, if we have to.
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON $image
+ expect_output "$image-working-container"
+ cid=$output
+ run_buildah rm $cid
+
+ # Get the image's ID.
+ run_buildah images -q $image
+ expect_line_count 1
+ iid="$output"
+
+ # Use the image's ID to create a container.
+ run_buildah from --pull=false $WITH_POLICY_JSON ${iid}
+ expect_line_count 1
+ cid="$output"
+ run_buildah rm $cid
+
+ # Use a truncated form of the image's ID to create a container.
+ run_buildah from --pull=false $WITH_POLICY_JSON ${iid:0:6}
+ expect_line_count 1
+ cid="$output"
+ run_buildah rm $cid
+
+ run_buildah rmi $iid
+}
+
+@test "inspect-by-id" {
+ image=busybox
+ _prefetch $image
+
+ # Pull down the image, if we have to.
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON $image
+ expect_output "$image-working-container"
+ cid=$output
+ run_buildah rm $cid
+
+ # Get the image's ID.
+ run_buildah images -q $image
+ expect_line_count 1
+ iid="$output"
+
+ # Use the image's ID to inspect it.
+ run_buildah inspect --type=image ${iid}
+
+ # Use a truncated copy of the image's ID to inspect it.
+ run_buildah inspect --type=image ${iid:0:6}
+
+ run_buildah rmi $iid
+}
+
+@test "push-by-id" {
+ for image in busybox registry.k8s.io/pause ; do
+ echo pulling/pushing image $image
+ _prefetch $image
+
+ TARGET=${TEST_SCRATCH_DIR}/subdir-$(basename $image)
+ mkdir -p $TARGET $TARGET-truncated
+
+ # Pull down the image, if we have to.
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON $image
+ expect_output "${image##*/}-working-container" # image, w/o registry prefix
+ run_buildah rm $output
+
+ # Get the image's ID.
+ run_buildah images -q $image
+ expect_output --substring '^[0-9a-f]{12,64}$'
+ iid="$output"
+
+ # Use the image's ID to push it.
+ run_buildah push $WITH_POLICY_JSON $iid dir:$TARGET
+
+ # Use a truncated form of the image's ID to push it.
+ run_buildah push $WITH_POLICY_JSON ${iid:0:6} dir:$TARGET-truncated
+
+ # Use the image's complete ID to remove it.
+ run_buildah rmi $iid
+ done
+}
+
+@test "rmi-by-id" {
+ image=busybox
+ _prefetch $image
+
+ # Pull down the image, if we have to.
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON $image
+ expect_output "$image-working-container"
+ run_buildah rm $output
+
+ # Get the image's ID.
+ run_buildah images -q $image
+ expect_output --substring '^[0-9a-f]{12,64}$'
+ iid="$output"
+
+ # Use a truncated copy of the image's ID to remove it.
+ run_buildah rmi ${iid:0:6}
+}
diff --git a/tests/chroot.bats b/tests/chroot.bats
new file mode 100644
index 0000000..110fc95
--- /dev/null
+++ b/tests/chroot.bats
@@ -0,0 +1,105 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test chroot-mount-flags {
+ skip_if_no_unshare
+ if ! test -e /etc/subuid ; then
+ skip "we can't bind mount over /etc/subuid during the test if there is no /etc/subuid file"
+ fi
+ if ! test -e /etc/subgid ; then
+ skip "we can't bind mount over /etc/subgid during the test if there is no /etc/subgid file"
+ fi
+ # whom should we map to root in a nested namespace?
+ if is_rootless ; then
+ subid=128
+ rangesize=1024
+ else
+ subid=1048576
+ rangesize=16384
+ fi
+ # we're going to have to prefetch into storage used by someone else image
+ # chosen because its rootfs doesn't have any uid/gid ownership above
+ # $rangesize, because the nested namespace needs to be able to represent all
+ # of them
+ baseimage=registry.access.redhat.com/ubi9-micro:latest
+ _prefetch $baseimage
+ baseimagef=$(tr -c a-zA-Z0-9.- - <<< "$baseimage")
+ # create the directories that we need
+ tmpfs=${TEST_SCRATCH_DIR}/tmpfs
+ mkdir $tmpfs
+ context=${TEST_SCRATCH_DIR}/context
+ mkdir $context
+ storagedir=${TEST_SCRATCH_DIR}/storage
+ mkdir $storagedir
+ rootdir=${storagedir}/rootdir
+ mkdir $rootdir
+ runrootdir=${storagedir}/runrootdir
+ mkdir $runrootdir
+ xdgruntimedir=${storagedir}/xdgruntime
+ mkdir $xdgruntimedir
+ xdgconfighome=${storagedir}/xdgconfighome
+ mkdir $xdgconfighome
+ xdgdatahome=${storagedir}/xdgdatahome
+ mkdir $xdgdatahome
+ storageopts="--storage-driver vfs --root $rootdir --runroot $runrootdir"
+ # our temporary parent directory might not be world-searchable, which will
+ # cause someone in the nested user namespace to hit permissions issues even
+ # looking for $storagedir, so tweak perms to let them do at least that much
+ fixupdir=$storagedir
+ while test $(stat -c %d:%i $fixupdir) != $(stat -c %d:%i /) ; do
+ # walk up to root, or the first parent that we don't own
+ if test $(stat -c %u $fixupdir) -ne $(id -u) ; then
+ break
+ fi
+ chmod +x $fixupdir
+ fixupdir=$fixupdir/..
+ done
+ # start writing the script to run in the nested user namespace
+ echo set -e > ${TEST_SCRATCH_DIR}/script.sh
+ echo export XDG_RUNTIME_DIR=$xdgruntimedir >> ${TEST_SCRATCH_DIR}/script.sh
+ echo export XDG_CONFIG_HOME=$xdgconfighome >> ${TEST_SCRATCH_DIR}/script.sh
+ echo export XDG_DATA_HOME=$xdgdatahome >> ${TEST_SCRATCH_DIR}/script.sh
+ # give our would-be user ownership of that directory
+ echo chown --recursive ${subid}:${subid} ${storagedir} >> ${TEST_SCRATCH_DIR}/script.sh
+ # make newuidmap/newgidmap, invoked by unshare even for uid=0, happy
+ echo root:0:4294967295 > ${TEST_SCRATCH_DIR}/subid
+ echo mount --bind -r ${TEST_SCRATCH_DIR}/subid /etc/subuid >> ${TEST_SCRATCH_DIR}/script.sh
+ echo mount --bind -r ${TEST_SCRATCH_DIR}/subid /etc/subgid >> ${TEST_SCRATCH_DIR}/script.sh
+ # don't get tripped up by ${TEST_SCRATCH_DIR} potentially being on a filesystem with non-default mount flags
+ echo mount -t tmpfs -o size=256K tmpfs $tmpfs >> ${TEST_SCRATCH_DIR}/script.sh
+ # mount a small tmpfs with every mount flag combination that concerns us, and
+ # be ready to tell buildah to mount everything conservatively, to mirror the
+ # TransientMounts API being used to nodev/noexec/nosuid/ro bind in a source
+ # that doesn't necessarily have those flags already set on it
+ for d in dev nodev ; do
+ for e in exec noexec ; do
+ for s in suid nosuid ; do
+ for r in ro rw ; do
+ subdir=$tmpfs/d-$d-$e-$s-$r
+ echo mkdir ${subdir} >> ${TEST_SCRATCH_DIR}/script.sh
+ echo mount -t tmpfs -o size=256K,$d,$e,$s,$r tmpfs ${subdir} >> ${TEST_SCRATCH_DIR}/script.sh
+ mounts="${mounts:+${mounts} }--volume ${subdir}:/mounts/d-$d-$e-$s-$r:nodev,noexec,nosuid,ro"
+ done
+ done
+ done
+ done
+ # make sure that RUN doesn't just break when we try to use volume mounts with
+ # flags set that we're not allowed to modify
+ echo FROM $baseimage > $context/Dockerfile
+ echo RUN cat /proc/mounts >> $context/Dockerfile
+ # copy in the prefetched image
+ # unshare from util-linux 2.39 also accepts INNER:OUTER:SIZE for --map-users
+ # and --map-groups, but fedora 37's is too old, so the older OUTER,INNER,SIZE
+ # (using commas instead of colons as field separators) will have to do
+ echo "unshare -Umpf --mount-proc --setuid 0 --setgid 0 --map-users=${subid},0,${rangesize} --map-groups=${subid},0,${rangesize} ${COPY_BINARY} ${storageopts} dir:$_BUILDAH_IMAGE_CACHEDIR/$baseimagef containers-storage:$baseimage" >> ${TEST_SCRATCH_DIR}/script.sh
+ # try to do a build with all of the volume mounts
+ echo "unshare -Umpf --mount-proc --setuid 0 --setgid 0 --map-users=${subid},0,${rangesize} --map-groups=${subid},0,${rangesize} ${BUILDAH_BINARY} ${BUILDAH_REGISTRY_OPTS} ${storageopts} build --isolation chroot --pull=never $mounts $context" >> ${TEST_SCRATCH_DIR}/script.sh
+ # run that whole script in a nested mount namespace with no $XDG_...
+ # variables leaked into it
+ if is_rootless ; then
+ run_buildah unshare env -i bash -x ${TEST_SCRATCH_DIR}/script.sh
+ else
+ unshare -mpf --mount-proc env -i bash -x ${TEST_SCRATCH_DIR}/script.sh
+ fi
+}
diff --git a/tests/commit.bats b/tests/commit.bats
new file mode 100644
index 0000000..8caa73b
--- /dev/null
+++ b/tests/commit.bats
@@ -0,0 +1,328 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "commit-flags-order-verification" {
+ run_buildah 125 commit cnt1 --tls-verify
+ check_options_flag_err "--tls-verify"
+
+ run_buildah 125 commit cnt1 -q
+ check_options_flag_err "-q"
+
+ run_buildah 125 commit cnt1 -f=docker --quiet --creds=bla:bla
+ check_options_flag_err "-f=docker"
+
+ run_buildah 125 commit cnt1 --creds=bla:bla
+ check_options_flag_err "--creds=bla:bla"
+}
+
+@test "commit" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON $cid alpine-image
+ run_buildah images alpine-image
+}
+
+# Mainly this test is added for rootless setups where XDG_RUNTIME_DIR
+# is not set and we end up setting incorrect runroot at various steps
+# Use case is typically seen on environments where current session
+# is invalid login session.
+@test "commit image on rootless setup with mount" {
+ unset XDG_RUNTIME_DIR
+ run dd if=/dev/zero of=${TEST_SCRATCH_DIR}/file count=1 bs=10M
+ run_buildah from scratch
+ CONT=$output
+ unset XDG_RUNTIME_DIR
+ run_buildah mount $CONT
+ MNT=$output
+ run cp ${TEST_SCRATCH_DIR}/file $MNT/file
+ run_buildah umount $CONT
+ run_buildah commit $CONT foo
+ run_buildah images foo
+ expect_output --substring "10.5 MB"
+}
+
+@test "commit-with-remove-identity-label" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit --identity-label=false $WITH_POLICY_JSON $cid alpine-image
+ run_buildah images alpine-image
+ run_buildah inspect --format '{{printf "%q" .Docker.Config.Labels}}' alpine-image
+ expect_output "map[]"
+}
+
+@test "commit format test" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON $cid alpine-image-oci
+ run_buildah commit --format docker --disable-compression=false $WITH_POLICY_JSON $cid alpine-image-docker
+
+ run_buildah inspect --type=image --format '{{.Manifest}}' alpine-image-oci
+ mediatype=$(jq -r '.layers[0].mediaType' <<<"$output")
+ expect_output --from="$mediatype" "application/vnd.oci.image.layer.v1.tar"
+ run_buildah inspect --type=image --format '{{.Manifest}}' alpine-image-docker
+ mediatype=$(jq -r '.layers[1].mediaType' <<<"$output")
+ expect_output --from="$mediatype" "application/vnd.docker.image.rootfs.diff.tar.gzip"
+}
+
+@test "commit --unsetenv PATH" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit --unsetenv PATH $WITH_POLICY_JSON $cid alpine-image-oci
+ run_buildah commit --unsetenv PATH --format docker --disable-compression=false $WITH_POLICY_JSON $cid alpine-image-docker
+
+ run_buildah inspect --type=image --format '{{.OCIv1.Config.Env}}' alpine-image-oci
+ expect_output "[]" "No Path should be defined"
+ run_buildah inspect --type=image --format '{{.Docker.Config.Env}}' alpine-image-docker
+ expect_output "[]" "No Path should be defined"
+}
+
+@test "commit quiet test" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit --iidfile /dev/null $WITH_POLICY_JSON -q $cid alpine-image
+ expect_output ""
+}
+
+@test "commit rm test" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON --rm $cid alpine-image
+ run_buildah 125 rm $cid
+ expect_output --substring "removing container \"alpine-working-container\": container not known"
+}
+
+@test "commit-alternate-storage" {
+ _prefetch alpine
+ echo FROM
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ echo COMMIT
+ run_buildah commit $WITH_POLICY_JSON $cid "containers-storage:[vfs@${TEST_SCRATCH_DIR}/root2+${TEST_SCRATCH_DIR}/runroot2]newimage"
+ echo FROM
+ run_buildah --storage-driver vfs --root ${TEST_SCRATCH_DIR}/root2 --runroot ${TEST_SCRATCH_DIR}/runroot2 from $WITH_POLICY_JSON newimage
+}
+
+@test "commit-rejected-name" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 125 commit $WITH_POLICY_JSON $cid ThisNameShouldBeRejected
+ expect_output --substring "must be lower"
+}
+
+@test "commit-no-empty-created-by" {
+ if ! python3 -c 'import json, sys' 2> /dev/null ; then
+ skip "python interpreter with json module not found"
+ fi
+ target=new-image
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+
+ run_buildah config --created-by "untracked actions" $cid
+ run_buildah commit $WITH_POLICY_JSON $cid ${target}
+ run_buildah inspect --format '{{.Config}}' ${target}
+ config="$output"
+ run python3 -c 'import json, sys; config = json.load(sys.stdin); print(config["history"][len(config["history"])-1]["created_by"])' <<< "$config"
+ echo "$output"
+ assert "$status" -eq 0 "status from python command 1"
+ expect_output "untracked actions"
+
+ run_buildah config --created-by "" $cid
+ run_buildah commit $WITH_POLICY_JSON $cid ${target}
+ run_buildah inspect --format '{{.Config}}' ${target}
+ config="$output"
+ run python3 -c 'import json, sys; config = json.load(sys.stdin); print(config["history"][len(config["history"])-1]["created_by"])' <<< "$config"
+ echo "$output"
+ assert "$status" -eq 0 "status from python command 2"
+ expect_output "/bin/sh"
+}
+
+@test "commit-no-name" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON $cid
+}
+
+@test "commit should fail with nonexistent authfile" {
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 125 commit --authfile /tmp/nonexistent $WITH_POLICY_JSON $cid alpine-image
+}
+
+@test "commit-builder-identity" {
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON $cid alpine-image
+
+ run_buildah --version
+ local -a output_fields=($output)
+ buildah_version=${output_fields[2]}
+
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "io.buildah.version"}}' alpine-image
+ expect_output "$buildah_version"
+}
+
+@test "commit-parent-id" {
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect --format '{{.FromImageID}}' $cid
+ iid=$output
+
+ run_buildah commit $WITH_POLICY_JSON --format docker $cid alpine-image
+ run_buildah inspect --format '{{.Docker.Parent}}' alpine-image
+ expect_output "sha256:$iid" "alpine-image -> .Docker.Parent"
+}
+
+@test "commit-container-id" {
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON alpine
+
+ # There is exactly one container. Get its ID.
+ run_buildah containers --format '{{.ContainerID}}'
+ cid=$output
+
+ run_buildah commit $WITH_POLICY_JSON --format docker $cid alpine-image
+ run_buildah inspect --format '{{.Docker.Container}}' alpine-image
+ expect_output "$cid" "alpine-image -> .Docker.Container"
+}
+
+@test "commit with name" {
+ _prefetch busybox
+ run_buildah from --quiet $WITH_POLICY_JSON --name busyboxc busybox
+ expect_output "busyboxc"
+
+ # Commit with a new name
+ newname="commitbyname/busyboxname"
+ run_buildah commit $WITH_POLICY_JSON busyboxc $newname
+
+ run_buildah from $WITH_POLICY_JSON localhost/$newname
+ expect_output "busyboxname-working-container"
+
+ cname=$output
+ run_buildah inspect --format '{{.FromImage}}' $cname
+ expect_output "localhost/$newname:latest"
+}
+
+@test "commit to docker-distribution" {
+ _prefetch busybox
+ run_buildah from $WITH_POLICY_JSON --name busyboxc busybox
+ start_registry
+ run_buildah commit $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword busyboxc docker://localhost:${REGISTRY_PORT}/commit/busybox
+ run_buildah from $WITH_POLICY_JSON --name fromdocker --tls-verify=false --creds testuser:testpassword docker://localhost:${REGISTRY_PORT}/commit/busybox
+}
+
+@test "commit encrypted local oci image" {
+ skip_if_rootless_environment
+ _prefetch busybox
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 1024
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah commit --iidfile /dev/null $WITH_POLICY_JSON --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub -q $cid oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+ imgtype -show-manifest oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc | grep "+encrypted"
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "commit oci encrypt to registry" {
+ _prefetch busybox
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 1024
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ start_registry
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah commit --iidfile /dev/null --tls-verify=false --creds testuser:testpassword $WITH_POLICY_JSON --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub -q $cid docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ # this test, just checks the ability to commit an image to a registry
+ # there is no good way to test the details of the image unless with ./buildah pull, test will be in pull.bats
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+
+ # verify that encrypted layers are not cached or reused for an non-encrypted image (See containers/image#1533)
+ run_buildah commit --iidfile /dev/null --tls-verify=false --creds testuser:testpassword $WITH_POLICY_JSON -q $cid docker://localhost:${REGISTRY_PORT}/buildah/busybox_not_encrypted:latest
+ run_buildah from $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword docker://localhost:${REGISTRY_PORT}/buildah/busybox_not_encrypted:latest
+}
+
+@test "commit omit-timestamp" {
+ _prefetch busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah run $cid touch /test
+ run_buildah commit $WITH_POLICY_JSON --omit-timestamp -q $cid omit
+ run_buildah inspect --format '{{ .Docker.Created }}' omit
+ expect_output --substring "1970-01-01"
+ run_buildah inspect --format '{{ .OCIv1.Created }}' omit
+ expect_output --substring "1970-01-01"
+
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON omit
+ cid=$output
+ run_buildah run $cid ls -l /test
+ expect_output --substring "1970"
+
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "commit timestamp" {
+ _prefetch busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah run $cid touch /test
+ run_buildah commit $WITH_POLICY_JSON --timestamp 0 -q $cid omit
+ run_buildah inspect --format '{{ .Docker.Created }}' omit
+ expect_output --substring "1970-01-01"
+ run_buildah inspect --format '{{ .OCIv1.Created }}' omit
+ expect_output --substring "1970-01-01"
+
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON omit
+ cid=$output
+ run_buildah run $cid ls -l /test
+ expect_output --substring "1970"
+
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "commit with authfile" {
+ _prefetch busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah run $cid touch /test
+
+ start_registry
+ run_buildah login --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword --tls-verify=false localhost:${REGISTRY_PORT}
+ run_buildah commit --authfile ${TEST_SCRATCH_DIR}/test.auth $WITH_POLICY_JSON --tls-verify=false $cid docker://localhost:${REGISTRY_PORT}/buildah/my-busybox
+ expect_output --substring "Writing manifest to image destination"
+}
+
+@test "commit-without-names" {
+ _prefetch busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah run $cid touch /testfile
+ run_buildah run $cid chown $(id -u):$(id -g) /testfile
+ run_buildah commit $cid dir:${TEST_SCRATCH_DIR}/new-image
+ config=$(jq -r .config.digest ${TEST_SCRATCH_DIR}/new-image/manifest.json)
+ echo "config blob is $config"
+ diffid=$(jq -r '.rootfs.diff_ids[-1]' ${TEST_SCRATCH_DIR}/new-image/${config##*:})
+ echo "new layer is $diffid"
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/new-image/${diffid##*:} /testdiff.tar
+ # use in-container version of tar to avoid worrying about differences in
+ # output formats between tar implementations
+ run_buildah run $cid tar tvf /testdiff.tar testfile
+ echo "new file looks like [$output]"
+ # ownership information should be forced to be in number/number format
+ # instead of name/name because the names are gone
+ assert "$output" =~ $(id -u)/$(id -g)
+}
diff --git a/tests/config.bats b/tests/config.bats
new file mode 100644
index 0000000..96b5b83
--- /dev/null
+++ b/tests/config.bats
@@ -0,0 +1,433 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "config-flags-order-verification" {
+ run_buildah 125 config cnt1 --author=user1
+ check_options_flag_err "--author=user1"
+
+ run_buildah 125 config cnt1 --arch x86_54
+ check_options_flag_err "--arch"
+
+ run_buildah 125 config cnt1 --created-by buildahcli --cmd "/usr/bin/run.sh" --hostname "localhost1"
+ check_options_flag_err "--created-by"
+
+ run_buildah 125 config cnt1 --annotation=service=cache
+ check_options_flag_err "--annotation=service=cache"
+}
+
+@test "config-flags-verification" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --label LABEL $cid
+ run_buildah config --annotation ANNOTATION $cid
+
+ run_buildah 125 config --healthcheck 'AB "CD' $cid
+ expect_output --substring 'parsing --healthcheck "AB \\"CD": invalid command line string'
+
+ run_buildah 125 config --healthcheck-interval ABCD $cid
+ expect_output --substring 'parsing --healthcheck-interval "ABCD": time: invalid duration "?ABCD"?'
+
+ run_buildah 125 config --cmd 'AB "CD' $cid
+ expect_output --substring 'parsing --cmd "AB \\"CD": invalid command line string'
+
+ run_buildah 125 config --env ENV $cid
+ expect_output --substring 'setting env "ENV": no value given'
+
+ run_buildah 125 config --shell 'AB "CD' $cid
+ expect_output --substring 'parsing --shell "AB \\"CD": invalid command line string'
+}
+
+function check_matrix() {
+ local setting=$1
+ local expect=$2
+
+ # matrix test: all permutations of .Docker.* and .OCIv1.* in all image types
+ for image in docker oci; do
+ for which in Docker OCIv1; do
+ run_buildah inspect --type=image --format "{{.$which.$setting}}" scratch-image-$image
+ expect_output "$expect"
+ done
+ done
+}
+
+@test "config entrypoint using single element in JSON array (exec form)" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --entrypoint '[ "/ENTRYPOINT" ]' $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ check_matrix "Config.Entrypoint" '[/ENTRYPOINT]'
+}
+
+@test "config entrypoint using multiple elements in JSON array (exec form)" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --entrypoint '[ "/ENTRYPOINT", "ELEMENT2" ]' $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ check_matrix 'Config.Entrypoint' '[/ENTRYPOINT ELEMENT2]'
+}
+
+@test "config entrypoint using string (shell form)" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --entrypoint /ENTRYPOINT $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ check_matrix 'Config.Entrypoint' '[/bin/sh -c /ENTRYPOINT]'
+}
+
+@test "config --unsetlabel" {
+ _prefetch registry.fedoraproject.org/fedora-minimal
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON registry.fedoraproject.org/fedora-minimal
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON $cid with-name-label
+ run_buildah config --unsetlabel name $cid
+ run_buildah commit $WITH_POLICY_JSON $cid without-name-label
+
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "name"}}' with-name-label
+ expect_output "fedora" "name label should be set with value as fedora"
+ run_buildah inspect --format '{{ index .Docker.Config.Labels "name"}}' without-name-label
+ expect_output "" "name label should be removed"
+}
+
+@test "config set empty entrypoint doesn't wipe cmd" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --cmd "command" $cid
+ run_buildah config --entrypoint "" $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ check_matrix 'Config.Cmd' '[command]'
+}
+
+@test "config cmd without entrypoint" {
+ run_buildah from --pull-never $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config \
+ --cmd COMMAND-OR-ARGS \
+ $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ check_matrix 'Config.Cmd' '[COMMAND-OR-ARGS]'
+ check_matrix 'Config.Entrypoint' '[]'
+}
+
+@test "config entrypoint with cmd" {
+ run_buildah from --pull-never $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config \
+ --entrypoint /ENTRYPOINT \
+ --cmd COMMAND-OR-ARGS \
+ $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ check_matrix 'Config.Cmd' '[COMMAND-OR-ARGS]'
+
+ run_buildah from --pull-never $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config \
+ --entrypoint /ENTRYPOINT \
+ $cid
+
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ check_matrix 'Config.Cmd' '[]'
+
+ run_buildah config \
+ --entrypoint /ENTRYPOINT \
+ --cmd COMMAND-OR-ARGS \
+ $cid
+
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+ check_matrix 'Config.Cmd' '[COMMAND-OR-ARGS]'
+
+ run_buildah config \
+ --entrypoint /ENTRYPOINT \
+ --cmd '[ "/COMMAND", "ARG1", "ARG2"]' \
+ $cid
+
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+ check_matrix 'Config.Cmd' '[/COMMAND ARG1 ARG2]'
+}
+
+@test "config remove all" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config \
+ --port 12345 \
+ --annotation ANNOTATION=VALUE1,VALUE2 \
+ --env VARIABLE=VALUE1,VALUE2 \
+ --volume /VOLUME \
+ --label LABEL=VALUE \
+ $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ run_buildah inspect --type=image --format '{{index .ImageAnnotations "ANNOTATION"}}' scratch-image-oci
+ expect_output "VALUE1,VALUE2"
+ run_buildah inspect --format '{{index .ImageAnnotations "ANNOTATION"}}' $cid
+ expect_output "VALUE1,VALUE2"
+ check_matrix 'Config.ExposedPorts' 'map[12345:{}]'
+ check_matrix 'Config.Env' '[VARIABLE=VALUE1,VALUE2]'
+ check_matrix 'Config.Labels.LABEL' 'VALUE'
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config \
+ --port - \
+ --annotation - \
+ --env - \
+ --volume - \
+ --label - \
+ $cid
+
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ run_buildah inspect --type=image --format '{{.ImageAnnotations}}' scratch-image-oci
+ expect_output "map[]"
+ run_buildah inspect --format '{{.ImageAnnotations}}' $cid
+ expect_output "map[]"
+ check_matrix 'Config.ExposedPorts' 'map[]'
+ check_matrix 'Config.Env' '[]'
+ check_matrix 'Config.Labels.LABEL' '<no value>'
+}
+
+@test "config" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config \
+ --author TESTAUTHOR \
+ --created-by COINCIDENCE \
+ --arch amd64 \
+ --os linux \
+ --variant abc \
+ --user likes:things \
+ --port 12345 \
+ --env VARIABLE=VALUE1,VALUE2 \
+ --entrypoint /ENTRYPOINT \
+ --cmd COMMAND-OR-ARGS \
+ --comment INFORMATIVE \
+ --history-comment PROBABLY-EMPTY \
+ --volume /VOLUME \
+ --workingdir /tmp \
+ --label LABEL=VALUE \
+ --label exec='podman run -it --mount=type=bind,bind-propagation=Z,source=foo,destination=bar /script buz'\
+ --stop-signal SIGINT \
+ --annotation ANNOTATION=VALUE1,VALUE2 \
+ --shell /bin/arbitrarysh \
+ --domainname mydomain.local \
+ --hostname cleverhostname \
+ --healthcheck "CMD /bin/true" \
+ --healthcheck-start-period 5s \
+ --healthcheck-interval 6s \
+ --healthcheck-timeout 7s \
+ --healthcheck-retries 8 \
+ --onbuild "RUN touch /foo" \
+ --os-version "1.0" \
+ --os-feature dynamic --os-feature - --os-feature removed --os-feature removed- --os-feature win32k \
+ $cid
+
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+ check_matrix 'Author' 'TESTAUTHOR'
+ check_matrix 'Architecture' 'amd64'
+ check_matrix 'OS' 'linux'
+ check_matrix 'Variant' 'abc'
+ check_matrix 'OSVersion' '1.0'
+ check_matrix 'OSFeatures' '[win32k]'
+
+ run_buildah inspect --format '{{.ImageCreatedBy}}' $cid
+ expect_output "COINCIDENCE"
+
+ check_matrix 'Config.Cmd' '[COMMAND-OR-ARGS]'
+ check_matrix 'Config.Entrypoint' '[/bin/sh -c /ENTRYPOINT]'
+ check_matrix 'Config.Env' '[VARIABLE=VALUE1,VALUE2]'
+ check_matrix 'Config.ExposedPorts' 'map[12345:{}]'
+ check_matrix 'Config.Labels.exec' 'podman run -it --mount=type=bind,bind-propagation=Z,source=foo,destination=bar /script buz'
+ check_matrix 'Config.Labels.LABEL' 'VALUE'
+ check_matrix 'Config.StopSignal' 'SIGINT'
+ check_matrix 'Config.User' 'likes:things'
+ check_matrix 'Config.Volumes' "map[/VOLUME:{}]"
+ check_matrix 'Config.WorkingDir' '/tmp'
+
+ run_buildah inspect --type=image --format '{{(index .Docker.History 0).Comment}}' scratch-image-docker
+ expect_output "PROBABLY-EMPTY"
+ run_buildah inspect --type=image --format '{{(index .OCIv1.History 0).Comment}}' scratch-image-docker
+ expect_output "PROBABLY-EMPTY"
+ run_buildah inspect --type=image --format '{{(index .Docker.History 0).Comment}}' scratch-image-oci
+ expect_output "PROBABLY-EMPTY"
+ run_buildah inspect --type=image --format '{{(index .OCIv1.History 0).Comment}}' scratch-image-oci
+ expect_output "PROBABLY-EMPTY"
+
+ # The following aren't part of the Docker v2 spec, so they're discarded when we save to Docker format.
+ run_buildah inspect --type=image --format '{{index .ImageAnnotations "ANNOTATION"}}' scratch-image-oci
+ expect_output "VALUE1,VALUE2"
+ run_buildah inspect --format '{{index .ImageAnnotations "ANNOTATION"}}' $cid
+ expect_output "VALUE1,VALUE2"
+ run_buildah inspect --type=image --format '{{.Docker.Comment}}' scratch-image-docker
+ expect_output "INFORMATIVE"
+ run_buildah inspect --type=image --format '{{.Docker.Config.Domainname}}' scratch-image-docker
+ expect_output "mydomain.local"
+ run_buildah inspect --type=image --format '{{.Docker.Config.Hostname}}' scratch-image-docker
+ expect_output "cleverhostname"
+ run_buildah inspect --type=image --format '{{.Docker.Config.Shell}}' scratch-image-docker
+ expect_output "[/bin/arbitrarysh]"
+ run_buildah inspect -f '{{.Docker.Config.Healthcheck.Test}}' scratch-image-docker
+ expect_output "[CMD /bin/true]"
+ run_buildah inspect -f '{{.Docker.Config.Healthcheck.StartPeriod}}' scratch-image-docker
+ expect_output "5s"
+ run_buildah inspect -f '{{.Docker.Config.Healthcheck.Interval}}' scratch-image-docker
+ expect_output "6s"
+ run_buildah inspect -f '{{.Docker.Config.Healthcheck.Timeout}}' scratch-image-docker
+ expect_output "7s"
+ run_buildah inspect -f '{{.Docker.Config.Healthcheck.Retries}}' scratch-image-docker
+ expect_output "8"
+ run_buildah inspect -f '{{.Docker.Config.OnBuild}}' scratch-image-docker
+ expect_output "[RUN touch /foo]"
+ rm -rf /VOLUME
+}
+
+@test "config env using local environment" {
+ export foo=bar
+ run_buildah from --pull-never $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --env 'foo' $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid env-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid env-image-oci
+
+ run_buildah inspect --type=image --format '{{.Docker.Config.Env}}' env-image-docker
+ expect_output --substring "foo=bar"
+
+ run_buildah inspect --type=image --format '{{.OCIv1.Config.Env}}' env-image-docker
+ expect_output --substring "foo=bar"
+}
+
+@test "docker formatted builds must inherit healthcheck from base image" {
+ _prefetch busybox
+ ctxdir=${TEST_SCRATCH_DIR}/bud
+ mkdir -p $ctxdir
+ cat >$ctxdir/Dockerfile <<EOF
+FROM busybox
+HEALTHCHECK CMD curl --fail http://localhost:3000 || exit 1
+EOF
+
+ run_buildah build --format docker $WITH_POLICY_JSON -t test ${ctxdir}
+
+ cat >$ctxdir/Dockerfile <<EOF
+FROM test
+RUN echo hello
+EOF
+
+ run_buildah build --format docker $WITH_POLICY_JSON -t test2 ${ctxdir}
+ run_buildah inspect --type=image --format '{{.Docker.ContainerConfig.Healthcheck.Test}}' test2
+ expect_output --substring "localhost:3000"
+}
+
+@test "config env using --env expansion" {
+ run_buildah from --pull-never $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --env 'foo=bar' --env 'foo1=bar1' $cid
+ run_buildah config --env 'combined=$foo/${foo1}' $cid
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid env-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid env-image-oci
+
+ run_buildah inspect --type=image --format '{{.Docker.Config.Env}}' env-image-docker
+ expect_output --substring "combined=bar/bar1"
+
+ run_buildah inspect --type=image --format '{{.OCIv1.Config.Env}}' env-image-docker
+ expect_output --substring "combined=bar/bar1"
+}
+
+@test "user" {
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid grep CapBnd /proc/self/status
+ bndoutput=$output
+ run_buildah config --user 1000 $cid
+ run_buildah run $cid id -u
+ expect_output "1000"
+
+ run_buildah run $cid sh -c "grep CapEff /proc/self/status | cut -f2"
+ expect_output "0000000000000000"
+
+ run_buildah run $cid grep CapBnd /proc/self/status
+ expect_output "$bndoutput"
+}
+
+@test "remove configs using '-' syntax" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config \
+ --created-by COINCIDENCE \
+ --volume /VOLUME \
+ --env VARIABLE=VALUE1,VALUE2 \
+ --label LABEL=VALUE \
+ --port 12345 \
+ --annotation ANNOTATION=VALUE1,VALUE2 \
+ $cid
+
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+ run_buildah inspect --format '{{.ImageCreatedBy}}' $cid
+ expect_output "COINCIDENCE"
+
+ check_matrix 'Config.Volumes' "map[/VOLUME:{}]"
+ check_matrix 'Config.Env' '[VARIABLE=VALUE1,VALUE2]'
+ check_matrix 'Config.Labels.LABEL' 'VALUE'
+ check_matrix 'Config.ExposedPorts' 'map[12345:{}]'
+ run_buildah inspect --type=image --format '{{index .ImageAnnotations "ANNOTATION"}}' scratch-image-oci
+ expect_output "VALUE1,VALUE2"
+ run_buildah inspect --format '{{index .ImageAnnotations "ANNOTATION"}}' $cid
+ expect_output "VALUE1,VALUE2"
+
+ run_buildah config \
+ --created-by COINCIDENCE \
+ --volume /VOLUME- \
+ --env VARIABLE- \
+ --label LABEL- \
+ --port 12345- \
+ --annotation ANNOTATION- \
+ $cid
+
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+ run_buildah inspect --format '{{.ImageCreatedBy}}' $cid
+ expect_output "COINCIDENCE"
+ check_matrix 'Config.Volumes' 'map[]'
+ check_matrix 'Config.Env' '[]'
+ check_matrix 'Config.Labels.LABEL' '<no value>'
+ check_matrix 'Config.ExposedPorts' 'map[]'
+ run_buildah inspect --type=image --format '{{index .ImageAnnotations "ANNOTATION"}}' scratch-image-oci
+ expect_output ""
+ run_buildah inspect --format '{{index .ImageAnnotations "ANNOTATION"}}' $cid
+ expect_output ""
+
+ run_buildah config \
+ --created-by COINCIDENCE \
+ --volume /VOLUME- \
+ --env VARIABLE=VALUE1,VALUE2 \
+ --label LABEL=VALUE \
+ --annotation ANNOTATION=VALUE1,VALUE2 \
+ $cid
+
+ run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+ run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+ run_buildah inspect --format '{{.ImageCreatedBy}}' $cid
+ expect_output "COINCIDENCE"
+
+ check_matrix 'Config.Volumes' "map[]"
+}
diff --git a/tests/conformance/README.md b/tests/conformance/README.md
new file mode 100644
index 0000000..b07ac5f
--- /dev/null
+++ b/tests/conformance/README.md
@@ -0,0 +1,39 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Buildah/Docker Conformance Test Suite
+
+The conformance test for buildah is used to verify that images built with the Buildah library are equivalent to those built by Docker. It does this by building an image using the version of the buildah library that's being tested, building what should be the same image using the docker engine's build API, and comparing them.
+
+## Installing dependencies
+
+The additional dependencies for conformance testing are:
+ * docker
+
+### Install Docker CE
+
+Conformance tests use Docker CE to build images to be compared with images built with Buildah. Install Docker CE with dnf, yum or apt-get, based on your distribution and verify that the `docker` service is started. In Fedora, RHEL and CentOS `docker` or `moby-engine` rather than Docker CE may be installed by default. In Debian or Ubuntu you may instead have the `docker.io` package. Please verify that you install at least version 19.03.
+
+## Run conformance tests
+
+First, pull base images used by various conformance tests:
+```bash
+# base images used by the conformance tests
+docker pull alpine
+docker pull busybox
+docker pull quay.io/libpod/centos:7
+```
+
+Then you can run all of the tests with go test:
+```
+go test -v -timeout=30m -tags "$(./btrfs_tag.sh) $(./btrfs_installed_tag.sh)" ./tests/conformance
+```
+
+If you want to run only one of the test cases, you can use the `-run` flag:
+```
+go test -v -timeout=30m -tags "$(./btrfs_tag.sh) $(./btrfs_installed_tag.sh)" -run TestConformance/shell ./tests/conformance
+```
+
+If you also want to build and compare on a line-by-line basis, run:
+```
+go test -v -timeout=60m -tags "$(./btrfs_tag.sh) $(./btrfs_installed_tag.sh)" ./tests/conformance -compare-layers
+```
diff --git a/tests/conformance/conformance_test.go b/tests/conformance/conformance_test.go
new file mode 100644
index 0000000..62d701c
--- /dev/null
+++ b/tests/conformance/conformance_test.go
@@ -0,0 +1,3587 @@
+package conformance
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "testing"
+ "text/tabwriter"
+ "time"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/copier"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/imagebuildah"
+ "github.com/containers/buildah/internal/config"
+ "github.com/containers/image/v5/docker/daemon"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ is "github.com/containers/image/v5/storage"
+ istorage "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/reexec"
+ dockertypes "github.com/docker/docker/api/types"
+ dockerdockerclient "github.com/docker/docker/client"
+ docker "github.com/fsouza/go-dockerclient"
+ digest "github.com/opencontainers/go-digest"
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/openshift/imagebuilder"
+ "github.com/openshift/imagebuilder/dockerclient"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06, from archive/tar
+ cISUID = 04000 // Set uid, from archive/tar
+ cISGID = 02000 // Set gid, from archive/tar
+ cISVTX = 01000 // Save text (sticky bit), from archive/tar
+)
+
+var (
+ originalSkip = []string{
+ "created",
+ "container",
+ "docker_version",
+ "container_config:hostname",
+ "config:hostname",
+ "config:image",
+ "container_config:cmd",
+ "container_config:image",
+ "history",
+ "rootfs:diff_ids",
+ "moby.buildkit.buildinfo.v1",
+ }
+ ociSkip = []string{
+ "created",
+ "history",
+ "rootfs:diff_ids",
+ }
+ fsSkip = []string{
+ // things that we volume mount or synthesize for RUN statements that currently bleed through
+ "(dir):etc:mtime",
+ "(dir):etc:(dir):hosts",
+ "(dir):etc:(dir):resolv.conf",
+ "(dir):run",
+ "(dir):run:mtime",
+ "(dir):run:(dir):.containerenv",
+ "(dir):run:(dir):secrets",
+ "(dir):proc",
+ "(dir):proc:mtime",
+ "(dir):sys",
+ "(dir):sys:mtime",
+ }
+ testDate = time.Unix(1485449953, 0)
+ compareLayers = false
+ compareImagebuilder = false
+ testDataDir = ""
+ dockerDir = ""
+ imagebuilderDir = ""
+ buildahDir = ""
+ contextCanDoXattrs *bool
+ storageCanDoXattrs *bool
+)
+
+func TestMain(m *testing.M) {
+ var logLevel string
+ if reexec.Init() {
+ return
+ }
+
+ cwd, err := os.Getwd()
+ if err != nil {
+ logrus.Fatalf("error finding current directory: %v", err)
+ }
+ testDataDir = filepath.Join(cwd, "testdata")
+
+ flag.StringVar(&logLevel, "log-level", "error", "buildah logging log level")
+ flag.BoolVar(&compareLayers, "compare-layers", compareLayers, "compare instruction-by-instruction")
+ flag.BoolVar(&compareImagebuilder, "compare-imagebuilder", compareImagebuilder, "also compare using imagebuilder")
+ flag.StringVar(&testDataDir, "testdata", testDataDir, "location of conformance testdata")
+ flag.StringVar(&dockerDir, "docker-dir", dockerDir, "location to save docker build results")
+ flag.StringVar(&imagebuilderDir, "imagebuilder-dir", imagebuilderDir, "location to save imagebuilder build results")
+ flag.StringVar(&buildahDir, "buildah-dir", buildahDir, "location to save buildah build results")
+ flag.Parse()
+ var tempdir string
+ if buildahDir == "" || dockerDir == "" || imagebuilderDir == "" {
+ if tempdir == "" {
+ if tempdir, err = os.MkdirTemp("", "conformance"); err != nil {
+ logrus.Fatalf("creating temporary directory: %v", err)
+ os.Exit(1)
+ }
+ }
+ }
+ if buildahDir == "" {
+ buildahDir = filepath.Join(tempdir, "buildah")
+ }
+ if dockerDir == "" {
+ dockerDir = filepath.Join(tempdir, "docker")
+ }
+ if imagebuilderDir == "" {
+ imagebuilderDir = filepath.Join(tempdir, "imagebuilder")
+ }
+ level, err := logrus.ParseLevel(logLevel)
+ if err != nil {
+ logrus.Fatalf("error parsing log level %q: %v", logLevel, err)
+ }
+ logrus.SetLevel(level)
+ result := m.Run()
+ if err = os.RemoveAll(tempdir); err != nil {
+ logrus.Errorf("removing temporary directory %q: %v", tempdir, err)
+ }
+ os.Exit(result)
+}
+
+func TestConformance(t *testing.T) {
+ dateStamp := fmt.Sprintf("%d", time.Now().UnixNano())
+ for i := range internalTestCases {
+ t.Run(internalTestCases[i].name, func(t *testing.T) {
+ testConformanceInternal(t, dateStamp, i)
+ })
+ }
+}
+
+func testConformanceInternal(t *testing.T, dateStamp string, testIndex int) {
+ test := internalTestCases[testIndex]
+ ctx := context.TODO()
+
+ cwd, err := os.Getwd()
+ require.NoError(t, err, "error finding current directory")
+
+ // create a temporary directory to hold our build context
+ tempdir := t.TempDir()
+
+ // create subdirectories to use as the build context and for buildah storage
+ contextDir := filepath.Join(tempdir, "context")
+ rootDir := filepath.Join(tempdir, "root")
+ runrootDir := filepath.Join(tempdir, "runroot")
+
+ // check if we can test xattrs where we're storing build contexts
+ if contextCanDoXattrs == nil {
+ testDir := filepath.Join(tempdir, "test")
+ if err := os.Mkdir(testDir, 0700); err != nil {
+ require.NoErrorf(t, err, "error creating test directory to check if xattrs are testable: %v", err)
+ }
+ testFile := filepath.Join(testDir, "testfile")
+ if err := os.WriteFile(testFile, []byte("whatever"), 0600); err != nil {
+ require.NoErrorf(t, err, "error creating test file to check if xattrs are testable: %v", err)
+ }
+ can := false
+ if err := copier.Lsetxattrs(testFile, map[string]string{"user.test": "test"}); err == nil {
+ can = true
+ }
+ contextCanDoXattrs = &can
+ }
+
+ // copy either a directory or just a Dockerfile into the temporary directory
+ pipeReader, pipeWriter := io.Pipe()
+ var getErr, putErr error
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ if test.contextDir != "" {
+ getErr = copier.Get("", testDataDir, copier.GetOptions{}, []string{test.contextDir}, pipeWriter)
+ } else if test.dockerfile != "" {
+ getErr = copier.Get("", testDataDir, copier.GetOptions{}, []string{test.dockerfile}, pipeWriter)
+ }
+ pipeWriter.Close()
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ if test.contextDir != "" || test.dockerfile != "" {
+ putErr = copier.Put("", contextDir, copier.PutOptions{}, pipeReader)
+ } else {
+ putErr = os.Mkdir(contextDir, 0755)
+ }
+ pipeReader.Close()
+ wg.Done()
+ }()
+ wg.Wait()
+ assert.NoErrorf(t, getErr, "error reading build info from %q", filepath.Join("testdata", test.dockerfile))
+ assert.NoErrorf(t, putErr, "error writing build info to %q", contextDir)
+ if t.Failed() {
+ t.FailNow()
+ }
+
+ // construct the names that we want to assign to the images. these should be reasonably unique
+ buildahImage := fmt.Sprintf("conformance-buildah:%s-%d", dateStamp, testIndex)
+ dockerImage := fmt.Sprintf("conformance-docker:%s-%d", dateStamp, testIndex)
+ imagebuilderImage := fmt.Sprintf("conformance-imagebuilder:%s-%d", dateStamp, testIndex)
+
+ // compute the name of the Dockerfile in the build context directory
+ var dockerfileName string
+ if test.dockerfile != "" {
+ dockerfileName = filepath.Join(contextDir, test.dockerfile)
+ } else {
+ dockerfileName = filepath.Join(contextDir, "Dockerfile")
+ }
+
+ // read the Dockerfile, for inclusion in failure messages
+ dockerfileContents := []byte(test.dockerfileContents)
+ if len(dockerfileContents) == 0 {
+ // no inlined contents -> read them from the specified location
+ contents, err := os.ReadFile(dockerfileName)
+ require.NoErrorf(t, err, "error reading Dockerfile %q", filepath.Join(tempdir, dockerfileName))
+ dockerfileContents = contents
+ }
+
+ // initialize storage for buildah
+ options := storage.StoreOptions{
+ GraphDriverName: os.Getenv("STORAGE_DRIVER"),
+ GraphRoot: rootDir,
+ RunRoot: runrootDir,
+ RootlessStoragePath: rootDir,
+ }
+ store, err := storage.GetStore(options)
+ require.NoErrorf(t, err, "error creating buildah storage at %q", rootDir)
+ defer func() {
+ if store != nil {
+ _, err := store.Shutdown(true)
+ require.NoError(t, err, "error shutting down storage for buildah")
+ }
+ }()
+ storageDriver := store.GraphDriverName()
+ storageRoot := store.GraphRoot()
+
+ // now that we have a Store, check if we can test xattrs in storage layers
+ if storageCanDoXattrs == nil {
+ layer, err := store.CreateLayer("", "", nil, "", true, nil)
+ if err != nil {
+ require.NoErrorf(t, err, "error creating test layer to check if xattrs are testable: %v", err)
+ }
+ mountPoint, err := store.Mount(layer.ID, "")
+ if err != nil {
+ require.NoErrorf(t, err, "error mounting test layer to check if xattrs are testable: %v", err)
+ }
+ testFile := filepath.Join(mountPoint, "testfile")
+ if err := os.WriteFile(testFile, []byte("whatever"), 0600); err != nil {
+ require.NoErrorf(t, err, "error creating file in test layer to check if xattrs are testable: %v", err)
+ }
+ can := false
+ if err := copier.Lsetxattrs(testFile, map[string]string{"user.test": "test"}); err == nil {
+ can = true
+ }
+ storageCanDoXattrs = &can
+ err = store.DeleteLayer(layer.ID)
+ if err != nil {
+ require.NoErrorf(t, err, "error removing test layer after checking if xattrs are testable: %v", err)
+ }
+ }
+
+ // connect to dockerd using the docker client library
+ dockerClient, err := dockerdockerclient.NewClientWithOpts(dockerdockerclient.FromEnv)
+ require.NoError(t, err, "unable to initialize docker.client")
+ dockerClient.NegotiateAPIVersion(ctx)
+ if test.dockerUseBuildKit {
+ if err := dockerClient.NewVersionError("1.38", "buildkit"); err != nil {
+ t.Skipf("%v", err)
+ }
+ }
+
+ // connect to dockerd using go-dockerclient
+ client, err := docker.NewClientFromEnv()
+ require.NoError(t, err, "unable to initialize docker client")
+ var dockerVersion []string
+ if version, err := client.Version(); err == nil {
+ if version != nil {
+ for _, s := range *version {
+ dockerVersion = append(dockerVersion, s)
+ }
+ }
+ } else {
+ require.NoError(t, err, "unable to connect to docker daemon")
+ }
+
+ // make any last-minute tweaks to the build context directory that this test requires
+ if test.tweakContextDir != nil {
+ err = test.tweakContextDir(t, contextDir, storageDriver, storageRoot)
+ require.NoErrorf(t, err, "error tweaking context directory using test-specific callback: %v", err)
+ }
+
+ // decide whether we're building just one image for this Dockerfile, or
+ // one for each line in it after the first, which we'll assume is a FROM
+ if compareLayers {
+ // build and compare one line at a time
+ line := 1
+ for i := range dockerfileContents {
+ // scan the byte slice for newlines or the end of the slice, and build using the contents up to that point
+ if i == len(dockerfileContents)-1 || (dockerfileContents[i] == '\n' && (i == 0 || dockerfileContents[i-1] != '\\')) {
+ if line > 1 || !bytes.HasPrefix(dockerfileContents, []byte("FROM ")) {
+ // hack: skip trying to build just the first FROM line
+ t.Run(fmt.Sprintf("%d", line), func(t *testing.T) {
+ testConformanceInternalBuild(ctx, t, cwd, store, client, dockerClient, fmt.Sprintf("%s.%d", buildahImage, line), fmt.Sprintf("%s.%d", dockerImage, line), fmt.Sprintf("%s.%d", imagebuilderImage, line), contextDir, dockerfileName, dockerfileContents[:i+1], test, line, i == len(dockerfileContents)-1, dockerVersion)
+ })
+ }
+ line++
+ }
+ }
+ } else {
+ // build to completion
+ testConformanceInternalBuild(ctx, t, cwd, store, client, dockerClient, buildahImage, dockerImage, imagebuilderImage, contextDir, dockerfileName, dockerfileContents, test, 0, true, dockerVersion)
+ }
+}
+
// testConformanceInternalBuild performs a single conformance build pass: it
// writes the supplied Dockerfile contents into the build context, builds with
// docker (unless the case excludes it), optionally with imagebuilder, and
// always with buildah, saves a report for each build, and then compares the
// resulting image configurations and filesystem contents across builders.
//
// line is the Dockerfile line the contents have been truncated at when
// building line-by-line (0 when building the whole file at once), and
// finalOfSeveral marks the last build for this case, at which point
// layer-level reports may also be saved one directory up.
func testConformanceInternalBuild(ctx context.Context, t *testing.T, cwd string, store storage.Store, client *docker.Client, dockerClient *dockerdockerclient.Client, buildahImage, dockerImage, imagebuilderImage, contextDir, dockerfileName string, dockerfileContents []byte, test testCase, line int, finalOfSeveral bool, dockerVersion []string) {
	var buildahLog, dockerLog, imagebuilderLog []byte
	var buildahRef, dockerRef, imagebuilderRef types.ImageReference

	// overwrite the Dockerfile in the build context for this run using the
	// contents we were passed, which may only be an initial subset of the
	// original file, or inlined information, in which case the file didn't
	// necessarily exist
	err := os.WriteFile(dockerfileName, dockerfileContents, 0644)
	require.NoErrorf(t, err, "error writing Dockerfile at %q", dockerfileName)
	// pin the timestamps on the Dockerfile and the context directory so
	// that they don't vary between runs and show up as differences
	err = os.Chtimes(dockerfileName, testDate, testDate)
	require.NoErrorf(t, err, "error resetting timestamp on Dockerfile at %q", dockerfileName)
	err = os.Chtimes(contextDir, testDate, testDate)
	require.NoErrorf(t, err, "error resetting timestamp on context directory at %q", contextDir)

	// on failure, log enough to reproduce the run: where the context and
	// Dockerfile came from, the Dockerfile contents, and any .dockerignore
	defer func() {
		if t.Failed() {
			if test.contextDir != "" {
				t.Logf("Context %q", filepath.Join(cwd, "testdata", test.contextDir))
			}
			if test.dockerfile != "" {
				if test.contextDir != "" {
					t.Logf("Dockerfile: %q", filepath.Join(cwd, "testdata", test.contextDir, test.dockerfile))
				} else {
					t.Logf("Dockerfile: %q", filepath.Join(cwd, "testdata", test.dockerfile))
				}
			}
			// make a missing final newline visible in the log
			if !bytes.HasSuffix(dockerfileContents, []byte{'\n'}) && !bytes.HasSuffix(dockerfileContents, []byte{'\r'}) {
				dockerfileContents = append(dockerfileContents, []byte("\n(no final end-of-line)")...)
			}
			t.Logf("Dockerfile contents:\n%s", dockerfileContents)
			if dockerignoreContents, err := os.ReadFile(filepath.Join(contextDir, ".dockerignore")); err == nil {
				t.Logf(".dockerignore contents:\n%s", string(dockerignoreContents))
			}
		}
	}()

	// build using docker
	if !test.withoutDocker {
		dockerRef, dockerLog = buildUsingDocker(ctx, t, client, dockerClient, test, dockerImage, contextDir, dockerfileName, line, finalOfSeveral)
		if dockerRef != nil {
			// clean up the image after all reports have been saved and compared
			defer func() {
				err := client.RemoveImageExtended(dockerImage, docker.RemoveImageOptions{
					Context: ctx,
					Force:   true,
				})
				assert.Nil(t, err, "error deleting newly-built-by-docker image %q", dockerImage)
			}()
		}
		saveReport(ctx, t, dockerRef, filepath.Join(dockerDir, t.Name()), dockerfileContents, dockerLog, dockerVersion)
		// when comparing layer-by-layer, also save the final report one level up
		if finalOfSeveral && compareLayers {
			saveReport(ctx, t, dockerRef, filepath.Join(dockerDir, t.Name(), ".."), dockerfileContents, dockerLog, dockerVersion)
		}
	}

	if t.Failed() {
		t.FailNow()
	}

	// build using imagebuilder if we're testing with it, too
	if compareImagebuilder && !test.withoutImagebuilder {
		imagebuilderRef, imagebuilderLog = buildUsingImagebuilder(t, client, test, imagebuilderImage, contextDir, dockerfileName, line, finalOfSeveral)
		if imagebuilderRef != nil {
			defer func() {
				err := client.RemoveImageExtended(imagebuilderImage, docker.RemoveImageOptions{
					Context: ctx,
					Force:   true,
				})
				assert.Nil(t, err, "error deleting newly-built-by-imagebuilder image %q", imagebuilderImage)
			}()
		}
		saveReport(ctx, t, imagebuilderRef, filepath.Join(imagebuilderDir, t.Name()), dockerfileContents, imagebuilderLog, dockerVersion)
		if finalOfSeveral && compareLayers {
			saveReport(ctx, t, imagebuilderRef, filepath.Join(imagebuilderDir, t.Name(), ".."), dockerfileContents, imagebuilderLog, dockerVersion)
		}
	}

	if t.Failed() {
		t.FailNow()
	}

	// always build using buildah
	buildahRef, buildahLog = buildUsingBuildah(ctx, t, store, test, buildahImage, contextDir, dockerfileName, line, finalOfSeveral)
	if buildahRef != nil {
		defer func() {
			err := buildahRef.DeleteImage(ctx, nil)
			assert.Nil(t, err, "error deleting newly-built-by-buildah image %q", buildahImage)
		}()
	}
	saveReport(ctx, t, buildahRef, filepath.Join(buildahDir, t.Name()), dockerfileContents, buildahLog, nil)
	if finalOfSeveral && compareLayers {
		saveReport(ctx, t, buildahRef, filepath.Join(buildahDir, t.Name(), ".."), dockerfileContents, buildahLog, nil)
	}

	if t.Failed() {
		t.FailNow()
	}

	if test.shouldFailAt != 0 {
		// the build is expected to fail, so there's no point in comparing information about any images
		return
	}

	// the report on the buildah image should always be there
	_, originalBuildahConfig, ociBuildahConfig, fsBuildah := readReport(t, filepath.Join(buildahDir, t.Name()))
	if t.Failed() {
		t.FailNow()
	}
	// deleteLabel strips a label from both the "config" and
	// "container_config" sub-objects of a decoded configuration, so that
	// a label only buildah sets doesn't register as a difference
	deleteLabel := func(config map[string]interface{}, label string) {
		for _, configName := range []string{"config", "container_config"} {
			if configStruct, ok := config[configName]; ok {
				if configMap, ok := configStruct.(map[string]interface{}); ok {
					if labels, ok := configMap["Labels"]; ok {
						if labelMap, ok := labels.(map[string]interface{}); ok {
							delete(labelMap, label)
						}
					}
				}
			}
		}
	}
	deleteLabel(originalBuildahConfig, buildah.BuilderIdentityAnnotation)
	deleteLabel(ociBuildahConfig, buildah.BuilderIdentityAnnotation)

	var originalDockerConfig, ociDockerConfig, fsDocker map[string]interface{}

	// the report on the docker image should be there if we expected the build to succeed
	if !test.withoutDocker {
		var mediaType string
		mediaType, originalDockerConfig, ociDockerConfig, fsDocker = readReport(t, filepath.Join(dockerDir, t.Name()))
		assert.Equal(t, manifest.DockerV2Schema2MediaType, mediaType, "Image built by docker build didn't use Docker MIME type - tests require update")
		if t.Failed() {
			t.FailNow()
		}

		miss, left, diff, same := compareJSON(originalDockerConfig, originalBuildahConfig, originalSkip)
		if !same {
			assert.Failf(t, "Image configurations differ as committed in Docker format", configCompareResult(miss, left, diff, "buildah"))
		}
		miss, left, diff, same = compareJSON(ociDockerConfig, ociBuildahConfig, ociSkip)
		if !same {
			assert.Failf(t, "Image configurations differ when converted to OCI format", configCompareResult(miss, left, diff, "buildah"))
		}
		miss, left, diff, same = compareJSON(fsDocker, fsBuildah, append(fsSkip, test.fsSkip...))
		if !same {
			assert.Failf(t, "Filesystem contents differ", fsCompareResult(miss, left, diff, "buildah"))
		}
	}

	// the report on the imagebuilder image should be there if we expected the build to succeed
	if compareImagebuilder && !test.withoutImagebuilder {
		// re-read the docker report from disk for this comparison,
		// presumably because compareJSON can consume entries from the
		// maps it is handed — TODO confirm
		_, originalDockerConfig, ociDockerConfig, fsDocker = readReport(t, filepath.Join(dockerDir, t.Name()))
		if t.Failed() {
			t.FailNow()
		}

		_, originalImagebuilderConfig, ociImagebuilderConfig, fsImagebuilder := readReport(t, filepath.Join(imagebuilderDir, t.Name()))
		if t.Failed() {
			t.FailNow()
		}

		// compare the reports between docker and imagebuilder
		miss, left, diff, same := compareJSON(originalDockerConfig, originalImagebuilderConfig, originalSkip)
		if !same {
			assert.Failf(t, "Image configurations differ as committed in Docker format", configCompareResult(miss, left, diff, "imagebuilder"))
		}
		miss, left, diff, same = compareJSON(ociDockerConfig, ociImagebuilderConfig, ociSkip)
		if !same {
			assert.Failf(t, "Image configurations differ when converted to OCI format", configCompareResult(miss, left, diff, "imagebuilder"))
		}
		miss, left, diff, same = compareJSON(fsDocker, fsImagebuilder, append(fsSkip, test.fsSkip...))
		if !same {
			assert.Failf(t, "Filesystem contents differ", fsCompareResult(miss, left, diff, "imagebuilder"))
		}
	}
}
+
// buildUsingBuildah builds the Dockerfile using buildah's imagebuildah
// package, committing the result to the local storage store under the name
// buildahImage. It returns a reference to the new image (nil if the build
// failed) and the captured build log.
func buildUsingBuildah(ctx context.Context, t *testing.T, store storage.Store, test testCase, buildahImage, contextDir, dockerfileName string, line int, finalOfSeveral bool) (buildahRef types.ImageReference, buildahLog []byte) {
	// buildah tests might be using transient mounts. replace "@@TEMPDIR@@"
	// in such specifications with the path of the context directory
	var transientMounts []string
	for _, mount := range test.transientMounts {
		transientMounts = append(transientMounts, strings.Replace(mount, "@@TEMPDIR@@", contextDir, 1))
	}
	// set up build options
	output := &bytes.Buffer{}
	options := define.BuildOptions{
		ContextDirectory: contextDir,
		CommonBuildOpts:  &define.CommonBuildOptions{},
		// run with the host's network namespace
		NamespaceOptions: []define.NamespaceOption{{
			Name: string(rspec.NetworkNamespace),
			Host: true,
		}},
		TransientMounts: transientMounts,
		Output:          buildahImage,
		// commit in Docker format so the result can be compared with docker's
		OutputFormat: buildah.Dockerv2ImageManifest,
		Out:          output,
		Err:          output,
		// force a full, uncached, layered build, removing intermediate
		// containers as we go
		Layers:                  true,
		NoCache:                 true,
		RemoveIntermediateCtrs:  true,
		ForceRmIntermediateCtrs: true,
	}
	// build the image and gather output. log the output if the build part of the test failed
	imageID, _, err := imagebuildah.BuildDockerfiles(ctx, store, options, dockerfileName)
	if err != nil {
		output.WriteString("\n" + err.Error())
	}

	outputString := output.String()
	defer func() {
		if t.Failed() {
			t.Logf("buildah output:\n%s", outputString)
		}
	}()

	// check that success/failure and the logged output matched expectations
	buildPost(t, test, err, "buildah", outputString, test.buildahRegex, test.buildahErrRegex, line, finalOfSeveral)

	// return a reference to the new image, if we succeeded
	if err == nil {
		buildahRef, err = istorage.Transport.ParseStoreReference(store, imageID)
		assert.Nil(t, err, "error parsing reference to newly-built image with ID %q", imageID)
	}
	return buildahRef, []byte(outputString)
}
+
+func pullImageIfMissing(t *testing.T, client *docker.Client, image string) {
+ if _, err := client.InspectImage(image); err != nil {
+ repository, tag := docker.ParseRepositoryTag(image)
+ if tag == "" {
+ tag = "latest"
+ }
+ pullOptions := docker.PullImageOptions{
+ Repository: repository,
+ Tag: tag,
+ }
+ pullAuths := docker.AuthConfiguration{}
+ if err := client.PullImage(pullOptions, pullAuths); err != nil {
+ t.Fatalf("while pulling %q: %v", image, err)
+ }
+ }
+}
+
// buildUsingDocker builds the Dockerfile using the docker daemon, tagging
// the result as dockerImage. Any "# syntax=" frontend image and the base
// image of each stage are pulled up front if missing. It returns a reference
// to the new image (nil if the build failed) and the captured build log.
func buildUsingDocker(ctx context.Context, t *testing.T, client *docker.Client, dockerClient *dockerdockerclient.Client, test testCase, dockerImage, contextDir, dockerfileName string, line int, finalOfSeveral bool) (dockerRef types.ImageReference, dockerLog []byte) {
	// compute the path of the dockerfile relative to the build context
	dockerfileRelativePath, err := filepath.Rel(contextDir, dockerfileName)
	require.NoErrorf(t, err, "unable to compute path of dockerfile %q relative to context directory %q", dockerfileName, contextDir)

	// read the Dockerfile so that we can pull base images
	dockerfileContent, err := os.ReadFile(dockerfileName)
	require.NoErrorf(t, err, "reading dockerfile %q", dockerfileName)
	// NOTE(review): this loop variable shadows the "line" parameter until
	// the loop ends; the parameter is next used in the buildPost call below,
	// so the shadowing is harmless, but a different name would be clearer
	for _, line := range strings.Split(string(dockerfileContent), "\n") {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "# syntax=") {
			pullImageIfMissing(t, client, strings.TrimPrefix(line, "# syntax="))
		}
	}
	// break the Dockerfile into stages so we can find each stage's base image
	parsed, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfileContent))
	require.NoErrorf(t, err, "parsing dockerfile %q", dockerfileName)
	dummyBuilder := imagebuilder.NewBuilder(nil)
	stages, err := imagebuilder.NewStages(parsed, dummyBuilder)
	require.NoErrorf(t, err, "breaking dockerfile %q up into stages", dockerfileName)
	for i := range stages {
		stageBase, err := dummyBuilder.From(stages[i].Node)
		require.NoErrorf(t, err, "parsing base image for stage %d in %q", i, dockerfileName)
		// nothing to pull for scratch/no-base stages
		if stageBase == "" || stageBase == imagebuilder.NoBaseImageSpecifier {
			continue
		}
		// a stage that builds on an earlier stage's output needs no pull either
		needToEnsureBase := true
		for j := 0; j < i; j++ {
			if stageBase == stages[j].Name {
				needToEnsureBase = false
			}
		}
		if !needToEnsureBase {
			continue
		}
		pullImageIfMissing(t, client, stageBase)
	}

	// tar up the build context, honoring .dockerignore but always including
	// the Dockerfile and the .dockerignore file themselves
	excludes, err := imagebuilder.ParseDockerignore(contextDir)
	require.NoErrorf(t, err, "parsing ignores file in %q", contextDir)
	excludes = append(excludes, "!"+dockerfileRelativePath, "!.dockerignore")
	tarOptions := &archive.TarOptions{
		ExcludePatterns: excludes,
		ChownOpts:       &idtools.IDPair{UID: 0, GID: 0},
	}
	input, err := archive.TarWithOptions(contextDir, tarOptions)
	require.NoErrorf(t, err, "archiving context directory %q", contextDir)
	defer input.Close()

	// set up build options
	output := &bytes.Buffer{}
	options := docker.BuildImageOptions{
		Context:             ctx,
		Dockerfile:          dockerfileRelativePath,
		InputStream:         input,
		OutputStream:        output,
		Name:                dockerImage,
		NoCache:             true,
		RmTmpContainer:      true,
		ForceRmTmpContainer: true,
	}
	if test.dockerUseBuildKit {
		options.Version = docker.BuilderBuildKit
	}
	// build the image and gather output. log the output if the build part of the test failed
	err = client.BuildImage(options)
	if err != nil {
		output.WriteString("\n" + err.Error())
	}
	// drop the daemon's build cache so it can't influence later builds;
	// a prune failure is only logged, not fatal
	if _, err := dockerClient.BuildCachePrune(ctx, dockertypes.BuildCachePruneOptions{All: true}); err != nil {
		t.Logf("docker build cache prune: %v", err)
	}

	outputString := output.String()
	defer func() {
		if t.Failed() {
			t.Logf("docker build output:\n%s", outputString)
		}
	}()

	// check that success/failure and the logged output matched expectations
	buildPost(t, test, err, "docker build", outputString, test.dockerRegex, test.dockerErrRegex, line, finalOfSeveral)

	// return a reference to the new image, if we succeeded
	if err == nil {
		dockerRef, err = daemon.ParseReference(dockerImage)
		assert.Nil(t, err, "error parsing reference to newly-built image with name %q", dockerImage)
	}
	return dockerRef, []byte(outputString)
}
+
// buildUsingImagebuilder builds the Dockerfile using openshift/imagebuilder's
// docker-client executor, tagging the result as imagebuilderImage. It returns
// a reference to the new image (nil if the build failed) and the captured
// build log.
func buildUsingImagebuilder(t *testing.T, client *docker.Client, test testCase, imagebuilderImage, contextDir, dockerfileName string, line int, finalOfSeveral bool) (imagebuilderRef types.ImageReference, imagebuilderLog []byte) {
	// compute the path of the dockerfile relative to the build context
	dockerfileRelativePath, err := filepath.Rel(contextDir, dockerfileName)
	require.NoErrorf(t, err, "unable to compute path of dockerfile %q relative to context directory %q", dockerfileName, contextDir)
	// set up build options
	output := &bytes.Buffer{}
	executor := dockerclient.NewClientExecutor(client)
	executor.Directory = contextDir
	executor.Tag = imagebuilderImage
	executor.AllowPull = true
	executor.Out = output
	executor.ErrOut = output
	executor.LogFn = func(format string, args ...interface{}) {
		fmt.Fprintf(output, "--> %s\n", fmt.Sprintf(format, args...))
	}
	// buildah tests might be using transient mounts. replace "@@TEMPDIR@@"
	// in such specifications with the path of the context directory
	for _, mount := range test.transientMounts {
		var src, dest string
		// a "src:dest" spec has both parts; a bare spec is just a destination
		mountSpec := strings.SplitN(strings.Replace(mount, "@@TEMPDIR@@", contextDir, 1), ":", 2)
		if len(mountSpec) > 1 {
			src = mountSpec[0]
		}
		dest = mountSpec[len(mountSpec)-1]
		executor.TransientMounts = append(executor.TransientMounts, dockerclient.Mount{
			SourcePath:      src,
			DestinationPath: dest,
		})
	}
	// build the image and gather output. log the output if the build part of the test failed
	builder := imagebuilder.NewBuilder(nil)
	node, err := imagebuilder.ParseFile(filepath.Join(contextDir, dockerfileRelativePath))
	if err != nil {
		assert.Nil(t, err, "error parsing Dockerfile: %v", err)
	}
	if _, err = os.Stat(filepath.Join(contextDir, ".dockerignore")); err == nil {
		if builder.Excludes, err = imagebuilder.ParseDockerignore(contextDir); err != nil {
			assert.Nil(t, err, "error parsing .dockerignore file: %v", err)
		}
	}
	stages, err := imagebuilder.NewStages(node, builder)
	if err != nil {
		assert.Nil(t, err, "error breaking Dockerfile into stages")
	} else {
		// a build error is appended to the log rather than failing here, so
		// that buildPost below can decide whether failure was expected
		if finalExecutor, err := executor.Stages(builder, stages, ""); err != nil {
			output.WriteString("\n" + err.Error())
		} else {
			if err = finalExecutor.Commit(stages[len(stages)-1].Builder); err != nil {
				assert.Nil(t, err, "error committing final stage: %v", err)
			}
		}
	}

	outputString := output.String()
	defer func() {
		if t.Failed() {
			t.Logf("imagebuilder build output:\n%s", outputString)
		}
		// NOTE(review): if executor.Release() returns a slice rather than a
		// channel, "for err := range" iterates indices and this logs ints,
		// not the release errors — confirm against the imagebuilder API and
		// use "for _, err := range" if so
		for err := range executor.Release() {
			t.Logf("imagebuilder build post-error: %v", err)
		}
	}()

	// check that success/failure and the logged output matched expectations
	buildPost(t, test, err, "imagebuilder", outputString, test.imagebuilderRegex, test.imagebuilderErrRegex, line, finalOfSeveral)

	// return a reference to the new image, if we succeeded
	if err == nil {
		imagebuilderRef, err = daemon.ParseReference(imagebuilderImage)
		assert.Nil(t, err, "error parsing reference to newly-built image with name %q", imagebuilderImage)
	}
	return imagebuilderRef, []byte(outputString)
}
+
+func buildPost(t *testing.T, test testCase, err error, buildTool, outputString, stdoutRegex, stderrRegex string, line int, finalOfSeveral bool) {
+ // check if the build succeeded or failed, whichever was expected
+ if test.shouldFailAt != 0 && (line == 0 || line >= test.shouldFailAt) {
+ // this is expected to fail, and we're either at/past
+ // the line where it should fail, or we're not going
+ // line-by-line
+ assert.NotNil(t, err, fmt.Sprintf("%s build was expected to fail, but succeeded", buildTool))
+ } else {
+ assert.Nil(t, err, fmt.Sprintf("%s build was expected to succeed, but failed", buildTool))
+ }
+
+ // if the build failed, and we have an error message we expected, check for it
+ if err != nil && test.failureRegex != "" {
+ outputTokens := strings.Join(strings.Fields(err.Error()), " ")
+ assert.Regexpf(t, regexp.MustCompile(test.failureRegex), outputTokens, "build failure did not match %q", test.failureRegex)
+ }
+
+ // if this is the last image we're building for this case, we can scan
+ // the build log for expected messages
+ if finalOfSeveral {
+ outputTokens := strings.Join(strings.Fields(outputString), " ")
+ // check for expected output
+ if stdoutRegex != "" {
+ assert.Regexpf(t, regexp.MustCompile(stdoutRegex), outputTokens, "build output did not match %q", stdoutRegex)
+ }
+ if stderrRegex != "" {
+ assert.Regexpf(t, regexp.MustCompile(stderrRegex), outputTokens, "build error did not match %q", stderrRegex)
+ }
+ }
+}
+
// FSTree holds the information we have about an image's filesystem: the
// per-layer summaries in the order we read them, and the merged tree that
// results from applying those layers in sequence.
type FSTree struct {
	Layers []Layer `json:"layers,omitempty"`
	Tree   FSEntry `json:"tree,omitempty"`
}
+
// Layer keeps track of the digests and contents of a layer blob.
type Layer struct {
	UncompressedDigest digest.Digest `json:"uncompressed-digest,omitempty"`
	CompressedDigest   digest.Digest `json:"compressed-digest,omitempty"`
	// NOTE(review): encoding/json treats the tag "-" as "skip", but
	// "-,omitempty" names the field "-" instead of skipping it; harmless
	// here because saveReport only marshals FSTree.Tree, but confirm the
	// intent was to omit Headers from JSON output
	Headers []FSHeader `json:"-,omitempty"`
}
+
// FSHeader is the parts of the tar.Header for an entry in a layer blob that
// are relevant for comparing filesystem contents between builders; a digest
// of the entry's contents stands in for the contents themselves.
type FSHeader struct {
	Typeflag byte              `json:"typeflag,omitempty"`
	Name     string            `json:"name,omitempty"`
	Linkname string            `json:"linkname,omitempty"`
	Size     int64             `json:"size"`
	Mode     int64             `json:"mode,omitempty"`
	UID      int               `json:"uid"`
	GID      int               `json:"gid"`
	ModTime  time.Time         `json:"mtime,omitempty"`
	Devmajor int64             `json:"devmajor,omitempty"`
	Devminor int64             `json:"devminor,omitempty"`
	Xattrs   map[string]string `json:"xattrs,omitempty"`
	// Digest is the digest of the entry's contents, set only for non-empty files
	Digest digest.Digest `json:"digest,omitempty"`
}
+
// FSEntry stores one item in a filesystem tree. If it represents a directory,
// its contents are stored as its children; a non-nil Children map is also how
// applyLayerToFSTree distinguishes directories from other entries.
type FSEntry struct {
	FSHeader
	Children map[string]*FSEntry `json:"(dir),omitempty"`
}
+
+// fsHeaderForEntry converts a tar header to an FSHeader, in the process
+// discarding some fields which we don't care to compare
+func fsHeaderForEntry(hdr *tar.Header) FSHeader {
+ return FSHeader{
+ Typeflag: hdr.Typeflag,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Size: hdr.Size,
+ Mode: (hdr.Mode & int64(fs.ModePerm)),
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: hdr.ModTime,
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: hdr.Xattrs, // nolint:staticcheck
+ }
+}
+
// saveReport saves information about the specified image to the specified
// directory: the Dockerfile, the build log, optional builder version info,
// the manifest and configuration (both as-committed and converted to OCI),
// and a JSON summary of the filesystem tree produced by applying the image's
// layers in order. A nil ref saves only the Dockerfile/log/version files.
func saveReport(ctx context.Context, t *testing.T, ref types.ImageReference, directory string, dockerfileContents []byte, buildLog []byte, version []string) {
	imageName := ""
	// make sure the directory exists
	err := os.MkdirAll(directory, 0755)
	require.NoErrorf(t, err, "error ensuring directory %q exists for storing a report", directory)
	// save the Dockerfile that was used to generate the image
	err = os.WriteFile(filepath.Join(directory, "Dockerfile"), dockerfileContents, 0644)
	require.NoErrorf(t, err, "error saving Dockerfile for image %q", imageName)
	// save the log generated while building the image
	err = os.WriteFile(filepath.Join(directory, "build.log"), buildLog, 0644)
	require.NoErrorf(t, err, "error saving build log for image %q", imageName)
	// save the version information
	if len(version) > 0 {
		err = os.WriteFile(filepath.Join(directory, "version"), []byte(strings.Join(version, "\n")+"\n"), 0644)
		require.NoErrorf(t, err, "error saving builder version information for image %q", imageName)
	}
	// open the image for reading
	if ref == nil {
		return
	}
	imageName = transports.ImageName(ref)
	src, err := ref.NewImageSource(ctx, nil)
	require.NoErrorf(t, err, "error opening image %q as source to read its configuration", imageName)
	// close whichever of src/img is the outermost open handle at return time
	closer := io.Closer(src)
	defer func() {
		closer.Close()
	}()
	img, err := image.FromSource(ctx, nil, src)
	require.NoErrorf(t, err, "error opening image %q to read its configuration", imageName)
	closer = img
	// read the manifest in its original form
	rawManifest, _, err := src.GetManifest(ctx, nil)
	require.NoErrorf(t, err, "error reading raw manifest from image %q", imageName)
	// read the config blob in its original form
	rawConfig, err := img.ConfigBlob(ctx)
	require.NoErrorf(t, err, "error reading configuration from image %q", imageName)
	// read the config blob, converted to OCI format by the image library, and re-encode it
	ociConfig, err := img.OCIConfig(ctx)
	require.NoErrorf(t, err, "error reading OCI-format configuration from image %q", imageName)
	encodedConfig, err := json.Marshal(ociConfig)
	require.NoErrorf(t, err, "error encoding OCI-format configuration from image %q for saving", imageName)
	// save the manifest in its original form
	err = os.WriteFile(filepath.Join(directory, "manifest.json"), rawManifest, 0644)
	require.NoErrorf(t, err, "error saving original manifest from image %q", imageName)
	// save the config blob in the OCI format
	err = os.WriteFile(filepath.Join(directory, "oci-config.json"), encodedConfig, 0644)
	require.NoErrorf(t, err, "error saving OCI-format configuration from image %q", imageName)
	// save the config blob in its original format
	err = os.WriteFile(filepath.Join(directory, "config.json"), rawConfig, 0644)
	require.NoErrorf(t, err, "error saving original configuration from image %q", imageName)
	// start pulling layer information
	layerBlobInfos, err := img.LayerInfosForCopy(ctx)
	require.NoErrorf(t, err, "error reading blob infos for image %q", imageName)
	if len(layerBlobInfos) == 0 {
		layerBlobInfos = img.LayerInfos()
	}
	fstree := FSTree{Tree: FSEntry{Children: make(map[string]*FSEntry)}}
	// grab digest and header information from the layer blob
	for _, layerBlobInfo := range layerBlobInfos {
		rc, _, err := src.GetBlob(ctx, layerBlobInfo, nil)
		require.NoErrorf(t, err, "error reading blob %+v for image %q", layerBlobInfo, imageName)
		// deferred closes accumulate until saveReport returns, which is
		// acceptable for the small layer counts in these test images
		defer rc.Close()
		layer := summarizeLayer(t, imageName, layerBlobInfo, rc)
		fstree.Layers = append(fstree.Layers, layer)
	}
	// apply the header information from blobs, in the order they're listed
	// in the config blob, to produce what we think the filesystem tree
	// would look like
	for _, diffID := range ociConfig.RootFS.DiffIDs {
		var layer *Layer
		// NOTE(review): DiffIDs are digests of uncompressed layer content;
		// checking CompressedDigest first presumably matches blobs stored
		// uncompressed, where the two digests coincide — confirm
		for i := range fstree.Layers {
			if fstree.Layers[i].CompressedDigest == diffID {
				layer = &fstree.Layers[i]
				break
			}
			if fstree.Layers[i].UncompressedDigest == diffID {
				layer = &fstree.Layers[i]
				break
			}
		}
		if layer == nil {
			require.Failf(t, "missing layer diff", "config for image %q specifies a layer with diffID %q, but we found no layer blob matching that digest", imageName, diffID)
		}
		applyLayerToFSTree(t, layer, &fstree.Tree)
	}
	// encode the filesystem tree information and save it to a file,
	// discarding the layer summaries because different tools may choose
	// between marking a directory as opaque and removing each of its
	// contents individually, which would produce the same result, so
	// there's no point in saving them for comparison later
	encodedFSTree, err := json.Marshal(fstree.Tree)
	require.NoErrorf(t, err, "error encoding filesystem tree from image %q for saving", imageName)
	err = os.WriteFile(filepath.Join(directory, "fs.json"), encodedFSTree, 0644)
	require.NoErrorf(t, err, "error saving filesystem tree from image %q", imageName)
}
+
// summarizeLayer reads a blob and returns a summary of the parts of its
// contents that we care about: the digests of the blob as-read and after
// decompression, and a header (plus content digest for non-empty files) for
// each entry in the contained tar stream. The as-read digest is checked
// against the digest recorded in blobInfo.
func summarizeLayer(t *testing.T, imageName string, blobInfo types.BlobInfo, reader io.Reader) (layer Layer) {
	// tee the raw stream through a digester to compute the compressed digest
	// while AutoDecompress consumes it
	compressedDigest := digest.Canonical.Digester()
	uncompressedBlob, _, err := compression.AutoDecompress(io.TeeReader(reader, compressedDigest.Hash()))
	require.NoErrorf(t, err, "error decompressing blob %+v for image %q", blobInfo, imageName)
	defer uncompressedBlob.Close()
	// tee the decompressed stream through a second digester while the tar
	// reader consumes it
	uncompressedDigest := digest.Canonical.Digester()
	tr := tar.NewReader(io.TeeReader(uncompressedBlob, uncompressedDigest.Hash()))
	hdr, err := tr.Next()
	for err == nil {
		header := fsHeaderForEntry(hdr)
		// digest the contents of non-empty entries so they can be compared
		// without storing them
		if hdr.Size != 0 {
			contentDigest := digest.Canonical.Digester()
			n, err := io.Copy(contentDigest.Hash(), tr)
			require.NoErrorf(t, err, "error digesting contents of %q from layer %+v for image %q", hdr.Name, blobInfo, imageName)
			require.Equal(t, hdr.Size, n, "error reading contents of %q from layer %+v for image %q: wrong size", hdr.Name, blobInfo, imageName)
			header.Digest = contentDigest.Digest()
		}
		layer.Headers = append(layer.Headers, header)
		hdr, err = tr.Next()
	}
	// the only acceptable way out of the read loop is a clean end-of-archive
	require.Equal(t, io.EOF, err, "unexpected error reading layer contents %+v for image %q", blobInfo, imageName)
	layer.CompressedDigest = compressedDigest.Digest()
	require.Equal(t, blobInfo.Digest, layer.CompressedDigest, "calculated digest of compressed blob didn't match expected digest")
	layer.UncompressedDigest = uncompressedDigest.Digest()
	return layer
}
+
// applyLayerToFSTree updates the in-memory summary of a tree to incorporate
// changes described in the layer. This is a little naive, in that we don't
// expect the pathname to include symlinks, which we don't resolve, as
// components, but tools that currently generate layer diffs don't create
// those. Hard links are replaced by copies of their targets, and the
// ".wh..opq" / ".wh." whiteout conventions are honored.
func applyLayerToFSTree(t *testing.T, layer *Layer, root *FSEntry) {
	for i, entry := range layer.Headers {
		if entry.Typeflag == tar.TypeLink {
			// if the entry is a hard link, replace it with the
			// contents of the hard-linked file
			replaced := false
			name := entry.Name
			// only earlier entries (j < i) can be the link's target
			for j, otherEntry := range layer.Headers {
				if j >= i {
					break
				}
				if otherEntry.Name == entry.Linkname {
					entry = otherEntry
					entry.Name = name
					replaced = true
					break
				}
			}
			if !replaced {
				require.Fail(t, "layer diff error", "hardlink entry referenced a file that isn't in the layer")
			}
		}
		// parse the name from the entry, and don't get tripped up by a final '/'
		dirEntry := root
		components := strings.Split(strings.Trim(entry.Name, string(os.PathSeparator)), string(os.PathSeparator))
		require.NotEmpty(t, entry.Name, "layer diff error", "entry has no name")
		require.NotZerof(t, len(components), "entry name %q has no components", entry.Name)
		require.NotZerof(t, components[0], "entry name %q has no components", entry.Name)
		// "split" the final part of the path from the rest
		base := components[len(components)-1]
		components = components[:len(components)-1]
		// find the directory that contains this entry
		for i, component := range components {
			// this should be a parent directory, so check if it looks like a parent directory
			if dirEntry.Children == nil {
				require.Failf(t, "layer diff error", "layer diff %q includes entry for %q, but %q is not a directory", layer.UncompressedDigest, entry.Name, strings.Join(components[:i], string(os.PathSeparator)))
			}
			// if the directory is already there, move into it
			if child, ok := dirEntry.Children[component]; ok {
				dirEntry = child
				continue
			}
			// if the directory should be there, but we haven't
			// created it yet, blame the tool that generated this
			// layer diff
			require.Failf(t, "layer diff error", "layer diff %q includes entry for %q, but %q doesn't exist", layer.UncompressedDigest, entry.Name, strings.Join(components[:i], string(os.PathSeparator)))
		}
		// if the current directory is marked as "opaque", remove all
		// of its contents
		if base == ".wh..opq" {
			dirEntry.Children = make(map[string]*FSEntry)
			continue
		}
		// if the item is a whiteout, strip the "this is a whiteout
		// entry" prefix and remove the item it names
		if strings.HasPrefix(base, ".wh.") {
			delete(dirEntry.Children, strings.TrimPrefix(base, ".wh."))
			continue
		}
		// if the item already exists, make sure we don't get confused
		// by replacing a directory with a non-directory or vice-versa
		if child, ok := dirEntry.Children[base]; ok {
			if child.Children != nil {
				// it's a directory
				if entry.Typeflag == tar.TypeDir {
					// new entry is a directory, too. no
					// sweat, just update the metadata
					child.FSHeader = entry
					continue
				}
				// nope, a directory no longer
			} else {
				// it's not a directory
				if entry.Typeflag != tar.TypeDir {
					// new entry is not a directory, too.
					// no sweat, just update the metadata
					dirEntry.Children[base].FSHeader = entry
					continue
				}
				// well, it's a directory now
			}
		}
		// the item doesn't already exist, or it needs to be replaced, so we need to add it
		var children map[string]*FSEntry
		if entry.Typeflag == tar.TypeDir {
			// only directory entries can hold items
			children = make(map[string]*FSEntry)
		}
		dirEntry.Children[base] = &FSEntry{FSHeader: entry, Children: children}
	}
}
+
+// read information about the specified image from the specified directory
+func readReport(t *testing.T, directory string) (manifestType string, original, oci, fs map[string]interface{}) {
+ // read the manifest in the as-committed format, whatever that is
+ originalManifest, err := os.ReadFile(filepath.Join(directory, "manifest.json"))
+ require.NoErrorf(t, err, "error reading manifest %q", filepath.Join(directory, "manifest.json"))
+ // dump it into a map
+ manifest := make(map[string]interface{})
+ err = json.Unmarshal(originalManifest, &manifest)
+ require.NoErrorf(t, err, "error decoding manifest %q", filepath.Join(directory, "manifest.json"))
+ if str, ok := manifest["mediaType"].(string); ok {
+ manifestType = str
+ }
+ // read the config in the as-committed (docker) format
+ originalConfig, err := os.ReadFile(filepath.Join(directory, "config.json"))
+ require.NoErrorf(t, err, "error reading configuration file %q", filepath.Join(directory, "config.json"))
+ // dump it into a map
+ original = make(map[string]interface{})
+ err = json.Unmarshal(originalConfig, &original)
+ require.NoErrorf(t, err, "error decoding configuration from file %q", filepath.Join(directory, "config.json"))
+ // read the config in converted-to-OCI format
+ ociConfig, err := os.ReadFile(filepath.Join(directory, "oci-config.json"))
+ require.NoErrorf(t, err, "error reading OCI configuration file %q", filepath.Join(directory, "oci-config.json"))
+ // dump it into a map
+ oci = make(map[string]interface{})
+ err = json.Unmarshal(ociConfig, &oci)
+ require.NoErrorf(t, err, "error decoding OCI configuration from file %q", filepath.Join(directory, "oci.json"))
+ // read the filesystem
+ fsInfo, err := os.ReadFile(filepath.Join(directory, "fs.json"))
+ require.NoErrorf(t, err, "error reading filesystem summary file %q", filepath.Join(directory, "fs.json"))
+ // dump it into a map for comparison
+ fs = make(map[string]interface{})
+ err = json.Unmarshal(fsInfo, &fs)
+ require.NoErrorf(t, err, "error decoding filesystem summary from file %q", filepath.Join(directory, "fs.json"))
+ // return both
+ return manifestType, original, oci, fs
+}
+
// contains reports whether item is present in slice, comparing each element
// case-insensitively.
func contains(slice []string, item string) bool {
	for i := range slice {
		if strings.EqualFold(slice[i], item) {
			return true
		}
	}
	return false
}
+
// addPrefix prepends the given prefix to each string in []string.
// The prefix and the string are joined with ":".
func addPrefix(a []string, prefix string) []string {
	prefixed := make([]string, len(a))
	for i := range a {
		prefixed[i] = prefix + ":" + a[i]
	}
	return prefixed
}
+
// diffDebug returns a row for a tabwriter that summarizes a field name and
// the values for that field in two documents.
func diffDebug(k string, a, b interface{}) string {
	if k == "mode" {
		// modes are easier to read in octal than in decimal, so display
		// them that way when both sides are JSON numbers
		if aMode, ok := a.(float64); ok {
			if bMode, ok := b.(float64); ok {
				return fmt.Sprintf("%v\t0%o\t0%o\n", k, int64(aMode), int64(bMode))
			}
		}
	}
	return fmt.Sprintf("%v\t%v\t%v\n", k, a, b)
}
+
// compareJSON compares two parsed JSON structures. missKeys and leftKeys are
// lists of field names present only in the first map or the second,
// respectively, while diffKeys is a list of items which are present in both
// maps, but which have different values, formatted with diffDebug.
// Keys named in skip are ignored; a nested field is skipped with a
// colon-separated path (e.g. "config:Env").
// NOTE: b is modified in place — every key that also appears in a is
// deleted from it before the leftover-key scan at the end.
func compareJSON(a, b map[string]interface{}, skip []string) (missKeys, leftKeys, diffKeys []string, isSame bool) {
	isSame = true

	for k, v := range a {
		vb, ok := b[k]
		if ok {
			// remove this item from b. when we're done, all that's
			// left in b will be the items that weren't also in a.
			delete(b, k)
		}
		if contains(skip, k) {
			continue
		}
		if !ok {
			// key is in a, but not in b
			missKeys = append(missKeys, k)
			isSame = false
			continue
		}
		if reflect.TypeOf(v) != reflect.TypeOf(vb) {
			// dynamic types differ: treat a JSON null paired with
			// an empty container (e.g. null vs. []) as equivalent,
			// otherwise record the difference
			if reflect.TypeOf(v) == nil && reflect.ValueOf(vb).Len() == 0 {
				continue
			}
			if reflect.TypeOf(vb) == nil && reflect.ValueOf(v).Len() == 0 {
				continue
			}
			diffKeys = append(diffKeys, diffDebug(k, v, vb))
			isSame = false
			continue
		}
		switch v.(type) {
		case map[string]interface{}:
			// this field in the object is itself an object (e.g.
			// "config" or "container_config"), so recursively
			// compare them; strip the "k:" prefix from any skip
			// entries that apply to this subtree before recursing
			var nextSkip []string
			prefix := k + ":"
			for _, s := range skip {
				if strings.HasPrefix(s, prefix) {
					nextSkip = append(nextSkip, strings.TrimPrefix(s, prefix))
				}
			}
			submiss, subleft, subdiff, ok := compareJSON(v.(map[string]interface{}), vb.(map[string]interface{}), nextSkip)
			missKeys = append(missKeys, addPrefix(submiss, k)...)
			leftKeys = append(leftKeys, addPrefix(subleft, k)...)
			diffKeys = append(diffKeys, addPrefix(subdiff, k)...)
			if !ok {
				isSame = false
			}
		case []interface{}:
			// this field in the object is an array; make sure both
			// arrays have the same set of elements, which is more
			// or less correct for labels and environment
			// variables.
			// this will break if it tries to compare an array of
			// objects like "history", since maps, slices, and
			// functions can't be used as keys in maps
			tmpa := v.([]interface{})
			tmpb := vb.([]interface{})
			if len(tmpa) != len(tmpb) {
				diffKeys = append(diffKeys, diffDebug(k, v, vb))
				isSame = false
				break
			}
			// build a set from b's elements, then require that
			// each of a's elements is in it
			m := make(map[interface{}]struct{})
			for i := 0; i < len(tmpb); i++ {
				m[tmpb[i]] = struct{}{}
			}
			for i := 0; i < len(tmpa); i++ {
				if _, ok := m[tmpa[i]]; !ok {
					diffKeys = append(diffKeys, diffDebug(k, v, vb))
					isSame = false
					break
				}
			}
		default:
			// this field in the object is neither an object nor an
			// array, so assume it's a scalar item
			if !reflect.DeepEqual(v, vb) {
				diffKeys = append(diffKeys, diffDebug(k, v, vb))
				isSame = false
			}
		}
	}

	// anything still in b (and not skipped) appeared only in the second map
	if len(b) > 0 {
		for k := range b {
			if !contains(skip, k) {
				leftKeys = append(leftKeys, k)
			}
		}
	}

	// return fresh copies so callers can't alias our backing arrays; this
	// also normalizes nil results to empty, non-nil slices
	replace := func(slice []string) []string {
		return append([]string{}, slice...)
	}

	return replace(missKeys), replace(leftKeys), replace(diffKeys), isSame
}
+
// configCompareResult summarizes the output of compareJSON for display.
// miss lists fields absent from the notDocker version, left lists fields
// present only in the notDocker version, and diff holds rows (already
// formatted by diffDebug) for fields whose values disagree.  It returns an
// empty string when there is nothing to report.
func configCompareResult(miss, left, diff []string, notDocker string) string {
	var buffer bytes.Buffer
	if len(miss) > 0 {
		buffer.WriteString(fmt.Sprintf("Fields missing from %s version: %s\n", notDocker, strings.Join(miss, " ")))
	}
	if len(left) > 0 {
		buffer.WriteString(fmt.Sprintf("Fields which only exist in %s version: %s\n", notDocker, strings.Join(left, " ")))
	}
	if len(diff) > 0 {
		buffer.WriteString("Fields present in both versions have different values:\n")
		tw := tabwriter.NewWriter(&buffer, 1, 1, 8, ' ', 0)
		if _, err := tw.Write([]byte(fmt.Sprintf("Field\tDocker\t%s\n", notDocker))); err != nil {
			panic(err)
		}
		for _, d := range diff {
			if _, err := tw.Write([]byte(d)); err != nil {
				panic(err)
			}
		}
		// tabwriter buffers its input and may defer write errors until
		// Flush; check it for consistency with the panics above instead
		// of silently dropping the error.
		if err := tw.Flush(); err != nil {
			panic(err)
		}
	}
	return buffer.String()
}
+
// fsCompareResult summarizes the output of compareJSON for display,
// rewriting the "(dir):"-separated key names produced for the filesystem
// summary into slash-separated path names.
func fsCompareResult(miss, left, diff []string, notDocker string) string {
	pathify := func(names []string) []string {
		fixed := make([]string, 0, len(names))
		for _, name := range names {
			// "(dir):" markers separate path components; turn them
			// back into "/" separators for display
			fixed = append(fixed, strings.ReplaceAll(strings.ReplaceAll(name, ":(dir):", "/"), "(dir):", "/"))
		}
		return fixed
	}
	var out bytes.Buffer
	if len(miss) > 0 {
		fmt.Fprintf(&out, "Content missing from %s version: %s\n", notDocker, strings.Join(pathify(miss), " "))
	}
	if len(left) > 0 {
		fmt.Fprintf(&out, "Content which only exists in %s version: %s\n", notDocker, strings.Join(pathify(left), " "))
	}
	if len(diff) > 0 {
		out.WriteString("File attributes in both versions have different values:\n")
		tw := tabwriter.NewWriter(&out, 1, 1, 8, ' ', 0)
		if _, err := fmt.Fprintf(tw, "File:attr\tDocker\t%s\n", notDocker); err != nil {
			panic(err)
		}
		for _, row := range pathify(diff) {
			if _, err := tw.Write([]byte(row)); err != nil {
				panic(err)
			}
		}
		tw.Flush()
	}
	return out.String()
}
+
// testCaseTweakContextDirFn is a callback invoked as
// fn(t, contextDir, storageDriver, storageRoot) to modify the temporary
// build context directory before a test case is built.
type testCaseTweakContextDirFn func(*testing.T, string, string, string) error

// testCase describes one build scenario: where its Dockerfile and context
// come from, how (and whether) it is expected to fail, and which
// differences between implementations should be tolerated.
type testCase struct {
	name                 string                    // name of the test
	dockerfileContents   string                    // inlined Dockerfile content to use instead of possible file in the build context
	dockerfile           string                    // name of the Dockerfile, relative to contextDir, if not Dockerfile
	contextDir           string                    // name of context subdirectory, if there is one to be copied
	tweakContextDir      testCaseTweakContextDirFn // callback to make updates to the temporary build context before we build it
	shouldFailAt         int                       // line where a build is expected to fail (starts with 1, 0 if it should succeed)
	buildahRegex         string                    // if set, expect this to be present in output
	dockerRegex          string                    // if set, expect this to be present in output
	imagebuilderRegex    string                    // if set, expect this to be present in output
	buildahErrRegex      string                    // if set, expect this to be present in output
	dockerErrRegex       string                    // if set, expect this to be present in output
	imagebuilderErrRegex string                    // if set, expect this to be present in output
	failureRegex         string                    // if set, expect this to be present in output when the build fails
	withoutDocker        bool                      // don't build this with docker, because it depends on a buildah-specific feature
	dockerUseBuildKit    bool                      // if building with docker, request that dockerd use buildkit
	withoutImagebuilder  bool                      // don't build this with imagebuilder, because it depends on a buildah-specific feature
	transientMounts      []string                  // one possible buildah-specific feature
	fsSkip               []string                  // expected filesystem differences, typically timestamps on files or directories we create or modify during the build and don't reset
}
+
+var internalTestCases = []testCase{
+ {
+ name: "shell test",
+ dockerfile: "Dockerfile.shell",
+ buildahRegex: "(?s)[0-9a-z]+(.*)--",
+ dockerRegex: "(?s)RUN env.*?Running in [0-9a-z]+(.*?)---",
+ },
+
+ {
+ name: "copy file to root",
+ dockerfile: "Dockerfile.copyfrom_1",
+ buildahRegex: "[-rw]+.*?/a",
+ fsSkip: []string{"(dir):a:mtime"},
+ },
+
+ {
+ name: "copy file to same file",
+ dockerfile: "Dockerfile.copyfrom_2",
+ buildahRegex: "[-rw]+.*?/a",
+ fsSkip: []string{"(dir):a:mtime"},
+ },
+
+ {
+ name: "copy file to workdir",
+ dockerfile: "Dockerfile.copyfrom_3",
+ buildahRegex: "[-rw]+.*?/b/a",
+ fsSkip: []string{"(dir):b:mtime", "(dir):b:(dir):a:mtime"},
+ },
+
+ {
+ name: "copy file to workdir rename",
+ dockerfile: "Dockerfile.copyfrom_3_1",
+ buildahRegex: "[-rw]+.*?/b/b",
+ fsSkip: []string{"(dir):b:mtime", "(dir):b:(dir):a:mtime"},
+ },
+
+ {
+ name: "copy folder contents to higher level",
+ dockerfile: "Dockerfile.copyfrom_4",
+ buildahRegex: "(?s)[-rw]+.*?/b/1.*?[-rw]+.*?/b/2.*?/b.*?[-rw]+.*?1.*?[-rw]+.*?2",
+ buildahErrRegex: "/a: No such file or directory",
+ fsSkip: []string{"(dir):b:mtime"},
+ },
+
+ {
+ name: "copy wildcard folder contents to higher level",
+ dockerfile: "Dockerfile.copyfrom_5",
+ buildahRegex: "(?s)[-rw]+.*?/b/1.*?[-rw]+.*?/b/2.*?/b.*?[-rw]+.*?1.*?[-rw]+.*?2",
+ buildahErrRegex: "(?s)/a: No such file or directory.*?/b/a: No such file or directory.*?/b/b: No such file or director",
+ fsSkip: []string{"(dir):b:mtime", "(dir):b:(dir):1:mtime", "(dir):b:(dir):2:mtime"},
+ },
+
+ {
+ name: "copy folder with dot contents to higher level",
+ dockerfile: "Dockerfile.copyfrom_6",
+ buildahRegex: "(?s)[-rw]+.*?/b/1.*?[-rw]+.*?/b/2.*?/b.*?[-rw]+.*?1.*?[-rw]+.*?2",
+ buildahErrRegex: "(?s)/a: No such file or directory.*?/b/a: No such file or directory.*?/b/b: No such file or director",
+ fsSkip: []string{"(dir):b:mtime", "(dir):b:(dir):1:mtime", "(dir):b:(dir):2:mtime"},
+ },
+
+ {
+ name: "copy root file to different root name",
+ dockerfile: "Dockerfile.copyfrom_7",
+ buildahRegex: "[-rw]+.*?/a",
+ buildahErrRegex: "/b: No such file or directory",
+ fsSkip: []string{"(dir):a:mtime"},
+ },
+
+ {
+ name: "copy nested file to different root name",
+ dockerfile: "Dockerfile.copyfrom_8",
+ buildahRegex: "[-rw]+.*?/a",
+ buildahErrRegex: "/b: No such file or directory",
+ fsSkip: []string{"(dir):a:mtime"},
+ },
+
+ {
+ name: "copy file to deeper directory with explicit slash",
+ dockerfile: "Dockerfile.copyfrom_9",
+ buildahRegex: "[-rw]+.*?/a/b/c/1",
+ buildahErrRegex: "/a/b/1: No such file or directory",
+ fsSkip: []string{"(dir):a:mtime", "(dir):a:(dir):b:mtime", "(dir):a:(dir):b:(dir):c:mtime", "(dir):a:(dir):b:(dir):c:(dir):1:mtime"},
+ },
+
+ {
+ name: "copy file to deeper directory without explicit slash",
+ dockerfile: "Dockerfile.copyfrom_10",
+ buildahRegex: "[-rw]+.*?/a/b/c",
+ buildahErrRegex: "/a/b/1: No such file or directory",
+ fsSkip: []string{"(dir):a:mtime", "(dir):a:(dir):b:mtime", "(dir):a:(dir):b:(dir):c:mtime"},
+ },
+
+ {
+ name: "copy directory to deeper directory without explicit slash",
+ dockerfile: "Dockerfile.copyfrom_11",
+ buildahRegex: "[-rw]+.*?/a/b/c/1",
+ buildahErrRegex: "/a/b/1: No such file or directory",
+ fsSkip: []string{
+ "(dir):a:mtime", "(dir):a:(dir):b:mtime", "(dir):a:(dir):b:(dir):c:mtime",
+ "(dir):a:(dir):b:(dir):c:(dir):1:mtime",
+ },
+ },
+
+ {
+ name: "copy directory to root without explicit slash",
+ dockerfile: "Dockerfile.copyfrom_12",
+ buildahRegex: "[-rw]+.*?/a/1",
+ buildahErrRegex: "/a/a: No such file or directory",
+ fsSkip: []string{"(dir):a:mtime", "(dir):a:(dir):1:mtime"},
+ },
+
+ {
+ name: "copy directory trailing to root without explicit slash",
+ dockerfile: "Dockerfile.copyfrom_13",
+ buildahRegex: "[-rw]+.*?/a/1",
+ buildahErrRegex: "/a/a: No such file or directory",
+ fsSkip: []string{"(dir):a:mtime", "(dir):a:(dir):1:mtime"},
+ },
+
+ {
+ name: "multi stage base",
+ dockerfile: "Dockerfile.reusebase",
+ buildahRegex: "[0-9a-z]+ /1",
+ fsSkip: []string{"(dir):1:mtime"},
+ },
+
+ {
+ name: "directory",
+ contextDir: "dir",
+ fsSkip: []string{"(dir):dir:mtime", "(dir):test:mtime"},
+ },
+
+ {
+ name: "copy to dir",
+ contextDir: "copy",
+ fsSkip: []string{"(dir):usr:(dir):bin:mtime"},
+ },
+
+ {
+ name: "copy dir",
+ contextDir: "copydir",
+ fsSkip: []string{"(dir):dir"},
+ },
+
+ {
+ name: "copy from symlink source",
+ contextDir: "copysymlink",
+ },
+
+ {
+ name: "copy-symlink-2",
+ contextDir: "copysymlink",
+ dockerfile: "Dockerfile2",
+ },
+
+ {
+ name: "copy from subdir to new directory",
+ contextDir: "copydir",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY dir/file /subdir/",
+ }, "\n"),
+ fsSkip: []string{"(dir):subdir"},
+ },
+
+ {
+ name: "copy to renamed file",
+ contextDir: "copyrename",
+ fsSkip: []string{"(dir):usr:(dir):bin:mtime"},
+ },
+
+ {
+ name: "copy with --chown",
+ contextDir: "copychown",
+ fsSkip: []string{"(dir):usr:(dir):bin:mtime", "(dir):usr:(dir):local:(dir):bin:mtime"},
+ },
+
+ {
+ name: "directory with slash",
+ contextDir: "overlapdirwithslash",
+ },
+
+ {
+ name: "directory without slash",
+ contextDir: "overlapdirwithoutslash",
+ },
+
+ {
+ name: "environment",
+ dockerfile: "Dockerfile.env",
+ shouldFailAt: 12,
+ },
+
+ {
+ name: "edgecases",
+ dockerfile: "Dockerfile.edgecases",
+ fsSkip: []string{
+ "(dir):test:mtime", "(dir):test:(dir):copy:mtime", "(dir):test2:mtime", "(dir):test3:mtime",
+ "(dir):test3:(dir):copy:mtime",
+ "(dir):test3:(dir):test:mtime", "(dir):tmp:mtime", "(dir):tmp:(dir):passwd:mtime",
+ },
+ },
+
+ {
+ name: "exposed default",
+ dockerfile: "Dockerfile.exposedefault",
+ },
+
+ {
+ name: "add",
+ dockerfile: "Dockerfile.add",
+ fsSkip: []string{"(dir):b:mtime", "(dir):tmp:mtime"},
+ },
+
+ {
+ name: "run with JSON",
+ dockerfile: "Dockerfile.run.args",
+ buildahRegex: "(first|third|fifth|inner) (second|fourth|sixth|outer)",
+ dockerRegex: "Running in [0-9a-z]+.*?(first|third|fifth|inner) (second|fourth|sixth|outer)",
+ },
+
+ {
+ name: "wildcard",
+ contextDir: "wildcard",
+ fsSkip: []string{"(dir):usr:mtime", "(dir):usr:(dir):test:mtime"},
+ },
+
+ {
+ name: "volume",
+ contextDir: "volume",
+ fsSkip: []string{"(dir):var:mtime", "(dir):var:(dir):www:mtime"},
+ },
+
+ {
+ name: "volumerun",
+ contextDir: "volumerun",
+ fsSkip: []string{"(dir):var:mtime", "(dir):var:(dir):www:mtime"},
+ },
+
+ {
+ name: "mount",
+ contextDir: "mount",
+ buildahRegex: "/tmp/test/file.*?regular file.*?/tmp/test/file2.*?regular file",
+ withoutDocker: true,
+ transientMounts: []string{"@@TEMPDIR@@:/tmp/test" + selinuxMountFlag()},
+ },
+
+ {
+ name: "transient-mount",
+ contextDir: "transientmount",
+ buildahRegex: "file2.*?FROM busybox ENV name value",
+ withoutDocker: true,
+ transientMounts: []string{
+ "@@TEMPDIR@@:/mountdir" + selinuxMountFlag(),
+ "@@TEMPDIR@@/Dockerfile.env:/mountfile" + selinuxMountFlag(),
+ },
+ },
+
+ {
+ // from internal team chat
+ name: "ci-pipeline-modified",
+ dockerfileContents: strings.Join([]string{
+ "FROM busybox",
+ "WORKDIR /go/src/github.com/openshift/ocp-release-operator-sdk/",
+ "ENV GOPATH=/go",
+ "RUN env | grep -E -v '^(HOSTNAME|OLDPWD)=' | LANG=C sort | tee /env-contents.txt\n",
+ }, "\n"),
+ fsSkip: []string{
+ "(dir):go:mtime",
+ "(dir):go:(dir):src:mtime",
+ "(dir):go:(dir):src:(dir):github.com:mtime",
+ "(dir):go:(dir):src:(dir):github.com:(dir):openshift:mtime",
+ "(dir):go:(dir):src:(dir):github.com:(dir):openshift:(dir):ocp-release-operator-sdk:mtime",
+ "(dir):env-contents.txt:mtime",
+ },
+ },
+
+ {
+ name: "add-permissions",
+ withoutDocker: true,
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "# Does ADD preserve permissions differently for archives and files?",
+ "ADD archive.tar subdir1/",
+ "ADD archive/ subdir2/",
+ }, "\n"),
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ content := []byte("test content")
+
+ if err := os.Mkdir(filepath.Join(contextDir, "archive"), 0755); err != nil {
+ return fmt.Errorf("creating subdirectory of temporary context directory: %w", err)
+ }
+ filename := filepath.Join(contextDir, "archive", "should-be-owned-by-root")
+ if err = os.WriteFile(filename, content, 0640); err != nil {
+ return fmt.Errorf("creating file owned by root in temporary context directory: %w", err)
+ }
+ if err = os.Chown(filename, 0, 0); err != nil {
+ return fmt.Errorf("setting ownership on file owned by root in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on file owned by root file in temporary context directory: %w", err)
+ }
+ filename = filepath.Join(contextDir, "archive", "should-be-owned-by-99")
+ if err = os.WriteFile(filename, content, 0640); err != nil {
+ return fmt.Errorf("creating file owned by 99 in temporary context directory: %w", err)
+ }
+ if err = os.Chown(filename, 99, 99); err != nil {
+ return fmt.Errorf("setting ownership on file owned by 99 in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on file owned by 99 in temporary context directory: %w", err)
+ }
+
+ filename = filepath.Join(contextDir, "archive.tar")
+ f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return fmt.Errorf("creating archive file: %w", err)
+ }
+ defer f.Close()
+ tw := tar.NewWriter(f)
+ defer tw.Close()
+ err = tw.WriteHeader(&tar.Header{
+ Name: "archive/should-be-owned-by-root",
+ Typeflag: tar.TypeReg,
+ Size: int64(len(content)),
+ ModTime: testDate,
+ Mode: 0640,
+ Uid: 0,
+ Gid: 0,
+ })
+ if err != nil {
+ return fmt.Errorf("writing archive file header: %w", err)
+ }
+ n, err := tw.Write(content)
+ if err != nil {
+ return fmt.Errorf("writing archive file contents: %w", err)
+ }
+ if n != len(content) {
+ return errors.New("short write writing archive file contents")
+ }
+ err = tw.WriteHeader(&tar.Header{
+ Name: "archive/should-be-owned-by-99",
+ Typeflag: tar.TypeReg,
+ Size: int64(len(content)),
+ ModTime: testDate,
+ Mode: 0640,
+ Uid: 99,
+ Gid: 99,
+ })
+ if err != nil {
+ return fmt.Errorf("writing archive file header: %w", err)
+ }
+ n, err = tw.Write(content)
+ if err != nil {
+ return fmt.Errorf("writing archive file contents: %w", err)
+ }
+ if n != len(content) {
+ return errors.New("short write writing archive file contents")
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir1:mtime", "(dir):subdir2:mtime"},
+ },
+
+ {
+ name: "copy-permissions",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "# Does COPY --chown change permissions on already-present directories?",
+ "COPY subdir/ subdir/",
+ "COPY --chown=99:99 subdir/ subdir/",
+ }, "\n"),
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ content := []byte("test content")
+
+ if err := os.Mkdir(filepath.Join(contextDir, "subdir"), 0755); err != nil {
+ return fmt.Errorf("creating subdirectory of temporary context directory: %w", err)
+ }
+ filename := filepath.Join(contextDir, "subdir", "would-be-owned-by-root")
+ if err = os.WriteFile(filename, content, 0640); err != nil {
+ return fmt.Errorf("creating file owned by root in temporary context directory: %w", err)
+ }
+ if err = os.Chown(filename, 0, 0); err != nil {
+ return fmt.Errorf("setting ownership on file owned by root in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on file owned by root file in temporary context directory: %w", err)
+ }
+ filename = filepath.Join(contextDir, "subdir", "would-be-owned-by-99")
+ if err = os.WriteFile(filename, content, 0640); err != nil {
+ return fmt.Errorf("creating file owned by 99 in temporary context directory: %w", err)
+ }
+ if err = os.Chown(filename, 99, 99); err != nil {
+ return fmt.Errorf("setting ownership on file owned by 99 in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on file owned by 99 in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "copy-permissions-implicit",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "# Does COPY --chown change permissions on already-present directories?",
+ "COPY --chown=99:99 subdir/ subdir/",
+ "COPY subdir/ subdir/",
+ }, "\n"),
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ content := []byte("test content")
+
+ if err := os.Mkdir(filepath.Join(contextDir, "subdir"), 0755); err != nil {
+ return fmt.Errorf("creating subdirectory of temporary context directory: %w", err)
+ }
+ filename := filepath.Join(contextDir, "subdir", "would-be-owned-by-root")
+ if err = os.WriteFile(filename, content, 0640); err != nil {
+ return fmt.Errorf("creating file owned by root in temporary context directory: %w", err)
+ }
+ if err = os.Chown(filename, 0, 0); err != nil {
+ return fmt.Errorf("setting ownership on file owned by root in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on file owned by root file in temporary context directory: %w", err)
+ }
+ filename = filepath.Join(contextDir, "subdir", "would-be-owned-by-99")
+ if err = os.WriteFile(filename, content, 0640); err != nil {
+ return fmt.Errorf("creating file owned by 99 in temporary context directory: %w", err)
+ }
+ if err = os.Chown(filename, 99, 99); err != nil {
+ return fmt.Errorf("setting ownership on file owned by 99 in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on file owned by 99 in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ // the digest just ensures that we can handle a digest
+ // reference to a manifest list; the digest of any manifest
+ // list in the image repository would do
+ name: "stage-container-as-source-plus-hardlinks",
+ dockerfileContents: strings.Join([]string{
+ "FROM busybox@sha256:9ddee63a712cea977267342e8750ecbc60d3aab25f04ceacfa795e6fce341793 AS build",
+ "RUN mkdir -p /target/subdir",
+ "RUN cp -p /etc/passwd /target/",
+ "RUN ln /target/passwd /target/subdir/passwd",
+ "RUN ln /target/subdir/passwd /target/subdir/passwd2",
+ "FROM scratch",
+ "COPY --from=build /target/subdir /subdir",
+ }, "\n"),
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerfile-in-subdirectory",
+ dockerfile: "subdir/Dockerfile",
+ contextDir: "subdir",
+ },
+
+ {
+ name: "setuid-file-in-context",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ fmt.Sprintf("# Does the setuid file (0%o) in the context dir end up setuid in the image?", syscall.S_ISUID),
+ "COPY . subdir1",
+ "ADD . subdir2",
+ }, "\n"),
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ filename := filepath.Join(contextDir, "should-be-setuid-file")
+ if err = os.WriteFile(filename, []byte("test content"), 0644); err != nil {
+ return fmt.Errorf("creating setuid test file in temporary context directory: %w", err)
+ }
+ if err = syscall.Chmod(filename, syscall.S_ISUID|0755); err != nil {
+ return fmt.Errorf("setting setuid bit on test file in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on setuid test file in temporary context directory: %w", err)
+ }
+ filename = filepath.Join(contextDir, "should-be-setgid-file")
+ if err = os.WriteFile(filename, []byte("test content"), 0644); err != nil {
+ return fmt.Errorf("creating setgid test file in temporary context directory: %w", err)
+ }
+ if err = syscall.Chmod(filename, syscall.S_ISGID|0755); err != nil {
+ return fmt.Errorf("setting setgid bit on test file in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on setgid test file in temporary context directory: %w", err)
+ }
+ filename = filepath.Join(contextDir, "should-be-sticky-file")
+ if err = os.WriteFile(filename, []byte("test content"), 0644); err != nil {
+ return fmt.Errorf("creating sticky test file in temporary context directory: %w", err)
+ }
+ if err = syscall.Chmod(filename, syscall.S_ISVTX|0755); err != nil {
+ return fmt.Errorf("setting permissions on sticky test file in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on sticky test file in temporary context directory: %w", err)
+ }
+ filename = filepath.Join(contextDir, "should-not-be-setuid-setgid-sticky-file")
+ if err = os.WriteFile(filename, []byte("test content"), 0644); err != nil {
+ return fmt.Errorf("creating non-suid non-sgid non-sticky test file in temporary context directory: %w", err)
+ }
+ if err = syscall.Chmod(filename, 0640); err != nil {
+ return fmt.Errorf("setting permissions on plain test file in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on plain test file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir1:mtime", "(dir):subdir2:mtime"},
+ },
+
+ {
+ name: "xattrs-file-in-context",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "# Do the xattrs on a file in the context dir end up in the image?",
+ "COPY . subdir1",
+ "ADD . subdir2",
+ }, "\n"),
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ if !*contextCanDoXattrs {
+ t.Skipf("test context directory %q doesn't support xattrs", contextDir)
+ }
+ if !*storageCanDoXattrs {
+ t.Skipf("test storage driver %q and directory %q don't support xattrs together", storageDriver, storageRoot)
+ }
+
+ filename := filepath.Join(contextDir, "xattrs-file")
+ if err = os.WriteFile(filename, []byte("test content"), 0644); err != nil {
+ return fmt.Errorf("creating test file with xattrs in temporary context directory: %w", err)
+ }
+ if err = copier.Lsetxattrs(filename, map[string]string{"user.a": "test"}); err != nil {
+ return fmt.Errorf("setting xattrs on test file in temporary context directory: %w", err)
+ }
+ if err = syscall.Chmod(filename, 0640); err != nil {
+ return fmt.Errorf("setting permissions on test file in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on test file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir1:mtime", "(dir):subdir2:mtime"},
+ },
+
+ {
+ name: "setuid-file-in-archive",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ fmt.Sprintf("# Do the setuid/setgid/sticky files in this archive end up setuid(0%o)/setgid(0%o)/sticky(0%o)?", syscall.S_ISUID, syscall.S_ISGID, syscall.S_ISVTX),
+ "ADD archive.tar .",
+ }, "\n"),
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ filename := filepath.Join(contextDir, "archive.tar")
+ f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return fmt.Errorf("creating new archive file in temporary context directory: %w", err)
+ }
+ defer f.Close()
+ tw := tar.NewWriter(f)
+ defer tw.Close()
+ hdr := tar.Header{
+ Name: "setuid-file",
+ Uid: 0,
+ Gid: 0,
+ Typeflag: tar.TypeReg,
+ Size: 8,
+ Mode: cISUID | 0755,
+ ModTime: testDate,
+ }
+ if err = tw.WriteHeader(&hdr); err != nil {
+ return fmt.Errorf("writing tar archive header: %w", err)
+ }
+ if _, err = io.Copy(tw, bytes.NewReader([]byte("whatever"))); err != nil {
+ return fmt.Errorf("writing tar archive content: %w", err)
+ }
+ hdr = tar.Header{
+ Name: "setgid-file",
+ Uid: 0,
+ Gid: 0,
+ Typeflag: tar.TypeReg,
+ Size: 8,
+ Mode: cISGID | 0755,
+ ModTime: testDate,
+ }
+ if err = tw.WriteHeader(&hdr); err != nil {
+ return fmt.Errorf("writing tar archive header: %w", err)
+ }
+ if _, err = io.Copy(tw, bytes.NewReader([]byte("whatever"))); err != nil {
+ return fmt.Errorf("writing tar archive content: %w", err)
+ }
+ hdr = tar.Header{
+ Name: "sticky-file",
+ Uid: 0,
+ Gid: 0,
+ Typeflag: tar.TypeReg,
+ Size: 8,
+ Mode: cISVTX | 0755,
+ ModTime: testDate,
+ }
+ if err = tw.WriteHeader(&hdr); err != nil {
+ return fmt.Errorf("writing tar archive header: %w", err)
+ }
+ if _, err = io.Copy(tw, bytes.NewReader([]byte("whatever"))); err != nil {
+ return fmt.Errorf("writing tar archive content: %w", err)
+ }
+ hdr = tar.Header{
+ Name: "setuid-dir",
+ Uid: 0,
+ Gid: 0,
+ Typeflag: tar.TypeDir,
+ Size: 0,
+ Mode: cISUID | 0755,
+ ModTime: testDate,
+ }
+ if err = tw.WriteHeader(&hdr); err != nil {
+ return fmt.Errorf("error writing tar archive header: %w", err)
+ }
+ hdr = tar.Header{
+ Name: "setgid-dir",
+ Uid: 0,
+ Gid: 0,
+ Typeflag: tar.TypeDir,
+ Size: 0,
+ Mode: cISGID | 0755,
+ ModTime: testDate,
+ }
+ if err = tw.WriteHeader(&hdr); err != nil {
+ return fmt.Errorf("error writing tar archive header: %w", err)
+ }
+ hdr = tar.Header{
+ Name: "sticky-dir",
+ Uid: 0,
+ Gid: 0,
+ Typeflag: tar.TypeDir,
+ Size: 0,
+ Mode: cISVTX | 0755,
+ ModTime: testDate,
+ }
+ if err = tw.WriteHeader(&hdr); err != nil {
+ return fmt.Errorf("error writing tar archive header: %w", err)
+ }
+ return nil
+ },
+ },
+
+ {
+ name: "xattrs-file-in-archive",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "# Do the xattrs on a file in an archive end up in the image?",
+ "ADD archive.tar .",
+ }, "\n"),
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ if !*contextCanDoXattrs {
+ t.Skipf("test context directory %q doesn't support xattrs", contextDir)
+ }
+ if !*storageCanDoXattrs {
+ t.Skipf("test storage driver %q and directory %q don't support xattrs together", storageDriver, storageRoot)
+ }
+
+ filename := filepath.Join(contextDir, "archive.tar")
+ f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return fmt.Errorf("creating new archive file in temporary context directory: %w", err)
+ }
+ defer f.Close()
+ tw := tar.NewWriter(f)
+ defer tw.Close()
+ hdr := tar.Header{
+ Name: "xattr-file",
+ Uid: 0,
+ Gid: 0,
+ Typeflag: tar.TypeReg,
+ Size: 8,
+ Mode: 0640,
+ ModTime: testDate,
+ Xattrs: map[string]string{"user.a": "test"},
+ }
+ if err = tw.WriteHeader(&hdr); err != nil {
+ return fmt.Errorf("writing tar archive header: %w", err)
+ }
+ if _, err = io.Copy(tw, bytes.NewReader([]byte("whatever"))); err != nil {
+ return fmt.Errorf("writing tar archive content: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir1:mtime", "(dir):subdir2:mtime"},
+ },
+
+ {
+ // docker build apparently stopped preserving this bit somewhere between 18.09.7 and 19.03,
+ // possibly around https://github.com/moby/moby/pull/38599
+ name: "setuid-file-in-other-stage",
+ dockerfileContents: strings.Join([]string{
+ "FROM busybox",
+ "RUN mkdir /a && echo whatever > /a/setuid && chmod u+xs /a/setuid && touch -t @1485449953 /a/setuid",
+ "RUN mkdir /b && echo whatever > /b/setgid && chmod g+xs /b/setgid && touch -t @1485449953 /b/setgid",
+ "RUN mkdir /c && echo whatever > /c/sticky && chmod o+x /c/sticky && chmod +t /c/sticky && touch -t @1485449953 /c/sticky",
+ "FROM scratch",
+ fmt.Sprintf("# Does this setuid/setgid/sticky file copied from another stage end up setuid/setgid/sticky (0%o/0%o/0%o)?", syscall.S_ISUID, syscall.S_ISGID, syscall.S_ISVTX),
+ "COPY --from=0 /a/setuid /b/setgid /c/sticky /",
+ }, "\n"),
+ },
+
+ {
+ name: "xattrs-file-in-other-stage",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . .",
+ "FROM scratch",
+ "# Do the xattrs on a file in another stage end up in the image?",
+ "COPY --from=0 /xattrs-file /",
+ }, "\n"),
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ if !*contextCanDoXattrs {
+ t.Skipf("test context directory %q doesn't support xattrs", contextDir)
+ }
+ if !*storageCanDoXattrs {
+ t.Skipf("test storage driver %q and directory %q don't support xattrs together", storageDriver, storageRoot)
+ }
+
+ filename := filepath.Join(contextDir, "xattrs-file")
+ if err = os.WriteFile(filename, []byte("test content"), 0644); err != nil {
+ return fmt.Errorf("creating test file with xattrs in temporary context directory: %w", err)
+ }
+ if err = copier.Lsetxattrs(filename, map[string]string{"user.a": "test"}); err != nil {
+ return fmt.Errorf("setting xattrs on test file in temporary context directory: %w", err)
+ }
+ if err = syscall.Chmod(filename, 0640); err != nil {
+ return fmt.Errorf("setting permissions on test file in temporary context directory: %w", err)
+ }
+ if err = os.Chtimes(filename, testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on test file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ },
+
+ {
+ name: "copy-multiple-some-missing",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY file-a.txt subdir-a file-z.txt subdir-z subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ shouldFailAt: 2,
+ },
+
+ {
+ name: "copy-multiple-missing-file-with-glob",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY file-z.txt subdir-* subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ shouldFailAt: 2,
+ },
+
+ {
+ name: "copy-multiple-missing-file-with-nomatch-on-glob",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY missing* subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ shouldFailAt: 2,
+ },
+
+ {
+ name: "copy-multiple-some-missing-glob",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY file-a.txt subdir-* file-?.txt missing* subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "file-in-workdir-in-other-stage",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch AS base",
+ "COPY . /subdir/",
+ "WORKDIR /subdir",
+ "FROM base",
+ "COPY --from=base . .", // --from=otherstage ignores that stage's WORKDIR
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ fsSkip: []string{"(dir):subdir:mtime", "(dir):subdir:(dir):subdir:mtime"},
+ },
+
+ {
+ name: "copy-integration1",
+ contextDir: "dockerignore/integration1",
+ shouldFailAt: 3,
+ failureRegex: "(no such file or directory)|(file not found)|(file does not exist)",
+ },
+
+ {
+ name: "copy-integration2",
+ contextDir: "dockerignore/integration2",
+ },
+
+ {
+ name: "copy-integration3",
+ contextDir: "dockerignore/integration3",
+ shouldFailAt: 4,
+ failureRegex: "(no such file or directory)|(file not found)|(file does not exist)",
+ },
+
+ {
+ name: "copy-empty-1",
+ contextDir: "copyempty",
+ dockerfile: "Dockerfile",
+ fsSkip: []string{"(dir):usr:(dir):local:mtime", "(dir):usr:(dir):local:(dir):tmp:mtime"},
+ },
+
+ {
+ name: "copy-empty-2",
+ contextDir: "copyempty",
+ dockerfile: "Dockerfile2",
+ fsSkip: []string{"(dir):usr:(dir):local:mtime", "(dir):usr:(dir):local:(dir):tmp:mtime"},
+ },
+
+ {
+ name: "copy-absolute-directory-1",
+ contextDir: "copyblahblub",
+ dockerfile: "Dockerfile",
+ fsSkip: []string{"(dir):var:mtime"},
+ },
+
+ {
+ name: "copy-absolute-directory-2",
+ contextDir: "copyblahblub",
+ dockerfile: "Dockerfile2",
+ fsSkip: []string{"(dir):var:mtime"},
+ },
+
+ {
+ name: "copy-absolute-directory-3",
+ contextDir: "copyblahblub",
+ dockerfile: "Dockerfile3",
+ fsSkip: []string{"(dir):var:mtime"},
+ },
+
+ {
+ name: "multi-stage-through-base",
+ dockerfileContents: strings.Join([]string{
+ "FROM alpine AS base",
+ "RUN touch -t @1485449953 /1",
+ "ENV LOCAL=/1",
+ "RUN find $LOCAL",
+ "FROM base",
+ "RUN find $LOCAL",
+ }, "\n"),
+ fsSkip: []string{"(dir):root:mtime", "(dir):1:mtime"},
+ },
+
+ {
+ name: "multi-stage-derived", // from #2415
+ dockerfileContents: strings.Join([]string{
+ "FROM busybox as layer",
+ "RUN touch /root/layer",
+ "FROM layer as derived",
+ "RUN touch -t @1485449953 /root/derived ; rm /root/layer",
+ "FROM busybox AS output",
+ "COPY --from=layer /root /root",
+ }, "\n"),
+ fsSkip: []string{"(dir):root:mtime", "(dir):root:(dir):layer:mtime"},
+ },
+
+ {
+ name: "dockerignore-minimal-test", // from #2237
+ contextDir: "dockerignore/minimal_test",
+ withoutDocker: true,
+ fsSkip: []string{"(dir):tmp:mtime", "(dir):tmp:(dir):stuff:mtime"},
+ },
+
+ {
+ name: "dockerignore-is-even-there",
+ contextDir: "dockerignore/empty",
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-irrelevant",
+ contextDir: "dockerignore/empty",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte(strings.Join([]string{"**/*-a", "!**/*-c"}, "\n"))
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0600); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exceptions-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte(strings.Join([]string{"**/*-a", "!**/*-c"}, "\n"))
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0644); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-extensions-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte(strings.Join([]string{"**/*-a", "!**/*-c"}, "\n"))
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0600); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-includes-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte(strings.Join([]string{"!**/*-c"}, "\n"))
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0640); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-includes-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("!**/*-c\n")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0100); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-plain-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("subdir-c")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0200); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-plain-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("subdir-c")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0400); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ // Deep ("**/"-prefixed) variant of the earlier
+ // "dockerignore-exclude-plain-dot" case. Renamed so the two cases no
+ // longer share a t.Run name: duplicate subtest names get "#NN"
+ // suffixes and make `go test -run` selection ambiguous.
+ name: "dockerignore-exclude-deep-plain-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("**/subdir-c")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0200); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ // Deep ("**/"-prefixed) variant of the earlier
+ // "dockerignore-exclude-plain-star" case. Renamed so the two cases no
+ // longer share a t.Run name: duplicate subtest names get "#NN"
+ // suffixes and make `go test -run` selection ambiguous.
+ name: "dockerignore-exclude-deep-plain-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("**/subdir-c")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0400); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-wildcard-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("subdir-*")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0000); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-wildcard-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("subdir-*")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0660); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-deep-wildcard-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("**/subdir-*")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0000); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-deep-wildcard-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("**/subdir-*")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0660); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-deep-subdir-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("**/subdir-f")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0666); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-deep-subdir-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("**/subdir-f")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0640); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-not-so-deep-subdir-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("**/subdir-b")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0705); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-not-so-deep-subdir-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte("**/subdir-b")
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0750); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-kind-of-deep-subdir-dot",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte(strings.Join([]string{"**/subdir-e", "!**/subdir-f"}, "\n"))
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0750); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exclude-kind-of-deep-subdir-star",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte(strings.Join([]string{"**/subdir-e", "!**/subdir-f"}, "\n"))
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0750); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ // Same exclusion pattern as the earlier
+ // "dockerignore-exclude-deep-subdir-dot" case, but with a "!" exception
+ // rule added. Renamed so the two cases no longer share a t.Run name:
+ // duplicate subtest names get "#NN" suffixes and make `go test -run`
+ // selection ambiguous.
+ name: "dockerignore-exclude-deep-subdir-dot-with-exception",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY . subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte(strings.Join([]string{"**/subdir-f", "!**/subdir-g"}, "\n"))
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0750); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ // Same exclusion pattern as the earlier
+ // "dockerignore-exclude-deep-subdir-star" case, but with a "!"
+ // exception rule added. Renamed so the two cases no longer share a
+ // t.Run name: duplicate subtest names get "#NN" suffixes and make
+ // `go test -run` selection ambiguous.
+ name: "dockerignore-exclude-deep-subdir-star-with-exception",
+ dockerfileContents: strings.Join([]string{
+ "FROM scratch",
+ "COPY * subdir/",
+ }, "\n"),
+ contextDir: "dockerignore/populated",
+ tweakContextDir: func(t *testing.T, contextDir, storageDriver, storageRoot string) (err error) {
+ dockerignore := []byte(strings.Join([]string{"**/subdir-f", "!**/subdir-g"}, "\n"))
+ if err := os.WriteFile(filepath.Join(contextDir, ".dockerignore"), dockerignore, 0750); err != nil {
+ return fmt.Errorf("writing .dockerignore file: %w", err)
+ }
+ if err = os.Chtimes(filepath.Join(contextDir, ".dockerignore"), testDate, testDate); err != nil {
+ return fmt.Errorf("setting date on .dockerignore file in temporary context directory: %w", err)
+ }
+ return nil
+ },
+ fsSkip: []string{"(dir):subdir:mtime"},
+ },
+
+ {
+ name: "env-whitespace",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name value`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-simple",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name=value`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-unquoted-list",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name=value name2=value2`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-dquoted-list",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name="value value1"`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-escaped-value",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name=value\ value2`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-squote-in-dquote",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name="value'quote space'value2"`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-dquote-in-squote",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name='value"double quote"value2'`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-escaped-list",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name=value\ value2 name2=value2\ value3`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-eddquote",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name="a\"b"`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-invalid-ssquote",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name='a\'b'`,
+ }, "\n"),
+ shouldFailAt: 3,
+ },
+
+ {
+ name: "env-esdquote",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name="a\'b"`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-essquote",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name='a\'b''`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-edsquote",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name='a\"b'`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-empty-squote-in-empty-dquote",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `ENV name="''"`,
+ }, "\n"),
+ },
+
+ {
+ name: "env-multiline",
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM scratch`,
+ `COPY script .`,
+ `# don't put anything after the next line - it must be the last line of the`,
+ `# Dockerfile and it must end with \`,
+ `ENV name=value \`,
+ ` name1=value1 \`,
+ ` name2="value2a \`,
+ ` value2b" \`,
+ ` name3="value3a\n\"value3b\"" \`,
+ ` name4="value4a\\nvalue4b" \`,
+ }, "\n"),
+ },
+
+ {
+ name: "copy-from-owner", // from issue #2518
+ dockerfileContents: strings.Join([]string{
+ `FROM alpine`,
+ `RUN set -ex; touch -t @1485449953 /test; chown 65:65 /test`,
+ `FROM scratch`,
+ `USER 66:66`,
+ `COPY --from=0 /test /test`,
+ }, "\n"),
+ fsSkip: []string{"test:mtime"},
+ },
+
+ {
+ name: "copy-from-owner-with-chown", // issue #2518, but with chown to override
+ dockerfileContents: strings.Join([]string{
+ `FROM alpine`,
+ `RUN set -ex; touch -t @1485449953 /test; chown 65:65 /test`,
+ `FROM scratch`,
+ `USER 66:66`,
+ `COPY --from=0 --chown=1:1 /test /test`,
+ }, "\n"),
+ fsSkip: []string{"test:mtime"},
+ },
+
+ {
+ name: "copy-for-user", // flip side of issue #2518
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM alpine`,
+ `USER 66:66`,
+ `COPY /script /script`,
+ }, "\n"),
+ },
+
+ {
+ name: "copy-for-user-with-chown", // flip side of issue #2518, but with chown to override
+ contextDir: "copy",
+ dockerfileContents: strings.Join([]string{
+ `FROM alpine`,
+ `USER 66:66`,
+ `COPY --chown=1:1 /script /script`,
+ }, "\n"),
+ },
+
+ {
+ name: "add-parent-symlink",
+ contextDir: "add/parent-symlink",
+ fsSkip: []string{"(dir):testsubdir:mtime", "(dir):testsubdir:(dir):etc:mtime"},
+ },
+
+ {
+ name: "add-parent-dangling",
+ contextDir: "add/parent-dangling",
+ fsSkip: []string{"(dir):symlink:mtime", "(dir):symlink-target:mtime", "(dir):symlink-target:(dir):subdirectory:mtime"},
+ },
+
+ {
+ name: "add-parent-clean",
+ contextDir: "add/parent-clean",
+ fsSkip: []string{"(dir):symlink:mtime", "(dir):symlink-target:mtime", "(dir):symlink-target:(dir):subdirectory:mtime"},
+ },
+
+ {
+ name: "add-archive-1",
+ contextDir: "add/archive",
+ dockerfile: "Dockerfile.1",
+ },
+
+ {
+ name: "add-archive-2",
+ contextDir: "add/archive",
+ dockerfile: "Dockerfile.2",
+ },
+
+ {
+ name: "add-archive-3",
+ contextDir: "add/archive",
+ dockerfile: "Dockerfile.3",
+ },
+
+ {
+ name: "add-archive-4",
+ contextDir: "add/archive",
+ dockerfile: "Dockerfile.4",
+ fsSkip: []string{"(dir):sub:mtime"},
+ },
+
+ {
+ name: "add-archive-5",
+ contextDir: "add/archive",
+ dockerfile: "Dockerfile.5",
+ fsSkip: []string{"(dir):sub:mtime"},
+ },
+
+ {
+ name: "add-archive-6",
+ contextDir: "add/archive",
+ dockerfile: "Dockerfile.6",
+ fsSkip: []string{"(dir):sub:mtime"},
+ },
+
+ {
+ name: "add-archive-7",
+ contextDir: "add/archive",
+ dockerfile: "Dockerfile.7",
+ fsSkip: []string{"(dir):sub:mtime"},
+ },
+
+ {
+ name: "add-dir-not-dir",
+ contextDir: "add/dir-not-dir",
+ },
+
+ {
+ name: "add-not-dir-dir",
+ contextDir: "add/not-dir-dir",
+ },
+
+ {
+ name: "add-populated-dir-not-dir",
+ contextDir: "add/populated-dir-not-dir",
+ },
+
+ {
+ name: "dockerignore-allowlist-subdir-nofile-dir",
+ contextDir: "dockerignore/allowlist/subdir-nofile",
+ shouldFailAt: 2,
+ failureRegex: "(no such file or directory)|(file not found)|(file does not exist)",
+ },
+
+ {
+ name: "dockerignore-allowlist-subdir-nofile-file",
+ contextDir: "dockerignore/allowlist/subdir-nofile",
+ shouldFailAt: 2,
+ failureRegex: "(no such file or directory)|(file not found)|(file does not exist)",
+ },
+
+ {
+ name: "dockerignore-allowlist-subdir-file-dir",
+ contextDir: "dockerignore/allowlist/subdir-file",
+ fsSkip: []string{"(dir):f1:mtime"},
+ },
+
+ {
+ name: "dockerignore-allowlist-subdir-file-file",
+ contextDir: "dockerignore/allowlist/subdir-file",
+ fsSkip: []string{"(dir):f1:mtime"},
+ },
+
+ {
+ name: "dockerignore-allowlist-nothing-dot",
+ contextDir: "dockerignore/allowlist/nothing-dot",
+ fsSkip: []string{"file:mtime"},
+ },
+
+ {
+ name: "dockerignore-allowlist-nothing-slash",
+ contextDir: "dockerignore/allowlist/nothing-slash",
+ fsSkip: []string{"file:mtime"},
+ },
+
+ {
+ // the directories are excluded, so entries for them don't get
+ // included in the build context archive, so they only get
+ // created implicitly when extracted, so there's no point in us
+ // trying to preserve any of that, either
+ name: "dockerignore-allowlist-subsubdir-file",
+ contextDir: "dockerignore/allowlist/subsubdir-file",
+ withoutDocker: true,
+ fsSkip: []string{"(dir):folder:mtime", "(dir):folder:(dir):subfolder:mtime", "file:mtime"},
+ },
+
+ {
+ name: "dockerignore-allowlist-subsubdir-nofile",
+ contextDir: "dockerignore/allowlist/subsubdir-nofile",
+ fsSkip: []string{"file:mtime"},
+ },
+
+ {
+ name: "dockerignore-allowlist-subsubdir-nosubdir",
+ contextDir: "dockerignore/allowlist/subsubdir-nosubdir",
+ fsSkip: []string{"file:mtime"},
+ },
+
+ {
+ name: "dockerignore-allowlist-alternating",
+ contextDir: "dockerignore/allowlist/alternating",
+ withoutDocker: true,
+ fsSkip: []string{
+ "(dir):subdir1:mtime",
+ "(dir):subdir1:(dir):subdir2:(dir):subdir3:mtime",
+ "(dir):subdir1:(dir):subdir2:(dir):subdir3:(dir):subdir4:(dir):subdir5:mtime",
+ "(dir):subdir2:(dir):subdir3:mtime",
+ "(dir):subdir2:(dir):subdir3:(dir):subdir4:(dir):subdir5:mtime",
+ "(dir):subdir3:mtime",
+ "(dir):subdir3:(dir):subdir4:(dir):subdir5:mtime",
+ "(dir):subdir4:(dir):subdir5:mtime",
+ "(dir):subdir5:mtime",
+ },
+ },
+
+ {
+ name: "dockerignore-allowlist-alternating-nothing",
+ contextDir: "dockerignore/allowlist/alternating-nothing",
+ shouldFailAt: 7,
+ failureRegex: "(no such file or directory)|(file not found)|(file does not exist)",
+ },
+
+ {
+ name: "dockerignore-allowlist-alternating-other",
+ contextDir: "dockerignore/allowlist/alternating-other",
+ shouldFailAt: 7,
+ failureRegex: "(no such file or directory)|(file not found)|(file does not exist)",
+ },
+
+ {
+ name: "tar-g",
+ contextDir: "tar-g",
+ withoutDocker: true,
+ fsSkip: []string{"(dir):tmp:mtime"},
+ },
+
+ {
+ name: "dockerignore-exceptions-skip",
+ contextDir: "dockerignore/exceptions-skip",
+ fsSkip: []string{"(dir):volume:mtime"},
+ },
+
+ {
+ name: "dockerignore-exceptions-weirdness-1",
+ contextDir: "dockerignore/exceptions-weirdness-1",
+ fsSkip: []string{"(dir):newdir:mtime", "(dir):newdir:(dir):subdir:mtime"},
+ },
+
+ {
+ name: "dockerignore-exceptions-weirdness-2",
+ contextDir: "dockerignore/exceptions-weirdness-2",
+ fsSkip: []string{"(dir):newdir:mtime", "(dir):newdir:(dir):subdir:mtime"},
+ },
+
+ {
+ name: "multistage-builtin-args",
+ dockerfile: "Dockerfile.margs",
+ dockerUseBuildKit: true,
+ },
+
+ {
+ name: "heredoc-copy",
+ dockerfile: "Dockerfile.heredoc_copy",
+ dockerUseBuildKit: true,
+ contextDir: "heredoc",
+ fsSkip: []string{"(dir):test:mtime",
+ "(dir):test2:mtime",
+ "(dir):test:(dir):humans.txt:mtime",
+ "(dir):test:(dir):robots.txt:mtime",
+ "(dir):test2:(dir):humans.txt:mtime",
+ "(dir):test2:(dir):robots.txt:mtime",
+ "(dir):test2:(dir):image_file:mtime",
+ "(dir):etc:(dir):hostname" /* buildkit does not contains /etc/hostname like buildah */},
+ },
+
+ {
+ name: "replace-symlink-with-directory",
+ contextDir: "replace/symlink-with-directory",
+ },
+
+ {
+ name: "replace-directory-with-symlink",
+ contextDir: "replace/symlink-with-directory",
+ dockerfile: "Dockerfile.2",
+ },
+
+ {
+ name: "replace-symlink-with-directory-subdir",
+ contextDir: "replace/symlink-with-directory",
+ dockerfile: "Dockerfile.3",
+ fsSkip: []string{"(dir):tree:mtime"},
+ },
+
+ {
+ name: "replace-directory-with-symlink-subdir",
+ contextDir: "replace/symlink-with-directory",
+ dockerfile: "Dockerfile.4",
+ fsSkip: []string{"(dir):tree:mtime"},
+ },
+
+ {
+ name: "workdir-owner", // from issue #3620
+ dockerfileContents: strings.Join([]string{
+ `# syntax=docker/dockerfile:1.4`,
+ `FROM alpine`,
+ `USER daemon`,
+ `WORKDIR /created/directory`,
+ `RUN ls /created`,
+ }, "\n"),
+ fsSkip: []string{"(dir):created:mtime", "(dir):created:(dir):directory:mtime"},
+ dockerUseBuildKit: true,
+ },
+
+ {
+ name: "env-precedence",
+ contextDir: "env/precedence",
+ dockerUseBuildKit: true,
+ },
+}
+
+func TestCommit(t *testing.T) {
+ testCases := []struct {
+ description string
+ baseImage string
+ changes, derivedChanges []string
+ config, derivedConfig *docker.Config
+ }{
+ {
+ description: "defaults",
+ baseImage: "docker.io/library/busybox",
+ },
+ {
+ description: "empty change",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{""},
+ },
+ {
+ description: "empty config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{},
+ },
+ {
+ description: "cmd just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"CMD /bin/imaginarySh"},
+ },
+ {
+ description: "cmd just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ Cmd: []string{"/usr/bin/imaginarySh"},
+ },
+ },
+ {
+ description: "cmd conflict",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"CMD /bin/imaginarySh"},
+ config: &docker.Config{
+ Cmd: []string{"/usr/bin/imaginarySh"},
+ },
+ },
+ {
+ description: "entrypoint just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"ENTRYPOINT /bin/imaginarySh"},
+ },
+ {
+ description: "entrypoint just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ Entrypoint: []string{"/usr/bin/imaginarySh"},
+ },
+ },
+ {
+ description: "entrypoint conflict",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"ENTRYPOINT /bin/imaginarySh"},
+ config: &docker.Config{
+ Entrypoint: []string{"/usr/bin/imaginarySh"},
+ },
+ },
+ {
+ description: "environment just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"ENV A=1", "ENV C=2"},
+ },
+ {
+ description: "environment just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ Env: []string{"A=B"},
+ },
+ },
+ {
+ description: "environment with conflict union",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"ENV A=1", "ENV C=2"},
+ config: &docker.Config{
+ Env: []string{"A=B"},
+ },
+ },
+ {
+ description: "expose just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"EXPOSE 12345"},
+ },
+ {
+ description: "expose just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ ExposedPorts: map[docker.Port]struct{}{"23456": struct{}{}},
+ },
+ },
+ {
+ description: "expose union",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"EXPOSE 12345"},
+ config: &docker.Config{
+ ExposedPorts: map[docker.Port]struct{}{"23456": struct{}{}},
+ },
+ },
+ {
+ description: "healthcheck just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{`HEALTHCHECK --interval=1s --timeout=1s --start-period=1s --retries=1 CMD ["/bin/false"]`},
+ },
+ {
+ description: "healthcheck just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ Healthcheck: &docker.HealthConfig{
+ Test: []string{"/bin/true"},
+ Interval: 2 * time.Second,
+ Timeout: 2 * time.Second,
+ StartPeriod: 2 * time.Second,
+ Retries: 2,
+ },
+ },
+ },
+ {
+ description: "healthcheck conflict",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{`HEALTHCHECK --interval=1s --timeout=1s --start-period=1s --retries=1 CMD ["/bin/false"]`},
+ config: &docker.Config{
+ Healthcheck: &docker.HealthConfig{
+ Test: []string{"/bin/true"},
+ Interval: 2 * time.Second,
+ Timeout: 2 * time.Second,
+ StartPeriod: 2 * time.Second,
+ Retries: 2,
+ },
+ },
+ },
+ {
+ description: "label just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"LABEL A=1 C=2"},
+ },
+ {
+ description: "label just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ Labels: map[string]string{"A": "B"},
+ },
+ },
+ {
+ description: "label with conflict union",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"LABEL A=1 C=2"},
+ config: &docker.Config{
+ Labels: map[string]string{"A": "B"},
+ },
+ },
+ // n.b. dockerd didn't like a MAINTAINER change, so no test for it, and it's not in a config blob
+ {
+ description: "onbuild just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"ONBUILD USER alice", "ONBUILD LABEL A=1"},
+ derivedChanges: []string{"LABEL C=3"},
+ },
+ {
+ description: "onbuild just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ OnBuild: []string{"USER bob", `CMD ["/bin/smash"]`, "LABEL B=2"},
+ },
+ derivedChanges: []string{"LABEL C=3"},
+ },
+ {
+ description: "onbuild conflict",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"ONBUILD USER alice", "ONBUILD LABEL A=1"},
+ config: &docker.Config{
+ OnBuild: []string{"USER bob", `CMD ["/bin/smash"]`, "LABEL B=2"},
+ },
+ derivedChanges: []string{"LABEL C=3"},
+ },
+ // n.b. dockerd didn't like a SHELL change, so no test for it or a conflict with a config blob
+ {
+ description: "shell just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ Shell: []string{"/usr/bin/imaginarySh"},
+ },
+ },
+ {
+ description: "stop signal conflict",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"STOPSIGNAL SIGTERM"},
+ config: &docker.Config{
+ StopSignal: "SIGKILL",
+ },
+ },
+ {
+ description: "stop timeout=0",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ StopTimeout: 0,
+ },
+ },
+ {
+ description: "stop timeout=15",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ StopTimeout: 15,
+ },
+ },
+ {
+ description: "stop timeout=15, then 0",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ StopTimeout: 15,
+ },
+ derivedConfig: &docker.Config{
+ StopTimeout: 0,
+ },
+ },
+ {
+ description: "stop timeout=0, then 15",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ StopTimeout: 0,
+ },
+ derivedConfig: &docker.Config{
+ StopTimeout: 15,
+ },
+ },
+ {
+ description: "user just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"USER 1001:1001"},
+ },
+ {
+ description: "user just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ User: "1000:1000",
+ },
+ },
+ {
+ description: "user with conflict",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"USER 1001:1001"},
+ config: &docker.Config{
+ User: "1000:1000",
+ },
+ },
+ {
+ description: "volume just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"VOLUME /a-volume"},
+ },
+ {
+ description: "volume just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ Volumes: map[string]struct{}{"/b-volume": struct{}{}},
+ },
+ },
+ {
+ description: "volume union",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"VOLUME /a-volume"},
+ config: &docker.Config{
+ Volumes: map[string]struct{}{"/b-volume": struct{}{}},
+ },
+ },
+ {
+ description: "workdir just changes",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"WORKDIR /yeah"},
+ },
+ {
+ description: "workdir just config",
+ baseImage: "docker.io/library/busybox",
+ config: &docker.Config{
+ WorkingDir: "/naw",
+ },
+ },
+ {
+ description: "workdir with conflict",
+ baseImage: "docker.io/library/busybox",
+ changes: []string{"WORKDIR /yeah"},
+ config: &docker.Config{
+ WorkingDir: "/naw",
+ },
+ },
+ }
+
+ var tempdir string
+ buildahDir := buildahDir
+ if buildahDir == "" {
+ if tempdir == "" {
+ tempdir = t.TempDir()
+ }
+ buildahDir = filepath.Join(tempdir, "buildah")
+ }
+ dockerDir := dockerDir
+ if dockerDir == "" {
+ if tempdir == "" {
+ tempdir = t.TempDir()
+ }
+ dockerDir = filepath.Join(tempdir, "docker")
+ }
+
+ ctx := context.TODO()
+
+ // connect to dockerd using go-dockerclient
+ client, err := docker.NewClientFromEnv()
+ require.NoErrorf(t, err, "unable to initialize docker client")
+ var dockerVersion []string
+ if version, err := client.Version(); err == nil {
+ if version != nil {
+ for _, s := range *version {
+ dockerVersion = append(dockerVersion, s)
+ }
+ }
+ } else {
+ require.NoErrorf(t, err, "unable to connect to docker daemon")
+ }
+
+ // find a new place to store buildah builds
+ tempdir = t.TempDir()
+
+ // create subdirectories to use for buildah storage
+ rootDir := filepath.Join(tempdir, "root")
+ runrootDir := filepath.Join(tempdir, "runroot")
+
+ // initialize storage for buildah
+ options := storage.StoreOptions{
+ GraphDriverName: os.Getenv("STORAGE_DRIVER"),
+ GraphRoot: rootDir,
+ RunRoot: runrootDir,
+ RootlessStoragePath: rootDir,
+ }
+ store, err := storage.GetStore(options)
+ require.NoErrorf(t, err, "error creating buildah storage at %q", rootDir)
+ defer func() {
+ if store != nil {
+ _, err := store.Shutdown(true)
+ require.NoErrorf(t, err, "error shutting down storage for buildah")
+ }
+ }()
+
+ // walk through test cases
+ for testIndex, testCase := range testCases {
+ t.Run(testCase.description, func(t *testing.T) {
+ test := testCases[testIndex]
+
+ // create the test container, then commit it, using the docker client
+ baseImage := test.baseImage
+ repository, tag := docker.ParseRepositoryTag(baseImage)
+ if tag == "" {
+ tag = "latest"
+ }
+ baseImage = repository + ":" + tag
+ if _, err := client.InspectImage(test.baseImage); err != nil && errors.Is(err, docker.ErrNoSuchImage) {
+ // oh, we need to pull the base image
+ err = client.PullImage(docker.PullImageOptions{
+ Repository: repository,
+ Tag: tag,
+ }, docker.AuthConfiguration{})
+ require.NoErrorf(t, err, "pulling base image")
+ }
+ container, err := client.CreateContainer(docker.CreateContainerOptions{
+ Context: ctx,
+ Config: &docker.Config{
+ Image: baseImage,
+ },
+ })
+ require.NoErrorf(t, err, "creating the working container with docker")
+ if err == nil {
+ defer func(containerName string) {
+ err := client.RemoveContainer(docker.RemoveContainerOptions{
+ ID: containerName,
+ Force: true,
+ })
+ assert.Nil(t, err, "error deleting working docker container %q", containerName)
+ }(container.ID)
+ }
+ dockerImageName := "committed:" + strconv.Itoa(testIndex)
+ dockerImage, err := client.CommitContainer(docker.CommitContainerOptions{
+ Container: container.ID,
+ Changes: test.changes,
+ Run: test.config,
+ Repository: dockerImageName,
+ })
+ assert.NoErrorf(t, err, "committing the working container with docker")
+ if err == nil {
+ defer func(dockerImageName string) {
+ err := client.RemoveImageExtended(dockerImageName, docker.RemoveImageOptions{
+ Context: ctx,
+ Force: true,
+ })
+ assert.Nil(t, err, "error deleting newly-built docker image %q", dockerImage.ID)
+ }(dockerImageName)
+ }
+ dockerRef, err := alltransports.ParseImageName("docker-daemon:" + dockerImageName)
+ assert.NoErrorf(t, err, "parsing name of newly-committed docker image")
+
+ if len(test.derivedChanges) > 0 || test.derivedConfig != nil {
+ container, err := client.CreateContainer(docker.CreateContainerOptions{
+ Context: ctx,
+ Config: &docker.Config{
+ Image: dockerImage.ID,
+ },
+ })
+ require.NoErrorf(t, err, "creating the derived container with docker")
+ if err == nil {
+ defer func(containerName string) {
+ err := client.RemoveContainer(docker.RemoveContainerOptions{
+ ID: containerName,
+ Force: true,
+ })
+ assert.Nil(t, err, "error deleting derived docker container %q", containerName)
+ }(container.ID)
+ }
+ derivedImageName := "derived:" + strconv.Itoa(testIndex)
+ derivedImage, err := client.CommitContainer(docker.CommitContainerOptions{
+ Container: container.ID,
+ Changes: test.derivedChanges,
+ Run: test.derivedConfig,
+ Repository: derivedImageName,
+ })
+ assert.NoErrorf(t, err, "committing the derived container with docker")
+ defer func(derivedImageName string) {
+ err := client.RemoveImageExtended(derivedImageName, docker.RemoveImageOptions{
+ Context: ctx,
+ Force: true,
+ })
+ assert.Nil(t, err, "error deleting newly-derived docker image %q", derivedImage.ID)
+ }(derivedImageName)
+ dockerRef, err = alltransports.ParseImageName("docker-daemon:" + derivedImageName)
+ assert.NoErrorf(t, err, "parsing name of newly-derived docker image")
+ }
+
+ // create the test container, then commit it, using the buildah API
+ builder, err := buildah.NewBuilder(ctx, store, buildah.BuilderOptions{
+ FromImage: baseImage,
+ })
+ require.NoErrorf(t, err, "creating the working container with buildah")
+ defer func(builder *buildah.Builder) {
+ err := builder.Delete()
+ assert.NoErrorf(t, err, "removing the working container")
+ }(builder)
+ var overrideConfig *manifest.Schema2Config
+ if test.config != nil {
+ overrideConfig = config.Schema2ConfigFromGoDockerclientConfig(test.config)
+ }
+ buildahID, _, _, err := builder.Commit(ctx, nil, buildah.CommitOptions{
+ PreferredManifestType: manifest.DockerV2Schema2MediaType,
+ OverrideChanges: test.changes,
+ OverrideConfig: overrideConfig,
+ })
+ assert.NoErrorf(t, err, "committing buildah image")
+ buildahRef, err := is.Transport.NewStoreReference(store, nil, buildahID)
+ assert.NoErrorf(t, err, "parsing name of newly-built buildah image")
+
+ if len(test.derivedChanges) > 0 || test.derivedConfig != nil {
+ derivedBuilder, err := buildah.NewBuilder(ctx, store, buildah.BuilderOptions{
+ FromImage: buildahID,
+ })
+ defer func(builder *buildah.Builder) {
+ err := builder.Delete()
+ assert.NoErrorf(t, err, "removing the derived container")
+ }(derivedBuilder)
+ require.NoErrorf(t, err, "creating the derived container with buildah")
+ var overrideConfig *manifest.Schema2Config
+ if test.derivedConfig != nil {
+ overrideConfig = config.Schema2ConfigFromGoDockerclientConfig(test.derivedConfig)
+ }
+ derivedID, _, _, err := builder.Commit(ctx, nil, buildah.CommitOptions{
+ PreferredManifestType: manifest.DockerV2Schema2MediaType,
+ OverrideChanges: test.derivedChanges,
+ OverrideConfig: overrideConfig,
+ })
+ assert.NoErrorf(t, err, "committing derived buildah image")
+ buildahRef, err = is.Transport.NewStoreReference(store, nil, derivedID)
+ assert.NoErrorf(t, err, "parsing name of newly-derived buildah image")
+ }
+
+ // scan the images
+ saveReport(ctx, t, dockerRef, filepath.Join(dockerDir, t.Name()), []byte{}, []byte{}, dockerVersion)
+ saveReport(ctx, t, buildahRef, filepath.Join(buildahDir, t.Name()), []byte{}, []byte{}, dockerVersion)
+ // compare the scans
+ _, originalDockerConfig, ociDockerConfig, fsDocker := readReport(t, filepath.Join(dockerDir, t.Name()))
+ _, originalBuildahConfig, ociBuildahConfig, fsBuildah := readReport(t, filepath.Join(buildahDir, t.Name()))
+ miss, left, diff, same := compareJSON(originalDockerConfig, originalBuildahConfig, originalSkip)
+ if !same {
+ assert.Failf(t, "Image configurations differ as committed in Docker format", configCompareResult(miss, left, diff, "buildah"))
+ }
+ miss, left, diff, same = compareJSON(ociDockerConfig, ociBuildahConfig, ociSkip)
+ if !same {
+ assert.Failf(t, "Image configurations differ when converted to OCI format", configCompareResult(miss, left, diff, "buildah"))
+ }
+ miss, left, diff, same = compareJSON(fsDocker, fsBuildah, fsSkip)
+ if !same {
+ assert.Failf(t, "Filesystem contents differ", fsCompareResult(miss, left, diff, "buildah"))
+ }
+ })
+ }
+}
diff --git a/tests/conformance/selinux.go b/tests/conformance/selinux.go
new file mode 100644
index 0000000..712f606
--- /dev/null
+++ b/tests/conformance/selinux.go
@@ -0,0 +1,14 @@
+// +build linux
+
+package conformance
+
+import (
+ selinux "github.com/opencontainers/selinux/go-selinux"
+)
+
+func selinuxMountFlag() string {
+ if selinux.GetEnabled() {
+ return ":Z"
+ }
+ return ""
+}
diff --git a/tests/conformance/selinux_unsupported.go b/tests/conformance/selinux_unsupported.go
new file mode 100644
index 0000000..70ba147
--- /dev/null
+++ b/tests/conformance/selinux_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package conformance
+
// selinuxMountFlag returns the suffix to append to bind-mount
// specifications; on non-Linux platforms SELinux relabeling is not
// available, so the suffix is always empty.
func selinuxMountFlag() string {
	return ""
}
diff --git a/tests/conformance/testdata/Dockerfile.add b/tests/conformance/testdata/Dockerfile.add
new file mode 100644
index 0000000..f78180e
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.add
@@ -0,0 +1,11 @@
+FROM busybox
+ADD https://github.com/openshift/origin/raw/master/README.md README.md
+USER 1001
+ADD https://github.com/openshift/origin/raw/master/LICENSE .
+ADD https://github.com/openshift/origin/raw/master/LICENSE A
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./a
+USER root
+RUN mkdir ./b
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./b/a
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./b/.
+ADD https://github.com/openshift/ruby-hello-world/archive/master.zip /tmp/
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_1 b/tests/conformance/testdata/Dockerfile.copyfrom_1
new file mode 100644
index 0000000..684d74b
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_1
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN touch -t @1485449953 /a /b
+FROM busybox
+COPY --from=base /a /
+RUN ls -al /a \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_10 b/tests/conformance/testdata/Dockerfile.copyfrom_10
new file mode 100644
index 0000000..a521d2d
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_10
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch -t @1485449953 /a/1
+FROM busybox
+COPY --from=base /a/1 /a/b/c
+RUN ls -al /a/b/c && ! ls -al /a/b/1 \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_11 b/tests/conformance/testdata/Dockerfile.copyfrom_11
new file mode 100644
index 0000000..214d50b
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_11
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch -t @1485449953 /a/1
+FROM busybox
+COPY --from=base /a /a/b/c
+RUN ls -al /a/b/c/1 && ! ls -al /a/b/1
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_12 b/tests/conformance/testdata/Dockerfile.copyfrom_12
new file mode 100644
index 0000000..9bc1e1c
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_12
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch -t @1485449953 /a/1
+FROM busybox
+COPY --from=base a /a
+RUN ls -al /a/1 && ! ls -al /a/a
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_13 b/tests/conformance/testdata/Dockerfile.copyfrom_13
new file mode 100644
index 0000000..065593a
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_13
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch -t @1485449953 /a/1
+FROM busybox
+COPY --from=base a/. /a
+RUN ls -al /a/1 && ! ls -al /a/a
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_2 b/tests/conformance/testdata/Dockerfile.copyfrom_2
new file mode 100644
index 0000000..f27b10e
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_2
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN touch -t @1485449953 /a
+FROM busybox
+COPY --from=base /a /a
+RUN ls -al /a \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_3 b/tests/conformance/testdata/Dockerfile.copyfrom_3
new file mode 100644
index 0000000..d7dca78
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_3
@@ -0,0 +1,6 @@
+FROM busybox as base
+RUN touch -t @1485449953 /a
+FROM busybox
+WORKDIR /b
+COPY --from=base /a .
+RUN ls -al /b/a \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_3_1 b/tests/conformance/testdata/Dockerfile.copyfrom_3_1
new file mode 100644
index 0000000..4f5075c
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_3_1
@@ -0,0 +1,6 @@
+FROM busybox as base
+RUN touch -t @1485449953 /a
+FROM busybox
+WORKDIR /b
+COPY --from=base /a ./b
+RUN ls -al /b/b
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_4 b/tests/conformance/testdata/Dockerfile.copyfrom_4
new file mode 100644
index 0000000..6ccd5c8
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_4
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a/b && touch -t @1485449953 /a/b/1 /a/b/2
+FROM busybox
+COPY --from=base /a/b/ /b/
+RUN ls -al /b/1 /b/2 /b && ! ls -al /a
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_5 b/tests/conformance/testdata/Dockerfile.copyfrom_5
new file mode 100644
index 0000000..1200f1f
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_5
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a/b && touch -t @1485449953 /a/b/1 /a/b/2
+FROM busybox
+COPY --from=base /a/b/* /b/
+RUN ls -al /b/1 /b/2 /b && ! ls -al /a /b/a /b/b
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_6 b/tests/conformance/testdata/Dockerfile.copyfrom_6
new file mode 100644
index 0000000..8aff8d9
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_6
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a/b && touch -t @1485449953 /a/b/1 /a/b/2
+FROM busybox
+COPY --from=base /a/b/. /b/
+RUN ls -al /b/1 /b/2 /b && ! ls -al /a /b/a /b/b
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_7 b/tests/conformance/testdata/Dockerfile.copyfrom_7
new file mode 100644
index 0000000..cbf22b2
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_7
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN touch -t @1485449953 /b
+FROM busybox
+COPY --from=base /b /a
+RUN ls -al /a && ! ls -al /b \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_8 b/tests/conformance/testdata/Dockerfile.copyfrom_8
new file mode 100644
index 0000000..510a770
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_8
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch -t @1485449953 /a/b
+FROM busybox
+COPY --from=base /a/b /a
+RUN ls -al /a && ! ls -al /b \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.copyfrom_9 b/tests/conformance/testdata/Dockerfile.copyfrom_9
new file mode 100644
index 0000000..2b69da8
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.copyfrom_9
@@ -0,0 +1,5 @@
+FROM busybox as base
+RUN mkdir -p /a && touch -t @1485449953 /a/1
+FROM busybox
+COPY --from=base /a/1 /a/b/c/
+RUN ls -al /a/b/c/1 && ! ls -al /a/b/1 \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.edgecases b/tests/conformance/testdata/Dockerfile.edgecases
new file mode 100644
index 0000000..d94a08e
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.edgecases
@@ -0,0 +1,49 @@
+# Note: Hopefully a registries.conf alias redirects this to quay.io/libpod/busybox
+FROM busybox
+
+MAINTAINER docker <docker@docker.io>
+
+ONBUILD RUN ["echo", "test"]
+ONBUILD RUN echo test
+ONBUILD COPY . /
+
+
+# RUN Commands \
+# linebreak in comment \
+RUN ["ls", "-la"]
+RUN ["echo", "'1234'"]
+RUN echo "1234"
+RUN echo 1234
+RUN echo '1234' && \
+ echo "456" && \
+ echo 789
+RUN sh -c 'echo root:testpass \
+ > /tmp/passwd'
+RUN mkdir -p /test /test2 /test3/test
+
+# ENV \
+ENV SCUBA 1 DUBA 3
+ENV SCUBA "1 DUBA 3"
+
+# CMD \
+CMD ["echo", "test"]
+CMD echo test
+CMD echo "test"
+CMD echo 'test'
+CMD echo 'test' | wc -
+
+#EXPOSE\
+EXPOSE 3000
+EXPOSE 9000 5000 6000
+
+USER docker
+USER docker:root
+
+VOLUME ["/test"]
+VOLUME ["/test", "/test2"]
+VOLUME /test3
+
+WORKDIR /test
+
+ADD . /
+COPY . copy
diff --git a/tests/conformance/testdata/Dockerfile.env b/tests/conformance/testdata/Dockerfile.env
new file mode 100644
index 0000000..21e7c4b
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.env
@@ -0,0 +1,23 @@
+FROM busybox
+ENV name value
+ENV name=value
+ENV name=value name2=value2
+ENV name="value value1"
+ENV name=value\ value2
+ENV name="value'quote space'value2"
+ENV name='value"double quote"value2'
+ENV name=value\ value2 name2=value2\ value3
+ENV name="a\"b"
+ENV name="a\'b"
+ENV name='a\'b'
+ENV name='a\'b''
+ENV name='a\"b'
+ENV name="''"
+# don't put anything after the next line - it must be the last line of the
+# Dockerfile and it must end with \
+ENV name=value \
+ name1=value1 \
+ name2="value2a \
+ value2b" \
+ name3="value3a\n\"value3b\"" \
+ name4="value4a\\nvalue4b" \ \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.exposedefault b/tests/conformance/testdata/Dockerfile.exposedefault
new file mode 100644
index 0000000..d3d9862
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.exposedefault
@@ -0,0 +1,2 @@
+FROM busybox
+EXPOSE 3469 \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.margs b/tests/conformance/testdata/Dockerfile.margs
new file mode 100644
index 0000000..5297f30
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.margs
@@ -0,0 +1,39 @@
+FROM alpine
+ARG BUILDPLATFORM
+ARG BUILDOS
+ARG BUILDARCH
+ARG BUILDVARIANT
+ARG TARGETPLATFORM
+ARG TARGETOS
+ARG TARGETARCH
+ARG TARGETVARIANT
+RUN mkdir first
+RUN echo ${BUILDPLATFORM} > first/buildplatform=`echo ${BUILDPLATFORM} | sed s,/,_,g`
+RUN echo ${BUILDOS} > first/buildos=`echo ${BUILDOS} | sed s,/,_,g`
+RUN echo ${BUILDARCH} > first/buildarch=`echo ${BUILDARCH} | sed s,/,_,g`
+RUN echo ${BUILDVARIANT} > first/buildvariant=`echo ${BUILDVARIANT} | sed s,/,_,g`
+RUN echo ${TARGETPLATFORM} > first/targetplatform=`echo ${TARGETPLATFORM} | sed s,/,_,g`
+RUN echo ${TARGETOS} > first/targetos=`echo ${TARGETOS} | sed s,/,_,g`
+RUN echo ${TARGETARCH} > first/targetarch=`echo ${TARGETARCH} | sed s,/,_,g`
+RUN echo ${TARGETVARIANT} > first/targetvariant=`echo ${TARGETVARIANT} | sed s,/,_,g`
+
+FROM alpine
+ARG BUILDPLATFORM
+ARG BUILDOS
+ARG BUILDARCH
+ARG BUILDVARIANT
+ARG TARGETPLATFORM
+ARG TARGETOS
+ARG TARGETARCH
+ARG TARGETVARIANT
+RUN mkdir second
+RUN echo ${BUILDPLATFORM} > second/buildplatform=`echo ${BUILDPLATFORM} | sed s,/,_,g`
+RUN echo ${BUILDOS} > second/buildos=`echo ${BUILDOS} | sed s,/,_,g`
+RUN echo ${BUILDARCH} > second/buildarch=`echo ${BUILDARCH} | sed s,/,_,g`
+RUN echo ${BUILDVARIANT} > second/buildvariant=`echo ${BUILDVARIANT} | sed s,/,_,g`
+RUN echo ${TARGETPLATFORM} > second/targetplatform=`echo ${TARGETPLATFORM} | sed s,/,_,g`
+RUN echo ${TARGETOS} > second/targetos=`echo ${TARGETOS} | sed s,/,_,g`
+RUN echo ${TARGETARCH} > second/targetarch=`echo ${TARGETARCH} | sed s,/,_,g`
+RUN echo ${TARGETVARIANT} > second/targetvariant=`echo ${TARGETVARIANT} | sed s,/,_,g`
+COPY --from=0 first/* ./first/
+RUN touch -r /etc/os-release first first/* second second/*
diff --git a/tests/conformance/testdata/Dockerfile.reusebase b/tests/conformance/testdata/Dockerfile.reusebase
new file mode 100644
index 0000000..8be06c7
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.reusebase
@@ -0,0 +1,6 @@
+FROM quay.io/libpod/centos:7 AS base
+RUN touch -t 201701261659.13 /1
+ENV LOCAL=/1
+
+FROM base
+RUN find $LOCAL
diff --git a/tests/conformance/testdata/Dockerfile.run.args b/tests/conformance/testdata/Dockerfile.run.args
new file mode 100644
index 0000000..e09926c
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.run.args
@@ -0,0 +1,5 @@
+FROM busybox
+RUN echo first second
+RUN /bin/echo third fourth
+RUN ["/bin/echo", "fifth", "sixth"]
+RUN ["/bin/sh", "-c", "echo inner $1", "", "outer"] \ No newline at end of file
diff --git a/tests/conformance/testdata/Dockerfile.shell b/tests/conformance/testdata/Dockerfile.shell
new file mode 100644
index 0000000..bf7f068
--- /dev/null
+++ b/tests/conformance/testdata/Dockerfile.shell
@@ -0,0 +1,3 @@
+FROM quay.io/libpod/centos:7
+SHELL ["/bin/bash", "-xc"]
+RUN env \ No newline at end of file
diff --git a/tests/conformance/testdata/add/archive/Dockerfile.1 b/tests/conformance/testdata/add/archive/Dockerfile.1
new file mode 100644
index 0000000..27c49ea
--- /dev/null
+++ b/tests/conformance/testdata/add/archive/Dockerfile.1
@@ -0,0 +1,2 @@
+FROM scratch
+ADD . .
diff --git a/tests/conformance/testdata/add/archive/Dockerfile.2 b/tests/conformance/testdata/add/archive/Dockerfile.2
new file mode 100644
index 0000000..13563cd
--- /dev/null
+++ b/tests/conformance/testdata/add/archive/Dockerfile.2
@@ -0,0 +1,2 @@
+FROM scratch
+ADD sub/ /
diff --git a/tests/conformance/testdata/add/archive/Dockerfile.3 b/tests/conformance/testdata/add/archive/Dockerfile.3
new file mode 100644
index 0000000..1395b42
--- /dev/null
+++ b/tests/conformance/testdata/add/archive/Dockerfile.3
@@ -0,0 +1,2 @@
+FROM scratch
+ADD sub/subdirectory.tar.gz /
diff --git a/tests/conformance/testdata/add/archive/Dockerfile.4 b/tests/conformance/testdata/add/archive/Dockerfile.4
new file mode 100644
index 0000000..ef994fb
--- /dev/null
+++ b/tests/conformance/testdata/add/archive/Dockerfile.4
@@ -0,0 +1,2 @@
+FROM scratch
+ADD / /
diff --git a/tests/conformance/testdata/add/archive/Dockerfile.5 b/tests/conformance/testdata/add/archive/Dockerfile.5
new file mode 100644
index 0000000..70eb72b
--- /dev/null
+++ b/tests/conformance/testdata/add/archive/Dockerfile.5
@@ -0,0 +1,2 @@
+FROM scratch
+ADD sub/ sub/
diff --git a/tests/conformance/testdata/add/archive/Dockerfile.6 b/tests/conformance/testdata/add/archive/Dockerfile.6
new file mode 100644
index 0000000..950ef66
--- /dev/null
+++ b/tests/conformance/testdata/add/archive/Dockerfile.6
@@ -0,0 +1,2 @@
+FROM scratch
+ADD sub/subdirectory.tar.gz /sub/
diff --git a/tests/conformance/testdata/add/archive/Dockerfile.7 b/tests/conformance/testdata/add/archive/Dockerfile.7
new file mode 100644
index 0000000..c8eb504
--- /dev/null
+++ b/tests/conformance/testdata/add/archive/Dockerfile.7
@@ -0,0 +1,2 @@
+FROM scratch
+ADD sub/* /sub/
diff --git a/tests/conformance/testdata/add/archive/sub/subdirectory.tar.gz b/tests/conformance/testdata/add/archive/sub/subdirectory.tar.gz
new file mode 100644
index 0000000..50b79b4
--- /dev/null
+++ b/tests/conformance/testdata/add/archive/sub/subdirectory.tar.gz
Binary files differ
diff --git a/tests/conformance/testdata/add/dir-not-dir/Dockerfile b/tests/conformance/testdata/add/dir-not-dir/Dockerfile
new file mode 100644
index 0000000..d64852f
--- /dev/null
+++ b/tests/conformance/testdata/add/dir-not-dir/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+RUN mkdir fileone
+ADD test.tar /
diff --git a/tests/conformance/testdata/add/dir-not-dir/test.tar b/tests/conformance/testdata/add/dir-not-dir/test.tar
new file mode 100644
index 0000000..a453463
--- /dev/null
+++ b/tests/conformance/testdata/add/dir-not-dir/test.tar
Binary files differ
diff --git a/tests/conformance/testdata/add/not-dir-dir/Dockerfile b/tests/conformance/testdata/add/not-dir-dir/Dockerfile
new file mode 100644
index 0000000..5cf8016
--- /dev/null
+++ b/tests/conformance/testdata/add/not-dir-dir/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+RUN touch /new-directory
+ADD test.tar /
+RUN ls -ld /new-directory
diff --git a/tests/conformance/testdata/add/not-dir-dir/test.tar b/tests/conformance/testdata/add/not-dir-dir/test.tar
new file mode 100644
index 0000000..80c3972
--- /dev/null
+++ b/tests/conformance/testdata/add/not-dir-dir/test.tar
Binary files differ
diff --git a/tests/conformance/testdata/add/parent-clean/Dockerfile b/tests/conformance/testdata/add/parent-clean/Dockerfile
new file mode 100644
index 0000000..0bc892b
--- /dev/null
+++ b/tests/conformance/testdata/add/parent-clean/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+RUN ln -s /symlink-target/subdirectory /symlink
+ADD . /symlink/..
+RUN find /symlink* -print
diff --git a/tests/conformance/testdata/add/parent-dangling/Dockerfile b/tests/conformance/testdata/add/parent-dangling/Dockerfile
new file mode 100644
index 0000000..03193b0
--- /dev/null
+++ b/tests/conformance/testdata/add/parent-dangling/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+RUN ln -s symlink-target /symlink
+ADD . /symlink/subdirectory/
+RUN find /symlink* -print
diff --git a/tests/conformance/testdata/add/parent-symlink/Dockerfile b/tests/conformance/testdata/add/parent-symlink/Dockerfile
new file mode 100644
index 0000000..ae1e41b
--- /dev/null
+++ b/tests/conformance/testdata/add/parent-symlink/Dockerfile
@@ -0,0 +1,2 @@
+FROM scratch
+ADD foobar.tar /testsubdir
diff --git a/tests/conformance/testdata/add/parent-symlink/foobar.tar b/tests/conformance/testdata/add/parent-symlink/foobar.tar
new file mode 100644
index 0000000..ba1bb18
--- /dev/null
+++ b/tests/conformance/testdata/add/parent-symlink/foobar.tar
Binary files differ
diff --git a/tests/conformance/testdata/add/populated-dir-not-dir/Dockerfile b/tests/conformance/testdata/add/populated-dir-not-dir/Dockerfile
new file mode 100644
index 0000000..50f0f3f
--- /dev/null
+++ b/tests/conformance/testdata/add/populated-dir-not-dir/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox
+RUN mkdir dirone
+RUN echo one > dirone/onefile.txt
+RUN echo two > dirone/twofile.txt
+ADD test.tar /
diff --git a/tests/conformance/testdata/add/populated-dir-not-dir/test.tar b/tests/conformance/testdata/add/populated-dir-not-dir/test.tar
new file mode 100644
index 0000000..4cd067c
--- /dev/null
+++ b/tests/conformance/testdata/add/populated-dir-not-dir/test.tar
Binary files differ
diff --git a/tests/conformance/testdata/copy/Dockerfile b/tests/conformance/testdata/copy/Dockerfile
new file mode 100644
index 0000000..aedf19d
--- /dev/null
+++ b/tests/conformance/testdata/copy/Dockerfile
@@ -0,0 +1,3 @@
+FROM quay.io/libpod/centos:7
+COPY script /usr/bin
+RUN ls -al /usr/bin/script \ No newline at end of file
diff --git a/tests/conformance/testdata/copy/script b/tests/conformance/testdata/copy/script
new file mode 100644
index 0000000..6c4a1e0
--- /dev/null
+++ b/tests/conformance/testdata/copy/script
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+exit 0 \ No newline at end of file
diff --git a/tests/conformance/testdata/copyblahblub/Dockerfile b/tests/conformance/testdata/copyblahblub/Dockerfile
new file mode 100644
index 0000000..3c64a21
--- /dev/null
+++ b/tests/conformance/testdata/copyblahblub/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+COPY firstdir/seconddir /var
+RUN ls -la /var
+RUN ls -la /var/dir-a
diff --git a/tests/conformance/testdata/copyblahblub/Dockerfile2 b/tests/conformance/testdata/copyblahblub/Dockerfile2
new file mode 100644
index 0000000..688b352
--- /dev/null
+++ b/tests/conformance/testdata/copyblahblub/Dockerfile2
@@ -0,0 +1,4 @@
+FROM busybox
+COPY /firstdir/seconddir /var
+RUN ls -la /var
+RUN ls -la /var/dir-a
diff --git a/tests/conformance/testdata/copyblahblub/Dockerfile3 b/tests/conformance/testdata/copyblahblub/Dockerfile3
new file mode 100644
index 0000000..178c7de
--- /dev/null
+++ b/tests/conformance/testdata/copyblahblub/Dockerfile3
@@ -0,0 +1,8 @@
+FROM busybox
+COPY /firstdir/seconddir /var
+RUN ls -la /var
+RUN ls -la /var/dir-a
+FROM busybox
+COPY --from=0 /var/dir-a /var
+RUN ls -la /var
+RUN ls -la /var/file-a
diff --git a/tests/conformance/testdata/copyblahblub/firstdir/seconddir/dir-a/file-a b/tests/conformance/testdata/copyblahblub/firstdir/seconddir/dir-a/file-a
new file mode 100644
index 0000000..2f76e89
--- /dev/null
+++ b/tests/conformance/testdata/copyblahblub/firstdir/seconddir/dir-a/file-a
@@ -0,0 +1 @@
+file-a
diff --git a/tests/conformance/testdata/copyblahblub/firstdir/seconddir/dir-b/file-b b/tests/conformance/testdata/copyblahblub/firstdir/seconddir/dir-b/file-b
new file mode 100644
index 0000000..3b8ef5a
--- /dev/null
+++ b/tests/conformance/testdata/copyblahblub/firstdir/seconddir/dir-b/file-b
@@ -0,0 +1 @@
+file-b
diff --git a/tests/conformance/testdata/copychown/Dockerfile b/tests/conformance/testdata/copychown/Dockerfile
new file mode 100644
index 0000000..c35e867
--- /dev/null
+++ b/tests/conformance/testdata/copychown/Dockerfile
@@ -0,0 +1,11 @@
+FROM centos:8
+COPY --chown=1:2 script /usr/bin/script.12
+COPY --chown=1:adm script /usr/bin/script.1-adm
+COPY --chown=1 script /usr/bin/script.1
+COPY --chown=lp:adm script /usr/bin/script.lp-adm
+COPY --chown=2:mail script /usr/bin/script.2-mail
+COPY --chown=2 script /usr/bin/script.2
+COPY --chown=bin script /usr/bin/script.bin
+COPY --chown=lp script /usr/bin/script.lp
+COPY --chown=3 script script2 /usr/local/bin/
+RUN ls -al /usr/bin/script
diff --git a/tests/conformance/testdata/copychown/script b/tests/conformance/testdata/copychown/script
new file mode 100644
index 0000000..c3c3f3f
--- /dev/null
+++ b/tests/conformance/testdata/copychown/script
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 0 \ No newline at end of file
diff --git a/tests/conformance/testdata/copychown/script2 b/tests/conformance/testdata/copychown/script2
new file mode 100644
index 0000000..80f336a
--- /dev/null
+++ b/tests/conformance/testdata/copychown/script2
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 1 \ No newline at end of file
diff --git a/tests/conformance/testdata/copydir/Dockerfile b/tests/conformance/testdata/copydir/Dockerfile
new file mode 100644
index 0000000..a8f5df5
--- /dev/null
+++ b/tests/conformance/testdata/copydir/Dockerfile
@@ -0,0 +1,3 @@
+FROM quay.io/libpod/centos:7
+COPY dir /dir
+RUN ls -al /dir/file \ No newline at end of file
diff --git a/tests/conformance/testdata/copydir/dir/file b/tests/conformance/testdata/copydir/dir/file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/copydir/dir/file
diff --git a/tests/conformance/testdata/copyempty/.script b/tests/conformance/testdata/copyempty/.script
new file mode 100644
index 0000000..f52d057
--- /dev/null
+++ b/tests/conformance/testdata/copyempty/.script
@@ -0,0 +1,2 @@
+#!/bin/bash
+: \ No newline at end of file
diff --git a/tests/conformance/testdata/copyempty/Dockerfile b/tests/conformance/testdata/copyempty/Dockerfile
new file mode 100644
index 0000000..f32d3e0
--- /dev/null
+++ b/tests/conformance/testdata/copyempty/Dockerfile
@@ -0,0 +1,2 @@
+FROM centos:8
+COPY "" /usr/local/tmp/
diff --git a/tests/conformance/testdata/copyempty/Dockerfile2 b/tests/conformance/testdata/copyempty/Dockerfile2
new file mode 100644
index 0000000..7eb4342
--- /dev/null
+++ b/tests/conformance/testdata/copyempty/Dockerfile2
@@ -0,0 +1,2 @@
+FROM centos:8
+COPY script1 "" script2 /usr/local/tmp/
diff --git a/tests/conformance/testdata/copyempty/script1 b/tests/conformance/testdata/copyempty/script1
new file mode 100644
index 0000000..c3c3f3f
--- /dev/null
+++ b/tests/conformance/testdata/copyempty/script1
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 0 \ No newline at end of file
diff --git a/tests/conformance/testdata/copyempty/script2 b/tests/conformance/testdata/copyempty/script2
new file mode 100644
index 0000000..80f336a
--- /dev/null
+++ b/tests/conformance/testdata/copyempty/script2
@@ -0,0 +1,2 @@
+#!/bin/bash
+exit 1 \ No newline at end of file
diff --git a/tests/conformance/testdata/copyrename/Dockerfile b/tests/conformance/testdata/copyrename/Dockerfile
new file mode 100644
index 0000000..d6711bf
--- /dev/null
+++ b/tests/conformance/testdata/copyrename/Dockerfile
@@ -0,0 +1,3 @@
+FROM quay.io/libpod/centos:7
+COPY file1 /usr/bin/file2
+RUN ls -al /usr/bin/file2 && ! ls -al /usr/bin/file1 \ No newline at end of file
diff --git a/tests/conformance/testdata/copyrename/file1 b/tests/conformance/testdata/copyrename/file1
new file mode 100644
index 0000000..6c4a1e0
--- /dev/null
+++ b/tests/conformance/testdata/copyrename/file1
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+exit 0 \ No newline at end of file
diff --git a/tests/conformance/testdata/copysymlink/Dockerfile b/tests/conformance/testdata/copysymlink/Dockerfile
new file mode 100644
index 0000000..4922f08
--- /dev/null
+++ b/tests/conformance/testdata/copysymlink/Dockerfile
@@ -0,0 +1,2 @@
+FROM quay.io/libpod/centos:7
+COPY file-link.tar.gz /
diff --git a/tests/conformance/testdata/copysymlink/Dockerfile2 b/tests/conformance/testdata/copysymlink/Dockerfile2
new file mode 100644
index 0000000..7d2c0dc
--- /dev/null
+++ b/tests/conformance/testdata/copysymlink/Dockerfile2
@@ -0,0 +1,7 @@
+FROM quay.io/libpod/centos:7
+COPY file.tar.gz /
+RUN ln -s file.tar.gz file-link.tar.gz
+RUN ls -l /file-link.tar.gz
+FROM quay.io/libpod/centos:7
+COPY --from=0 /file-link.tar.gz /
+RUN ls -l /file-link.tar.gz
diff --git a/tests/conformance/testdata/copysymlink/file-link.tar.gz b/tests/conformance/testdata/copysymlink/file-link.tar.gz
new file mode 120000
index 0000000..5efffc8
--- /dev/null
+++ b/tests/conformance/testdata/copysymlink/file-link.tar.gz
@@ -0,0 +1 @@
+file.tar.gz \ No newline at end of file
diff --git a/tests/conformance/testdata/copysymlink/file.tar.gz b/tests/conformance/testdata/copysymlink/file.tar.gz
new file mode 100644
index 0000000..4b5fa63
--- /dev/null
+++ b/tests/conformance/testdata/copysymlink/file.tar.gz
@@ -0,0 +1 @@
+hello, world
diff --git a/tests/conformance/testdata/dir/Dockerfile b/tests/conformance/testdata/dir/Dockerfile
new file mode 100644
index 0000000..4164fec
--- /dev/null
+++ b/tests/conformance/testdata/dir/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+COPY . /
+COPY . dir
+COPY subdir/ test/
diff --git a/tests/conformance/testdata/dir/file b/tests/conformance/testdata/dir/file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dir/file
diff --git a/tests/conformance/testdata/dir/subdir/file2 b/tests/conformance/testdata/dir/subdir/file2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dir/subdir/file2
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/.dockerignore
new file mode 100644
index 0000000..4cf1954
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/.dockerignore
@@ -0,0 +1,7 @@
+**
+!subdir
+subdir/subdir1
+!subdir/subdir1/subdir2
+subdir/subdir1/subdir2/subdir3
+!subdir/subdir1/subdir2/subdir3/subdir4
+subdir/subdir1/subdir2/subdir3/subdir4/subdir5
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/Dockerfile
new file mode 100644
index 0000000..b6137c3
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/Dockerfile
@@ -0,0 +1,8 @@
+FROM scratch
+ADD subdir /
+ADD subdir/subdir1 /
+ADD subdir/subdir1/subdir2 /
+ADD subdir/subdir1/subdir2/subdir3 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4/subdir5 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file /
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file b/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file
new file mode 100644
index 0000000..98baad0
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating-nothing/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file
@@ -0,0 +1 @@
+Hi, I'm a file.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating-other/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/alternating-other/.dockerignore
new file mode 100644
index 0000000..2b380f4
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating-other/.dockerignore
@@ -0,0 +1,8 @@
+**
+!subdir
+subdir/subdir1
+!subdir/subdir1/subdir2
+subdir/subdir1/subdir2/subdir3
+!subdir/subdir1/subdir2/subdir3/subdir4
+subdir/subdir1/subdir2/subdir3/subdir4/subdir5
+!subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating-other/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/alternating-other/Dockerfile
new file mode 100644
index 0000000..b6137c3
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating-other/Dockerfile
@@ -0,0 +1,8 @@
+FROM scratch
+ADD subdir /
+ADD subdir/subdir1 /
+ADD subdir/subdir1/subdir2 /
+ADD subdir/subdir1/subdir2/subdir3 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4/subdir5 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file /
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating-other/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file2 b/tests/conformance/testdata/dockerignore/allowlist/alternating-other/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file2
new file mode 100644
index 0000000..98baad0
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating-other/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file2
@@ -0,0 +1 @@
+Hi, I'm a file.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/alternating/.dockerignore
new file mode 100644
index 0000000..2b380f4
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating/.dockerignore
@@ -0,0 +1,8 @@
+**
+!subdir
+subdir/subdir1
+!subdir/subdir1/subdir2
+subdir/subdir1/subdir2/subdir3
+!subdir/subdir1/subdir2/subdir3/subdir4
+subdir/subdir1/subdir2/subdir3/subdir4/subdir5
+!subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/alternating/Dockerfile
new file mode 100644
index 0000000..b6137c3
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating/Dockerfile
@@ -0,0 +1,8 @@
+FROM scratch
+ADD subdir /
+ADD subdir/subdir1 /
+ADD subdir/subdir1/subdir2 /
+ADD subdir/subdir1/subdir2/subdir3 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4/subdir5 /
+ADD subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file /
diff --git a/tests/conformance/testdata/dockerignore/allowlist/alternating/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file b/tests/conformance/testdata/dockerignore/allowlist/alternating/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file
new file mode 100644
index 0000000..98baad0
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/alternating/subdir/subdir1/subdir2/subdir3/subdir4/subdir5/file
@@ -0,0 +1 @@
+Hi, I'm a file.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/nothing-dot/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/nothing-dot/.dockerignore
new file mode 100644
index 0000000..1d085ca
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/nothing-dot/.dockerignore
@@ -0,0 +1 @@
+**
diff --git a/tests/conformance/testdata/dockerignore/allowlist/nothing-dot/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/nothing-dot/Dockerfile
new file mode 100644
index 0000000..5a20aa2
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/nothing-dot/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox
+RUN touch -t @1485449953 /file
+FROM scratch
+COPY --from=0 /file /
+ADD . .
diff --git a/tests/conformance/testdata/dockerignore/allowlist/nothing-slash/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/nothing-slash/.dockerignore
new file mode 100644
index 0000000..1d085ca
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/nothing-slash/.dockerignore
@@ -0,0 +1 @@
+**
diff --git a/tests/conformance/testdata/dockerignore/allowlist/nothing-slash/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/nothing-slash/Dockerfile
new file mode 100644
index 0000000..27ef8f8
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/nothing-slash/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox
+RUN touch -t @1485449953 /file
+FROM scratch
+COPY --from=0 /file /
+ADD / /
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subdir-file/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/subdir-file/.dockerignore
new file mode 100644
index 0000000..59d3272
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subdir-file/.dockerignore
@@ -0,0 +1,2 @@
+**
+!folder1/file1
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subdir-file/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/subdir-file/Dockerfile
new file mode 100644
index 0000000..ad407b6
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subdir-file/Dockerfile
@@ -0,0 +1,2 @@
+FROM scratch
+ADD folder1 /f1
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subdir-file/folder1/file1 b/tests/conformance/testdata/dockerignore/allowlist/subdir-file/folder1/file1
new file mode 100644
index 0000000..163d2b5
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subdir-file/folder1/file1
@@ -0,0 +1 @@
+Hi, I'm file 1.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subdir-file/folder1/file2 b/tests/conformance/testdata/dockerignore/allowlist/subdir-file/folder1/file2
new file mode 100644
index 0000000..f2c5375
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subdir-file/folder1/file2
@@ -0,0 +1 @@
+Hi, I'm file 2.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/.dockerignore
new file mode 100644
index 0000000..f816333
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/.dockerignore
@@ -0,0 +1,2 @@
+**
+!folder1/file
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/Dockerfile
new file mode 100644
index 0000000..ad407b6
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/Dockerfile
@@ -0,0 +1,2 @@
+FROM scratch
+ADD folder1 /f1
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/folder1/file1 b/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/folder1/file1
new file mode 100644
index 0000000..163d2b5
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/folder1/file1
@@ -0,0 +1 @@
+Hi, I'm file 1.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/folder1/file2 b/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/folder1/file2
new file mode 100644
index 0000000..f2c5375
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subdir-nofile/folder1/file2
@@ -0,0 +1 @@
+Hi, I'm file 2.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/.dockerignore
new file mode 100644
index 0000000..40075ce
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/.dockerignore
@@ -0,0 +1,2 @@
+**
+!folder/subfolder/file
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/Dockerfile
new file mode 100644
index 0000000..5a20aa2
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox
+RUN touch -t @1485449953 /file
+FROM scratch
+COPY --from=0 /file /
+ADD . .
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/folder/subfolder/file b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/folder/subfolder/file
new file mode 100644
index 0000000..d1307ab
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-file/folder/subfolder/file
@@ -0,0 +1 @@
+I'm a file.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/.dockerignore
new file mode 100644
index 0000000..40075ce
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/.dockerignore
@@ -0,0 +1,2 @@
+**
+!folder/subfolder/file
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/Dockerfile
new file mode 100644
index 0000000..5a20aa2
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox
+RUN touch -t @1485449953 /file
+FROM scratch
+COPY --from=0 /file /
+ADD . .
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/folder/file b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/folder/file
new file mode 100644
index 0000000..d1307ab
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nofile/folder/file
@@ -0,0 +1 @@
+I'm a file.
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/.dockerignore b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/.dockerignore
new file mode 100644
index 0000000..40075ce
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/.dockerignore
@@ -0,0 +1,2 @@
+**
+!folder/subfolder/file
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/Dockerfile b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/Dockerfile
new file mode 100644
index 0000000..5a20aa2
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox
+RUN touch -t @1485449953 /file
+FROM scratch
+COPY --from=0 /file /
+ADD . .
diff --git a/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/folder/file b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/folder/file
new file mode 100644
index 0000000..81b4ffd
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/allowlist/subsubdir-nosubdir/folder/file
@@ -0,0 +1 @@
+Hi, I'm a file
diff --git a/tests/conformance/testdata/dockerignore/empty/.dockerignore b/tests/conformance/testdata/dockerignore/empty/.dockerignore
new file mode 100644
index 0000000..6b9e0f8
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/empty/.dockerignore
@@ -0,0 +1 @@
+# Functionally empty.
diff --git a/tests/conformance/testdata/dockerignore/empty/Dockerfile b/tests/conformance/testdata/dockerignore/empty/Dockerfile
new file mode 100644
index 0000000..2b3f7bb
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/empty/Dockerfile
@@ -0,0 +1,2 @@
+FROM scratch
+COPY . subdir/
diff --git a/tests/conformance/testdata/dockerignore/empty/test1.txt b/tests/conformance/testdata/dockerignore/empty/test1.txt
new file mode 100644
index 0000000..a5bce3f
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/empty/test1.txt
@@ -0,0 +1 @@
+test1
diff --git a/tests/conformance/testdata/dockerignore/exceptions-skip/.dockerignore b/tests/conformance/testdata/dockerignore/exceptions-skip/.dockerignore
new file mode 100644
index 0000000..95a651c
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-skip/.dockerignore
@@ -0,0 +1,2 @@
+volume/
+!**/oneline.txt
diff --git a/tests/conformance/testdata/dockerignore/exceptions-skip/Dockerfile b/tests/conformance/testdata/dockerignore/exceptions-skip/Dockerfile
new file mode 100644
index 0000000..5d7c700
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-skip/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+COPY ./ ./
diff --git a/tests/conformance/testdata/dockerignore/exceptions-skip/volume/data/oneline.txt b/tests/conformance/testdata/dockerignore/exceptions-skip/volume/data/oneline.txt
new file mode 100644
index 0000000..55e5952
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-skip/volume/data/oneline.txt
@@ -0,0 +1 @@
+one line of text
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/.dockerignore b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/.dockerignore
new file mode 100644
index 0000000..aeebb19
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/.dockerignore
@@ -0,0 +1,2 @@
+subdir
+!*/sub1*
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/Dockerfile b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/Dockerfile
new file mode 100644
index 0000000..732754d
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+COPY ./ /newdir
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub1.txt b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub1.txt
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub2.txt b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub2.txt
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub3.txt b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub3.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-1/subdir/sub3.txt
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/.dockerignore b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/.dockerignore
new file mode 100644
index 0000000..9dfeea2
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/.dockerignore
@@ -0,0 +1,3 @@
+subdir
+!*/sub1*
+!subdir/sub3*
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/Dockerfile b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/Dockerfile
new file mode 100644
index 0000000..732754d
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/Dockerfile
@@ -0,0 +1,2 @@
+FROM alpine
+COPY ./ /newdir
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub1.txt b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub1.txt
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub2.txt b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub2.txt
diff --git a/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub3.txt b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub3.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/exceptions-weirdness-2/subdir/sub3.txt
diff --git a/tests/conformance/testdata/dockerignore/integration1/.dockerignore b/tests/conformance/testdata/dockerignore/integration1/.dockerignore
new file mode 100644
index 0000000..4cec094
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration1/.dockerignore
@@ -0,0 +1,6 @@
+# comment
+*
+test*
+!test2*
+subdir
+!*/sub1* \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/integration1/Dockerfile b/tests/conformance/testdata/dockerignore/integration1/Dockerfile
new file mode 100644
index 0000000..4d930ef
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration1/Dockerfile
@@ -0,0 +1,4 @@
+FROM alpine
+
+COPY ./ ./
+COPY subdir ./ \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/integration1/subdir/sub1.txt b/tests/conformance/testdata/dockerignore/integration1/subdir/sub1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration1/subdir/sub1.txt
diff --git a/tests/conformance/testdata/dockerignore/integration1/subdir/sub2.txt b/tests/conformance/testdata/dockerignore/integration1/subdir/sub2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration1/subdir/sub2.txt
diff --git a/tests/conformance/testdata/dockerignore/integration1/test1.txt b/tests/conformance/testdata/dockerignore/integration1/test1.txt
new file mode 100644
index 0000000..745eda7
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration1/test1.txt
@@ -0,0 +1 @@
+test1 failed \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/integration1/test2.txt b/tests/conformance/testdata/dockerignore/integration1/test2.txt
new file mode 100644
index 0000000..4e4d75d
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration1/test2.txt
@@ -0,0 +1 @@
+test2 failed \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/integration2/.dockerignore b/tests/conformance/testdata/dockerignore/integration2/.dockerignore
new file mode 100644
index 0000000..7cf9e4b
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration2/.dockerignore
@@ -0,0 +1 @@
+unmatched
diff --git a/tests/conformance/testdata/dockerignore/integration2/Dockerfile b/tests/conformance/testdata/dockerignore/integration2/Dockerfile
new file mode 100644
index 0000000..04fdc27
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration2/Dockerfile
@@ -0,0 +1,2 @@
+FROM scratch
+COPY . .
diff --git a/tests/conformance/testdata/dockerignore/integration2/subdir/sub1.txt b/tests/conformance/testdata/dockerignore/integration2/subdir/sub1.txt
new file mode 100644
index 0000000..6b2fffa
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration2/subdir/sub1.txt
@@ -0,0 +1 @@
+sub1 \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/integration2/subdir/subsubdir/subsub1.txt b/tests/conformance/testdata/dockerignore/integration2/subdir/subsubdir/subsub1.txt
new file mode 100644
index 0000000..652f1b6
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration2/subdir/subsubdir/subsub1.txt
@@ -0,0 +1 @@
+subsub1
diff --git a/tests/conformance/testdata/dockerignore/integration3/.dockerignore b/tests/conformance/testdata/dockerignore/integration3/.dockerignore
new file mode 100644
index 0000000..5bfce4d
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/.dockerignore
@@ -0,0 +1,10 @@
+# comment
+*
+!test*
+!src
+**/*.in
+src/etc
+*.md
+!README*.md
+README-secret.md
+test1.txt
diff --git a/tests/conformance/testdata/dockerignore/integration3/BUILD.md b/tests/conformance/testdata/dockerignore/integration3/BUILD.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/BUILD.md
diff --git a/tests/conformance/testdata/dockerignore/integration3/COPYRIGHT b/tests/conformance/testdata/dockerignore/integration3/COPYRIGHT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/COPYRIGHT
diff --git a/tests/conformance/testdata/dockerignore/integration3/Dockerfile b/tests/conformance/testdata/dockerignore/integration3/Dockerfile
new file mode 100644
index 0000000..48a8a79
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/Dockerfile
@@ -0,0 +1,5 @@
+FROM busybox
+COPY . /upload/
+COPY src /upload/src2/
+COPY test1.txt /upload/test1.txt
+RUN echo "CUT HERE"; /bin/find /upload | LANG=en_US.UTF-8 sort; echo "CUT HERE"
diff --git a/tests/conformance/testdata/dockerignore/integration3/LICENSE b/tests/conformance/testdata/dockerignore/integration3/LICENSE
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/LICENSE
diff --git a/tests/conformance/testdata/dockerignore/integration3/README-secret.md b/tests/conformance/testdata/dockerignore/integration3/README-secret.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/README-secret.md
diff --git a/tests/conformance/testdata/dockerignore/integration3/README.md b/tests/conformance/testdata/dockerignore/integration3/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/README.md
diff --git a/tests/conformance/testdata/dockerignore/integration3/manifest b/tests/conformance/testdata/dockerignore/integration3/manifest
new file mode 100644
index 0000000..0ada42d
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/manifest
@@ -0,0 +1,16 @@
+/upload
+/upload/README.md
+/upload/src
+/upload/src/Makefile
+/upload/src/cmd
+/upload/src/cmd/Makefile
+/upload/src/lib
+/upload/src/lib/Makefile
+/upload/src2
+/upload/src2/Makefile
+/upload/src2/cmd
+/upload/src2/cmd/Makefile
+/upload/src2/lib
+/upload/src2/lib/Makefile
+/upload/test2.txt
+/upload/test3.txt
diff --git a/tests/conformance/testdata/dockerignore/integration3/src/Makefile b/tests/conformance/testdata/dockerignore/integration3/src/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/src/Makefile
diff --git a/tests/conformance/testdata/dockerignore/integration3/src/cmd/Makefile b/tests/conformance/testdata/dockerignore/integration3/src/cmd/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/src/cmd/Makefile
diff --git a/tests/conformance/testdata/dockerignore/integration3/src/cmd/main.in b/tests/conformance/testdata/dockerignore/integration3/src/cmd/main.in
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/src/cmd/main.in
diff --git a/tests/conformance/testdata/dockerignore/integration3/src/etc/foo.conf b/tests/conformance/testdata/dockerignore/integration3/src/etc/foo.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/src/etc/foo.conf
diff --git a/tests/conformance/testdata/dockerignore/integration3/src/etc/foo.conf.d/dropin.conf b/tests/conformance/testdata/dockerignore/integration3/src/etc/foo.conf.d/dropin.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/src/etc/foo.conf.d/dropin.conf
diff --git a/tests/conformance/testdata/dockerignore/integration3/src/lib/Makefile b/tests/conformance/testdata/dockerignore/integration3/src/lib/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/src/lib/Makefile
diff --git a/tests/conformance/testdata/dockerignore/integration3/src/lib/framework.in b/tests/conformance/testdata/dockerignore/integration3/src/lib/framework.in
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/src/lib/framework.in
diff --git a/tests/conformance/testdata/dockerignore/integration3/test1.txt b/tests/conformance/testdata/dockerignore/integration3/test1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/test1.txt
diff --git a/tests/conformance/testdata/dockerignore/integration3/test2.txt b/tests/conformance/testdata/dockerignore/integration3/test2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/test2.txt
diff --git a/tests/conformance/testdata/dockerignore/integration3/test3.txt b/tests/conformance/testdata/dockerignore/integration3/test3.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/integration3/test3.txt
diff --git a/tests/conformance/testdata/dockerignore/minimal_test/.dockerignore b/tests/conformance/testdata/dockerignore/minimal_test/.dockerignore
new file mode 100644
index 0000000..84b10fa
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/minimal_test/.dockerignore
@@ -0,0 +1,2 @@
+stuff/huge/*
+!stuff/huge/usr/bin/*
diff --git a/tests/conformance/testdata/dockerignore/minimal_test/Dockerfile b/tests/conformance/testdata/dockerignore/minimal_test/Dockerfile
new file mode 100644
index 0000000..4ab9ff8
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/minimal_test/Dockerfile
@@ -0,0 +1,3 @@
+FROM scratch
+
+COPY stuff /tmp/stuff
diff --git a/tests/conformance/testdata/dockerignore/minimal_test/stuff/huge/usr/bin/file1 b/tests/conformance/testdata/dockerignore/minimal_test/stuff/huge/usr/bin/file1
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/minimal_test/stuff/huge/usr/bin/file1
diff --git a/tests/conformance/testdata/dockerignore/minimal_test/stuff/huge/usr/bin/file2 b/tests/conformance/testdata/dockerignore/minimal_test/stuff/huge/usr/bin/file2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/minimal_test/stuff/huge/usr/bin/file2
diff --git a/tests/conformance/testdata/dockerignore/populated/.dotfile-a.txt b/tests/conformance/testdata/dockerignore/populated/.dotfile-a.txt
new file mode 100644
index 0000000..440b793
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/.dotfile-a.txt
@@ -0,0 +1 @@
+dotfile-a
diff --git a/tests/conformance/testdata/dockerignore/populated/file-a.txt b/tests/conformance/testdata/dockerignore/populated/file-a.txt
new file mode 100644
index 0000000..2f76e89
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/file-a.txt
@@ -0,0 +1 @@
+file-a
diff --git a/tests/conformance/testdata/dockerignore/populated/file-b.txt b/tests/conformance/testdata/dockerignore/populated/file-b.txt
new file mode 100644
index 0000000..3b8ef5a
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/file-b.txt
@@ -0,0 +1 @@
+file-b
diff --git a/tests/conformance/testdata/dockerignore/populated/file-c.txt b/tests/conformance/testdata/dockerignore/populated/file-c.txt
new file mode 100644
index 0000000..28e1f44
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/file-c.txt
@@ -0,0 +1 @@
+file-c
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-b/.dotfile-b.txt b/tests/conformance/testdata/dockerignore/populated/subdir-b/.dotfile-b.txt
new file mode 100644
index 0000000..3711553
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-b/.dotfile-b.txt
@@ -0,0 +1 @@
+dotfile-b
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-c/file-b.txt b/tests/conformance/testdata/dockerignore/populated/subdir-c/file-b.txt
new file mode 120000
index 0000000..9e9b4e7
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-c/file-b.txt
@@ -0,0 +1 @@
+../file-a.txt \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-c/file-c.txt b/tests/conformance/testdata/dockerignore/populated/subdir-c/file-c.txt
new file mode 120000
index 0000000..16c7e25
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-c/file-c.txt
@@ -0,0 +1 @@
+../file-b.txt \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-e/file-a.txt b/tests/conformance/testdata/dockerignore/populated/subdir-e/file-a.txt
new file mode 120000
index 0000000..9e9b4e7
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-e/file-a.txt
@@ -0,0 +1 @@
+../file-a.txt \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-e/file-b.txt b/tests/conformance/testdata/dockerignore/populated/subdir-e/file-b.txt
new file mode 120000
index 0000000..16c7e25
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-e/file-b.txt
@@ -0,0 +1 @@
+../file-b.txt \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-e/file-n.txt b/tests/conformance/testdata/dockerignore/populated/subdir-e/file-n.txt
new file mode 100644
index 0000000..a2ee06e
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-e/file-n.txt
@@ -0,0 +1 @@
+file-n
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-a.txt b/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-a.txt
new file mode 120000
index 0000000..b6df486
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-a.txt
@@ -0,0 +1 @@
+../../file-a.txt \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-n.txt b/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-n.txt
new file mode 120000
index 0000000..2ac6b68
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-n.txt
@@ -0,0 +1 @@
+../file-n.txt \ No newline at end of file
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-o.txt b/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-o.txt
new file mode 100644
index 0000000..0b72fad
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/file-o.txt
@@ -0,0 +1 @@
+file-o
diff --git a/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/subdir-g/subdir-b b/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/subdir-g/subdir-b
new file mode 120000
index 0000000..1e44e12
--- /dev/null
+++ b/tests/conformance/testdata/dockerignore/populated/subdir-e/subdir-f/subdir-g/subdir-b
@@ -0,0 +1 @@
+../../../subdir-b \ No newline at end of file
diff --git a/tests/conformance/testdata/env/precedence/Dockerfile b/tests/conformance/testdata/env/precedence/Dockerfile
new file mode 100644
index 0000000..ce9056f
--- /dev/null
+++ b/tests/conformance/testdata/env/precedence/Dockerfile
@@ -0,0 +1,7 @@
+# syntax=docker/dockerfile:1.4
+FROM busybox
+ENV a=b
+ENV c=d
+RUN echo E=$E G=$G
+ENV E=E G=G
+RUN echo E=$E G=$G
diff --git a/tests/conformance/testdata/heredoc/Dockerfile.heredoc_copy b/tests/conformance/testdata/heredoc/Dockerfile.heredoc_copy
new file mode 100644
index 0000000..227f71b
--- /dev/null
+++ b/tests/conformance/testdata/heredoc/Dockerfile.heredoc_copy
@@ -0,0 +1,23 @@
+# syntax=docker/dockerfile:1.3-labs
+FROM busybox as one
+RUN echo helloworld > image_file
+FROM busybox
+RUN echo hello
+# copy two heredoc and one from context
+COPY <<robots.txt <<humans.txt file /test/
+(robots content)
+Long file with random text
+Random line
+HelloWorld
+robots.txt
+(humans content)
+humans.txt
+# copy two heredoc and one from another stage
+COPY --from=one image_file <<robots.txt <<humans.txt /test2/
+(robots content)
+Long file with random text
+Random line
+HelloWorld
+robots.txt
+(humans content)
+humans.txt
diff --git a/tests/conformance/testdata/heredoc/file b/tests/conformance/testdata/heredoc/file
new file mode 100644
index 0000000..ebf038b
--- /dev/null
+++ b/tests/conformance/testdata/heredoc/file
@@ -0,0 +1 @@
+somefile
diff --git a/tests/conformance/testdata/mount/Dockerfile b/tests/conformance/testdata/mount/Dockerfile
new file mode 100644
index 0000000..6417ece
--- /dev/null
+++ b/tests/conformance/testdata/mount/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+RUN stat -c "%s %n %a %F %g %u" /tmp/test/*
diff --git a/tests/conformance/testdata/mount/file b/tests/conformance/testdata/mount/file
new file mode 100644
index 0000000..1a010b1
--- /dev/null
+++ b/tests/conformance/testdata/mount/file
@@ -0,0 +1 @@
+file \ No newline at end of file
diff --git a/tests/conformance/testdata/mount/file2 b/tests/conformance/testdata/mount/file2
new file mode 100644
index 0000000..30d67d4
--- /dev/null
+++ b/tests/conformance/testdata/mount/file2
@@ -0,0 +1 @@
+file2 \ No newline at end of file
diff --git a/tests/conformance/testdata/overlapdirwithoutslash/Dockerfile b/tests/conformance/testdata/overlapdirwithoutslash/Dockerfile
new file mode 100644
index 0000000..0833b15
--- /dev/null
+++ b/tests/conformance/testdata/overlapdirwithoutslash/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+COPY existing . \ No newline at end of file
diff --git a/tests/conformance/testdata/overlapdirwithoutslash/existing/etc/file-in-existing-dir b/tests/conformance/testdata/overlapdirwithoutslash/existing/etc/file-in-existing-dir
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/overlapdirwithoutslash/existing/etc/file-in-existing-dir
diff --git a/tests/conformance/testdata/overlapdirwithslash/Dockerfile b/tests/conformance/testdata/overlapdirwithslash/Dockerfile
new file mode 100644
index 0000000..81988db
--- /dev/null
+++ b/tests/conformance/testdata/overlapdirwithslash/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+COPY existing/ . \ No newline at end of file
diff --git a/tests/conformance/testdata/overlapdirwithslash/existing/etc/file-in-existing-dir b/tests/conformance/testdata/overlapdirwithslash/existing/etc/file-in-existing-dir
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/overlapdirwithslash/existing/etc/file-in-existing-dir
diff --git a/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile b/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile
new file mode 100644
index 0000000..11234bd
--- /dev/null
+++ b/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile
@@ -0,0 +1,3 @@
+FROM scratch
+COPY tree1/ /
+COPY tree2/ /
diff --git a/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.2 b/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.2
new file mode 100644
index 0000000..9d2df9f
--- /dev/null
+++ b/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.2
@@ -0,0 +1,3 @@
+FROM scratch
+COPY tree2/ /
+COPY tree1/ /
diff --git a/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.3 b/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.3
new file mode 100644
index 0000000..16005a2
--- /dev/null
+++ b/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.3
@@ -0,0 +1,3 @@
+FROM scratch
+COPY tree2/ /tree/
+COPY tree1/ /tree/
diff --git a/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.4 b/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.4
new file mode 100644
index 0000000..b79df35
--- /dev/null
+++ b/tests/conformance/testdata/replace/symlink-with-directory/Dockerfile.4
@@ -0,0 +1,3 @@
+FROM scratch
+COPY tree1/ /tree/
+COPY tree2/ /tree/
diff --git a/tests/conformance/testdata/replace/symlink-with-directory/tree1/directory/file-in-directory b/tests/conformance/testdata/replace/symlink-with-directory/tree1/directory/file-in-directory
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/replace/symlink-with-directory/tree1/directory/file-in-directory
diff --git a/tests/conformance/testdata/replace/symlink-with-directory/tree1/maybe-directory b/tests/conformance/testdata/replace/symlink-with-directory/tree1/maybe-directory
new file mode 120000
index 0000000..6d0450c
--- /dev/null
+++ b/tests/conformance/testdata/replace/symlink-with-directory/tree1/maybe-directory
@@ -0,0 +1 @@
+directory \ No newline at end of file
diff --git a/tests/conformance/testdata/replace/symlink-with-directory/tree2/maybe-directory/file-in-maybe-directory b/tests/conformance/testdata/replace/symlink-with-directory/tree2/maybe-directory/file-in-maybe-directory
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/replace/symlink-with-directory/tree2/maybe-directory/file-in-maybe-directory
diff --git a/tests/conformance/testdata/subdir/subdir/Dockerfile b/tests/conformance/testdata/subdir/subdir/Dockerfile
new file mode 100644
index 0000000..04fdc27
--- /dev/null
+++ b/tests/conformance/testdata/subdir/subdir/Dockerfile
@@ -0,0 +1,2 @@
+FROM scratch
+COPY . .
diff --git a/tests/conformance/testdata/tar-g/Dockerfile b/tests/conformance/testdata/tar-g/Dockerfile
new file mode 100644
index 0000000..c7e5771
--- /dev/null
+++ b/tests/conformance/testdata/tar-g/Dockerfile
@@ -0,0 +1,2 @@
+FROM scratch
+ADD content.tar.gz /
diff --git a/tests/conformance/testdata/tar-g/content.sh b/tests/conformance/testdata/tar-g/content.sh
new file mode 100755
index 0000000..5ec2666
--- /dev/null
+++ b/tests/conformance/testdata/tar-g/content.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Generate a digest in an effort to create something that looks like headers in
+# https://www.openssl.org/source/openssl-1.1.1g.tar.gz, per #2717.
+comment=$(sha1sum content.txt)
+comment="${comment// *}"
+# Expects GNU tar.
+gtar --pax-option=globexthdr.comment="$comment" -czf content.tar.gz content.txt
diff --git a/tests/conformance/testdata/tar-g/content.tar.gz b/tests/conformance/testdata/tar-g/content.tar.gz
new file mode 100644
index 0000000..8cea49c
--- /dev/null
+++ b/tests/conformance/testdata/tar-g/content.tar.gz
Binary files differ
diff --git a/tests/conformance/testdata/tar-g/content.txt b/tests/conformance/testdata/tar-g/content.txt
new file mode 100644
index 0000000..1629c50
--- /dev/null
+++ b/tests/conformance/testdata/tar-g/content.txt
@@ -0,0 +1 @@
+Look at me, I'm content!
diff --git a/tests/conformance/testdata/transientmount/Dockerfile b/tests/conformance/testdata/transientmount/Dockerfile
new file mode 100644
index 0000000..5accfd4
--- /dev/null
+++ b/tests/conformance/testdata/transientmount/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+RUN ls /mountdir/subdir
+RUN cat /mountfile
diff --git a/tests/conformance/testdata/transientmount/Dockerfile.env b/tests/conformance/testdata/transientmount/Dockerfile.env
new file mode 100644
index 0000000..21e7c4b
--- /dev/null
+++ b/tests/conformance/testdata/transientmount/Dockerfile.env
@@ -0,0 +1,23 @@
+FROM busybox
+ENV name value
+ENV name=value
+ENV name=value name2=value2
+ENV name="value value1"
+ENV name=value\ value2
+ENV name="value'quote space'value2"
+ENV name='value"double quote"value2'
+ENV name=value\ value2 name2=value2\ value3
+ENV name="a\"b"
+ENV name="a\'b"
+ENV name='a\'b'
+ENV name='a\'b''
+ENV name='a\"b'
+ENV name="''"
+# don't put anything after the next line - it must be the last line of the
+# Dockerfile and it must end with \
+ENV name=value \
+ name1=value1 \
+ name2="value2a \
+ value2b" \
+ name3="value3a\n\"value3b\"" \
+ name4="value4a\\nvalue4b" \ \ No newline at end of file
diff --git a/tests/conformance/testdata/transientmount/file b/tests/conformance/testdata/transientmount/file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/transientmount/file
diff --git a/tests/conformance/testdata/transientmount/subdir/file2 b/tests/conformance/testdata/transientmount/subdir/file2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/conformance/testdata/transientmount/subdir/file2
diff --git a/tests/conformance/testdata/volume/Dockerfile b/tests/conformance/testdata/volume/Dockerfile
new file mode 100644
index 0000000..f7cc0d3
--- /dev/null
+++ b/tests/conformance/testdata/volume/Dockerfile
@@ -0,0 +1,7 @@
+FROM busybox
+
+ADD file /var/www/
+VOLUME /var/www
+ADD file /var/
+VOLUME /var
+ADD file2 /var/ \ No newline at end of file
diff --git a/tests/conformance/testdata/volume/file b/tests/conformance/testdata/volume/file
new file mode 100644
index 0000000..1a010b1
--- /dev/null
+++ b/tests/conformance/testdata/volume/file
@@ -0,0 +1 @@
+file \ No newline at end of file
diff --git a/tests/conformance/testdata/volume/file2 b/tests/conformance/testdata/volume/file2
new file mode 100644
index 0000000..30d67d4
--- /dev/null
+++ b/tests/conformance/testdata/volume/file2
@@ -0,0 +1 @@
+file2 \ No newline at end of file
diff --git a/tests/conformance/testdata/volumerun/Dockerfile b/tests/conformance/testdata/volumerun/Dockerfile
new file mode 100644
index 0000000..2cc82f8
--- /dev/null
+++ b/tests/conformance/testdata/volumerun/Dockerfile
@@ -0,0 +1,7 @@
+FROM busybox
+
+ADD file /var/www/
+VOLUME /var/www
+ADD file2 /var/www/
+RUN touch /var/www/file3
+ADD file4 /var/www/ \ No newline at end of file
diff --git a/tests/conformance/testdata/volumerun/file b/tests/conformance/testdata/volumerun/file
new file mode 100644
index 0000000..1a010b1
--- /dev/null
+++ b/tests/conformance/testdata/volumerun/file
@@ -0,0 +1 @@
+file \ No newline at end of file
diff --git a/tests/conformance/testdata/volumerun/file2 b/tests/conformance/testdata/volumerun/file2
new file mode 100644
index 0000000..30d67d4
--- /dev/null
+++ b/tests/conformance/testdata/volumerun/file2
@@ -0,0 +1 @@
+file2 \ No newline at end of file
diff --git a/tests/conformance/testdata/volumerun/file4 b/tests/conformance/testdata/volumerun/file4
new file mode 100644
index 0000000..eed6780
--- /dev/null
+++ b/tests/conformance/testdata/volumerun/file4
@@ -0,0 +1 @@
+file4 \ No newline at end of file
diff --git a/tests/conformance/testdata/wildcard/Dockerfile b/tests/conformance/testdata/wildcard/Dockerfile
new file mode 100644
index 0000000..a13cc56
--- /dev/null
+++ b/tests/conformance/testdata/wildcard/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+ENV DIR=/usr
+ADD dir2/*.b dir2/*.c $DIR/test/
diff --git a/tests/conformance/testdata/wildcard/dir2/file.a b/tests/conformance/testdata/wildcard/dir2/file.a
new file mode 100644
index 0000000..d26db9c
--- /dev/null
+++ b/tests/conformance/testdata/wildcard/dir2/file.a
@@ -0,0 +1 @@
+file.a \ No newline at end of file
diff --git a/tests/conformance/testdata/wildcard/dir2/file.b b/tests/conformance/testdata/wildcard/dir2/file.b
new file mode 100644
index 0000000..5c3dc17
--- /dev/null
+++ b/tests/conformance/testdata/wildcard/dir2/file.b
@@ -0,0 +1 @@
+file.b \ No newline at end of file
diff --git a/tests/conformance/testdata/wildcard/dir2/file.c b/tests/conformance/testdata/wildcard/dir2/file.c
new file mode 100644
index 0000000..e0c7bb2
--- /dev/null
+++ b/tests/conformance/testdata/wildcard/dir2/file.c
@@ -0,0 +1 @@
+file.c \ No newline at end of file
diff --git a/tests/conformance/testdata/wildcard/dir2/file2.b b/tests/conformance/testdata/wildcard/dir2/file2.b
new file mode 100644
index 0000000..9e43652
--- /dev/null
+++ b/tests/conformance/testdata/wildcard/dir2/file2.b
@@ -0,0 +1 @@
+file2.b \ No newline at end of file
diff --git a/tests/containers.bats b/tests/containers.bats
new file mode 100644
index 0000000..dcca0c3
--- /dev/null
+++ b/tests/containers.bats
@@ -0,0 +1,82 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "containers" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ run_buildah containers
+ expect_line_count 3
+}
+
+@test "containers filter test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+ run_buildah containers --filter name=$cid1
+ expect_line_count 2
+}
+
+@test "containers format test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ run_buildah containers --format "{{.ContainerName}}"
+ expect_line_count 2
+ expect_output --from="${lines[0]}" "alpine-working-container"
+ expect_output --from="${lines[1]}" "busybox-working-container"
+}
+
+@test "containers json test" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ run_buildah containers --json
+ expect_output --substring '\{'
+}
+
+@test "containers noheading test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ run_buildah containers --noheading
+ expect_line_count 2
+ if [[ $output =~ "NAME" ]]; then
+ expect_output "[no instance of 'NAME']" "'NAME' header should be absent"
+ fi
+}
+
+@test "containers quiet test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ run_buildah containers --quiet
+ expect_line_count 2
+
+ # Both lines should be CIDs and nothing else.
+ expect_output --substring --from="${lines[0]}" '^[0-9a-f]{64}$'
+ expect_output --substring --from="${lines[1]}" '^[0-9a-f]{64}$'
+}
+
+@test "containers notruncate test" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ run_buildah containers --notruncate
+ expect_line_count 2
+ expect_output --substring --from="${lines[1]}" '^[0-9a-f]{64}'
+}
+
+@test "containers all test" {
+ skip_if_in_container
+ skip_if_no_podman
+
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ podman create --root ${TEST_SCRATCH_DIR}/root --storage-driver ${STORAGE_DRIVER} --net=host busybox ls
+ run_buildah containers
+ expect_line_count 2
+ run_buildah containers -a
+ expect_line_count 3
+}
diff --git a/tests/containers.conf b/tests/containers.conf
new file mode 100644
index 0000000..54a3177
--- /dev/null
+++ b/tests/containers.conf
@@ -0,0 +1,65 @@
+[containers]
+
+# A list of ulimits to be set in containers by default, specified as
+# "<ulimit name>=<soft limit>:<hard limit>", for example:
+# "nofile=1024:2048"
+# See setrlimit(2) for a list of resource names.
+# Any limit not specified here will be inherited from the process launching the
+# container engine.
+# Ulimits has limits for non privileged container engines.
+#
+default_ulimits = [
+ "nofile=500:500",
+]
+
+# Environment variable list for the conmon process; used for passing necessary
+# environment variables to conmon or the runtime.
+#
+env = [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "foo=bar",
+]
+
+# container engines use container separation using MAC(SELinux) labeling.
+# Flag is ignored on label disabled systems.
+#
+label = true
+
+# Size of /dev/shm. Specified as <number><unit>.
+# Unit is optional, values:
+# b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
+# If the unit is omitted, the system uses bytes.
+#
+shm_size = "200k"
+
+# List of additional devices. Specified as
+# "<device-on-host>:<device-on-container>:<permissions>", for example:
+# "/dev/sdc:/dev/xvdc:rwm".
+# If it is empty or commented out, only the default devices will be used
+#
+devices = [
+]
+
+# List of default capabilities for containers. If it is empty or commented out,
+# the default capabilities defined in the container engine will be added.
+#
+default_capabilities = [
+ "AUDIT_WRITE",
+ "CHOWN",
+ "DAC_OVERRIDE",
+ "FOWNER",
+ "FSETID",
+ "KILL",
+ "MKNOD",
+ "NET_BIND_SERVICE",
+ "NET_RAW",
+ "SETFCAP",
+ "SETGID",
+ "SETPCAP",
+ "SETUID",
+ "SYS_CHROOT",
+]
+
+default_sysctls = [
+ "net.ipv4.ping_group_range=0 0",
+]
diff --git a/tests/containers_conf.bats b/tests/containers_conf.bats
new file mode 100644
index 0000000..e90de66
--- /dev/null
+++ b/tests/containers_conf.bats
@@ -0,0 +1,140 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "containers.conf selinux test" {
+ if ! which selinuxenabled > /dev/null 2> /dev/null ; then
+ skip "No selinuxenabled executable"
+ elif ! selinuxenabled ; then
+ skip "selinux is disabled"
+ fi
+
+ _prefetch alpine
+ cid=$(buildah from $WITH_POLICY_JSON alpine)
+ run_buildah --log-level=error run $cid sh -c "cat /proc/self/attr/current | grep container_t"
+
+ run_buildah rm $cid
+
+ sed "s/^label = true/label = false/g" ${TEST_SOURCES}/containers.conf > ${TEST_SCRATCH_DIR}/containers.conf
+ cid=$(buildah from $WITH_POLICY_JSON alpine)
+ CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf run_buildah 1 --log-level=error run $cid sh -c "cat /proc/self/attr/current | grep container_t"
+}
+
+@test "containers.conf ulimit test" {
+ if test "$BUILDAH_ISOLATION" = "chroot" -o "$BUILDAH_ISOLATION" = "rootless" ; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION"
+ fi
+
+ _prefetch alpine
+ cid=$(buildah from $WITH_POLICY_JSON alpine)
+ run_buildah --log-level=error run $cid awk '/open files/{print $4}' /proc/self/limits
+ expect_output "500" "limits: open files (w/file limit)"
+
+ cid=$(buildah from --ulimit nofile=300:400 $WITH_POLICY_JSON alpine)
+ run_buildah --log-level=error run $cid awk '/open files/{print $4}' /proc/self/limits
+ expect_output "300" "limits: open files (w/file limit)"
+}
+
+@test "containers.conf additional devices test" {
+ skip_if_rootless_environment
+ if test "$BUILDAH_ISOLATION" = "chroot" -o "$BUILDAH_ISOLATION" = "rootless" ; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION"
+ fi
+
+ _prefetch alpine
+ cid=$(buildah from $WITH_POLICY_JSON alpine)
+ CONTAINERS_CONF=$CONTAINERS_CONF run_buildah 1 --log-level=error run $cid ls /dev/foo1
+ run_buildah rm $cid
+
+ sed '/^devices.*/a "\/dev\/foo:\/dev\/foo1:rmw",' ${TEST_SOURCES}/containers.conf > ${TEST_SCRATCH_DIR}/containers.conf
+ rm -f /dev/foo; mknod /dev/foo c 1 1
+ CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid="$output"
+ CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf run_buildah --log-level=error run $cid ls /dev/foo1
+ rm -f /dev/foo
+}
+
+@test "containers.conf capabilities test" {
+ _prefetch alpine
+
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid="$output"
+ run_buildah --log-level=error run $cid sh -c 'grep CapEff /proc/self/status | cut -f2'
+ CapEff="$output"
+ expect_output "00000000a80425fb"
+ run_buildah rm $cid
+
+ sed "/AUDIT_WRITE/d" ${TEST_SOURCES}/containers.conf > ${TEST_SCRATCH_DIR}/containers.conf
+ CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid="$output"
+
+ CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf run_buildah --log-level=error run $cid sh -c 'grep CapEff /proc/self/status | cut -f2'
+ run_buildah rm $cid
+
+ test "$output" != "$CapEff"
+}
+
+@test "containers.conf /dev/shm test" {
+ if test "$BUILDAH_ISOLATION" = "chroot" -o "$BUILDAH_ISOLATION" = "rootless" ; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION"
+ fi
+
+ _prefetch alpine
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid="$output"
+ run_buildah --log-level=error run $cid sh -c 'df /dev/shm | awk '\''/shm/{print $4}'\'''
+ expect_output "200"
+}
+
+@test "containers.conf custom runtime" {
+ if test "$BUILDAH_ISOLATION" = "chroot" -o "$BUILDAH_ISOLATION" = "rootless" ; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION"
+ fi
+
+ test -x /usr/bin/crun || skip "/usr/bin/crun doesn't exist"
+
+ ln -s /usr/bin/crun ${TEST_SCRATCH_DIR}/runtime
+
+ cat >${TEST_SCRATCH_DIR}/containers.conf << EOF
+[engine]
+runtime = "nonstandard_runtime_name"
+[engine.runtimes]
+nonstandard_runtime_name = ["${TEST_SCRATCH_DIR}/runtime"]
+EOF
+
+ _prefetch alpine
+ cid=$(buildah from $WITH_POLICY_JSON alpine)
+ CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf run_buildah --log-level=error run $cid true
+}
+
+@test "containers.conf network sysctls" {
+ if test "$BUILDAH_ISOLATION" = "chroot" ; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION"
+ fi
+
+ cat >${TEST_SCRATCH_DIR}/containers.conf << EOF
+[containers]
+default_sysctls = [
+ "net.ipv4.tcp_timestamps=123"
+]
+EOF
+ _prefetch alpine
+ cat >${TEST_SCRATCH_DIR}/Containerfile << _EOF
+FROM alpine
+RUN echo -n "timestamp="; cat /proc/sys/net/ipv4/tcp_timestamps
+RUN echo -n "ping_group_range="; cat /proc/sys/net/ipv4/ping_group_range
+_EOF
+
+ run_buildah build ${TEST_SCRATCH_DIR}
+ expect_output --substring "timestamp=1"
+ expect_output --substring "ping_group_range=0.*0"
+
+ CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf run_buildah build ${TEST_SCRATCH_DIR}
+ expect_output --substring "timestamp=123"
+ if is_rootless ; then
+ expect_output --substring "ping_group_range=65534.*65534"
+ else
+ expect_output --substring "ping_group_range=1.*0"
+ fi
+
+}
diff --git a/tests/copy.bats b/tests/copy.bats
new file mode 100644
index 0000000..ad9f31d
--- /dev/null
+++ b/tests/copy.bats
@@ -0,0 +1,539 @@
+#!/usr/bin/env bats
+
+load helpers
+
+# Option flags must precede positional arguments: buildah should exit 125
+# and name the offending flag when it appears after the container/path args.
+@test "copy-flags-order-verification" {
+ run_buildah 125 copy container1 -q /tmp/container1
+ check_options_flag_err "-q"
+
+ run_buildah 125 copy container1 --chown /tmp/container1 --quiet
+ check_options_flag_err "--chown"
+
+ run_buildah 125 copy container1 /tmp/container1 --quiet
+ check_options_flag_err "--quiet"
+}
+
+# Copy multiple local files into containers: single file to workingdir,
+# multiple sources into a directory destination, failure when the multi-source
+# destination is an existing file, and wildcard source expansion.
+@test "copy-local-multiple" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-randomfile
+ createrandom ${TEST_SCRATCH_DIR}/third-randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah config --workingdir / $cid
+ # copy ${TEST_SCRATCH_DIR}/randomfile to a file of the same name in the container's working directory
+ run_buildah copy --retry 4 --retry-delay 4s $cid ${TEST_SCRATCH_DIR}/randomfile
+ # copy ${TEST_SCRATCH_DIR}/other-randomfile and ${TEST_SCRATCH_DIR}/third-randomfile to a new directory named ${TEST_SCRATCH_DIR}/randomfile in the container
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/other-randomfile ${TEST_SCRATCH_DIR}/third-randomfile ${TEST_SCRATCH_DIR}/randomfile
+ # try to copy ${TEST_SCRATCH_DIR}/other-randomfile and ${TEST_SCRATCH_DIR}/third-randomfile to a /randomfile, which already exists and is a file
+ run_buildah 125 copy $cid ${TEST_SCRATCH_DIR}/other-randomfile ${TEST_SCRATCH_DIR}/third-randomfile /randomfile
+ # copy ${TEST_SCRATCH_DIR}/other-randomfile and ${TEST_SCRATCH_DIR}/third-randomfile to previously-created directory named ${TEST_SCRATCH_DIR}/randomfile in the container
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/other-randomfile ${TEST_SCRATCH_DIR}/third-randomfile ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah rm $cid
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah config --workingdir / $cid
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/other-randomfile ${TEST_SCRATCH_DIR}/third-randomfile ${TEST_SCRATCH_DIR}/randomfile /etc
+ run_buildah rm $cid
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah config --workingdir / $cid
+ # quoted glob: buildah, not the shell, must expand the wildcard
+ run_buildah copy $cid "${TEST_SCRATCH_DIR}/*randomfile" /etc
+ (cd ${TEST_SCRATCH_DIR}; for i in *randomfile; do cmp $i ${root}/etc/$i; done)
+}
+
+# Copy plain local files into a scratch container, commit it, and verify the
+# files survive (content-identical) in a container created from the new image.
+@test "copy-local-plain" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-randomfile
+ createrandom ${TEST_SCRATCH_DIR}/third-randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah config --workingdir / $cid
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/other-randomfile
+ run_buildah unmount $cid
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+ run_buildah rm $cid
+
+ run_buildah from --quiet $WITH_POLICY_JSON new-image
+ newcid=$output
+ run_buildah mount $newcid
+ newroot=$output
+ test -s $newroot/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $newroot/randomfile
+ test -s $newroot/other-randomfile
+ cmp ${TEST_SCRATCH_DIR}/other-randomfile $newroot/other-randomfile
+}
+
+# Copy a local directory into the configured working directory (implicit
+# destination) and into an explicit destination directory; verify contents.
+@test "copy-local-subdirectory" {
+ mkdir -p ${TEST_SCRATCH_DIR}/subdir
+ createrandom ${TEST_SCRATCH_DIR}/subdir/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/subdir/other-randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --workingdir /container-subdir $cid
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/subdir
+ run_buildah mount $cid
+ root=$output
+ test -s $root/container-subdir/randomfile
+ cmp ${TEST_SCRATCH_DIR}/subdir/randomfile $root/container-subdir/randomfile
+ test -s $root/container-subdir/other-randomfile
+ cmp ${TEST_SCRATCH_DIR}/subdir/other-randomfile $root/container-subdir/other-randomfile
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/subdir /other-subdir
+ test -s $root/other-subdir/randomfile
+ cmp ${TEST_SCRATCH_DIR}/subdir/randomfile $root/other-subdir/randomfile
+ test -s $root/other-subdir/other-randomfile
+ cmp ${TEST_SCRATCH_DIR}/subdir/other-randomfile $root/other-subdir/other-randomfile
+}
+
+# A destination with a trailing slash forces creation of a directory, while a
+# plain destination path of a single file copy names the file itself.
+@test "copy-local-force-directory" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --workingdir / $cid
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/randomfile /randomfile
+ run_buildah mount $cid
+ root=$output
+ test -s $root/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $root/randomfile
+ run_buildah rm $cid
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --workingdir / $cid
+ # trailing slash: /randomsubdir/ must be created as a directory
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/randomfile /randomsubdir/
+ run_buildah mount $cid
+ root=$output
+ test -s $root/randomsubdir/randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $root/randomsubdir/randomfile
+}
+
+# Copy a file from an HTTP URL and verify both content and that the source's
+# modification time is preserved on the copied file.
+@test "copy-url-mtime" {
+ # Create a file with random content and a non-now timestamp (so we
+ # can trust that buildah correctly set mtime on copy)
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ touch -t 201910310123.45 ${TEST_SCRATCH_DIR}/randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah config --workingdir / $cid
+ starthttpd ${TEST_SCRATCH_DIR}
+ run_buildah copy $cid http://0.0.0.0:${HTTP_SERVER_PORT}/randomfile /urlfile
+ stophttpd
+ run_buildah mount $cid
+ root=$output
+ test -s $root/urlfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $root/urlfile
+
+ # Compare timestamps. Display them in human-readable form, so if there's
+ # a mismatch it will be shown in the test log.
+ mtime_randomfile=$(stat --format %y ${TEST_SCRATCH_DIR}/randomfile)
+ mtime_urlfile=$(stat --format %y $root/urlfile)
+
+ expect_output --from="$mtime_randomfile" "$mtime_urlfile" "mtime[randomfile] == mtime[urlfile]"
+}
+
+# Exercise copy --chown with numeric ids, names, and mixed forms, for files
+# and directories, including ownership of implicitly-created destinations.
+@test "copy --chown" {
+ mkdir -p ${TEST_SCRATCH_DIR}/subdir
+ mkdir -p ${TEST_SCRATCH_DIR}/other-subdir
+ createrandom ${TEST_SCRATCH_DIR}/subdir/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/subdir/other-randomfile
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-subdir/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-subdir/other-randomfile
+
+ _prefetch alpine
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah config --workingdir / $cid
+ run_buildah copy --chown 1:1 $cid ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah copy --chown root:1 $cid ${TEST_SCRATCH_DIR}/randomfile /randomfile2
+ run_buildah copy --chown nobody $cid ${TEST_SCRATCH_DIR}/randomfile /randomfile3
+ run_buildah copy --chown nobody:root $cid ${TEST_SCRATCH_DIR}/subdir /subdir
+ run_buildah run $cid stat -c "%u:%g" /randomfile
+ expect_output "1:1" "stat ug /randomfile"
+
+ run_buildah run $cid stat -c "%U:%g" /randomfile2
+ expect_output "root:1" "stat Ug /randomfile2"
+
+ run_buildah run $cid stat -c "%U" /randomfile3
+ expect_output "nobody" "stat U /randomfile3"
+
+ for i in randomfile other-randomfile ; do
+ run_buildah run $cid stat -c "%U:%G" /subdir/$i
+ expect_output "nobody:root" "stat UG /subdir/$i"
+ done
+
+ # subdir will have been implicitly created, and the --chown should have had an effect
+ run_buildah run $cid stat -c "%U:%G" /subdir
+ expect_output "nobody:root" "stat UG /subdir"
+
+ run_buildah copy --chown root:root $cid ${TEST_SCRATCH_DIR}/other-subdir /subdir
+ for i in randomfile other-randomfile ; do
+ run_buildah run $cid stat -c "%U:%G" /subdir/$i
+ expect_output "root:root" "stat UG /subdir/$i (after chown)"
+ done
+
+ # subdir itself will have not been copied (the destination directory was created implicitly), so its permissions should not have changed
+ run_buildah run $cid stat -c "%U:%G" /subdir
+ expect_output "nobody:root" "stat UG /subdir"
+}
+
+# Exercise copy --chmod on files and directory contents; the mode also applies
+# to the implicitly-created destination directory on the first directory copy.
+@test "copy --chmod" {
+ mkdir -p ${TEST_SCRATCH_DIR}/subdir
+ mkdir -p ${TEST_SCRATCH_DIR}/other-subdir
+ createrandom ${TEST_SCRATCH_DIR}/subdir/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/subdir/other-randomfile
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-subdir/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-subdir/other-randomfile
+
+ _prefetch alpine
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah config --workingdir / $cid
+ run_buildah copy --chmod 777 $cid ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah copy --chmod 700 $cid ${TEST_SCRATCH_DIR}/randomfile /randomfile2
+ run_buildah copy --chmod 755 $cid ${TEST_SCRATCH_DIR}/randomfile /randomfile3
+ run_buildah copy --chmod 660 $cid ${TEST_SCRATCH_DIR}/subdir /subdir
+
+ run_buildah run $cid ls -l /randomfile
+ expect_output --substring rwxrwxrwx
+
+ run_buildah run $cid ls -l /randomfile2
+ expect_output --substring rwx------
+
+ run_buildah run $cid ls -l /randomfile3
+ expect_output --substring rwxr-xr-x
+
+ for i in randomfile other-randomfile ; do
+ run_buildah run $cid ls -l /subdir/$i
+ expect_output --substring rw-rw----
+ done
+
+ # the implicitly-created /subdir picks up the --chmod mode as well
+ run_buildah run $cid ls -l /subdir
+ expect_output --substring rw-rw----
+
+ run_buildah copy --chmod 600 $cid ${TEST_SCRATCH_DIR}/other-subdir /subdir
+ for i in randomfile other-randomfile ; do
+ run_buildah run $cid ls -l /subdir/$i
+ expect_output --substring rw-------
+ done
+}
+
+# Copying a symlink source should dereference it: the destination must be a
+# regular file with the link target's content, not a symlink.
+@test "copy-symlink" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ ln -s ${TEST_SCRATCH_DIR}/randomfile ${TEST_SCRATCH_DIR}/link-randomfile
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah config --workingdir / $cid
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/link-randomfile
+ run_buildah unmount $cid
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+ run_buildah rm $cid
+
+ run_buildah from --quiet $WITH_POLICY_JSON new-image
+ newcid=$output
+ run_buildah mount $newcid
+ newroot=$output
+ test -s $newroot/link-randomfile
+ # -f: must be a regular file, i.e. the symlink was dereferenced
+ test -f $newroot/link-randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $newroot/link-randomfile
+}
+
+# A unix socket present in the container rootfs must not end up in a committed
+# image: sockets are skipped when committing.
+@test "ignore-socket" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ # This seems to be the least-worst way to create a socket: run and kill nc
+ nc -lkU ${TEST_SCRATCH_DIR}/test.socket &
+ nc_pid=$!
+ # This should succeed fairly quickly. We test with a timeout in case of
+ # failure (likely reason: 'nc' not installed.)
+ retries=50
+ while ! test -e ${TEST_SCRATCH_DIR}/test.socket; do
+ sleep 0.1
+ retries=$((retries - 1))
+ if [[ $retries -eq 0 ]]; then
+ die "Timed out waiting for ${TEST_SCRATCH_DIR}/test.socket (is nc installed?)"
+ fi
+ done
+ kill $nc_pid
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah config --workingdir / $cid
+ run_buildah unmount $cid
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+ run_buildah rm $cid
+
+ run_buildah from --quiet $WITH_POLICY_JSON new-image
+ newcid=$output
+ run_buildah mount $newcid
+ newroot=$output
+ # the socket must not exist in the committed image's rootfs
+ test \! -e $newroot/test.socket
+}
+
+# Like copy-symlink, but the filename has an archive suffix (.tar.gz): copy
+# must still dereference the link and copy bytes verbatim, not try to extract.
+@test "copy-symlink-archive-suffix" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile.tar.gz
+ ln -s ${TEST_SCRATCH_DIR}/randomfile.tar.gz ${TEST_SCRATCH_DIR}/link-randomfile.tar.gz
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah config --workingdir / $cid
+ run_buildah copy $cid ${TEST_SCRATCH_DIR}/link-randomfile.tar.gz
+ run_buildah unmount $cid
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+ run_buildah rm $cid
+
+ run_buildah from --quiet $WITH_POLICY_JSON new-image
+ newcid=$output
+ run_buildah mount $newcid
+ newroot=$output
+ test -s $newroot/link-randomfile.tar.gz
+ test -f $newroot/link-randomfile.tar.gz
+ cmp ${TEST_SCRATCH_DIR}/randomfile.tar.gz $newroot/link-randomfile.tar.gz
+}
+
+# COPY --from a build stage must fail with a clear error when the source path
+# does not exist in that stage.
+@test "copy-detect-missing-data" {
+ _prefetch busybox
+
+ : > ${TEST_SCRATCH_DIR}/Dockerfile
+ echo FROM busybox AS builder >> ${TEST_SCRATCH_DIR}/Dockerfile
+ echo FROM scratch >> ${TEST_SCRATCH_DIR}/Dockerfile
+ echo COPY --from=builder /bin/-no-such-file-error- /usr/bin >> ${TEST_SCRATCH_DIR}/Dockerfile
+ run_buildah 125 build-using-dockerfile $WITH_POLICY_JSON ${TEST_SCRATCH_DIR}
+ expect_output --substring "no such file or directory"
+}
+
+# copy --ignorefile requires --contextdir, and when both are given the listed
+# patterns (*.go, .ignore, notmystuff) must be excluded from the copy.
+@test "copy --ignorefile" {
+ mytest=${TEST_SCRATCH_DIR}/mytest
+ mkdir -p ${mytest}
+ touch ${mytest}/mystuff
+ touch ${mytest}/source.go
+ mkdir -p ${mytest}/notmystuff
+ touch ${mytest}/notmystuff/notmystuff
+ cat > ${mytest}/.ignore << _EOF
+*.go
+.ignore
+notmystuff
+_EOF
+
+# expected container contents: only the directory and the one unignored file
+expect="
+stuff
+stuff/mystuff"
+
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+
+ run_buildah 125 copy --ignorefile ${mytest}/.ignore $cid ${mytest} /stuff
+ expect_output -- "Error: --ignorefile option requires that you specify a context dir using --contextdir" "container file list"
+
+ run_buildah copy --contextdir=${mytest} --ignorefile ${mytest}/.ignore $cid ${mytest} /stuff
+
+ run_buildah mount $cid
+ mnt=$output
+ run find $mnt -printf "%P\n"
+ filelist=$(LC_ALL=C sort <<<"$output")
+ run_buildah umount $cid
+ expect_output --from="$filelist" "$expect" "container file list"
+}
+
+# copy --quiet must produce no output while still copying the file correctly.
+@test "copy-quiet" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah copy --quiet $cid ${TEST_SCRATCH_DIR}/randomfile /
+ expect_output ""
+ cmp ${TEST_SCRATCH_DIR}/randomfile $root/randomfile
+ run_buildah umount $cid
+ run_buildah rm $cid
+}
+
+# copy --from <container> copies out of another working container, with both
+# absolute and relative (to the source container's workingdir) source paths.
+@test "copy-from-container" {
+ _prefetch busybox
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ from=$output
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah copy --quiet $from ${TEST_SCRATCH_DIR}/randomfile /tmp/random
+ expect_output ""
+ run_buildah copy --quiet $WITH_POLICY_JSON --from $from $cid /tmp/random /tmp/random # absolute path
+ expect_output ""
+ run_buildah copy --quiet $WITH_POLICY_JSON --from $from $cid tmp/random /tmp/random2 # relative path
+ expect_output ""
+ run_buildah mount $cid
+ croot=$output
+ cmp ${TEST_SCRATCH_DIR}/randomfile ${croot}/tmp/random
+ cmp ${TEST_SCRATCH_DIR}/randomfile ${croot}/tmp/random2
+}
+
+# copy --from <container> with "/" as the source copies the entire rootfs of
+# the source container into the destination directory.
+@test "copy-container-root" {
+ _prefetch busybox
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ from=$output
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah copy --quiet $from ${TEST_SCRATCH_DIR}/randomfile /tmp/random
+ expect_output ""
+ run_buildah copy --quiet $WITH_POLICY_JSON --from $from $cid / /tmp/
+ # a transient file in the live rootfs may vanish mid-copy; tolerate that
+ expect_output "" || \
+ expect_output --substring "copier: file disappeared while reading"
+ run_buildah mount $cid
+ croot=$output
+ cmp ${TEST_SCRATCH_DIR}/randomfile ${croot}/tmp/tmp/random
+}
+
+# add --from <image> pulls the named image if needed and copies a path out of
+# it, with both absolute and relative source paths.
+@test "add-from-image" {
+ _prefetch busybox
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah add $WITH_POLICY_JSON --quiet --from ubuntu $cid /etc/passwd /tmp/passwd # should pull the image, absolute path
+ expect_output ""
+ run_buildah add --quiet $WITH_POLICY_JSON --from ubuntu $cid etc/passwd /tmp/passwd2 # relative path
+ expect_output ""
+ run_buildah from --quiet $WITH_POLICY_JSON ubuntu
+ ubuntu=$output
+ run_buildah mount $cid
+ croot=$output
+ run_buildah mount $ubuntu
+ ubuntu=$output
+ cmp $ubuntu/etc/passwd ${croot}/tmp/passwd
+ cmp $ubuntu/etc/passwd ${croot}/tmp/passwd2
+}
+
+# copy with a --contextdir containing a .dockerignore: ignored entries must be
+# absent from the container (ls exits 1), unignored ones present.
+@test "copy with .dockerignore" {
+ _prefetch alpine busybox
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ from=$output
+ run_buildah copy --contextdir=$BUDFILES/dockerignore $from $BUDFILES/dockerignore ./
+
+ run_buildah 1 run $from ls -l test1.txt
+
+ run_buildah run $from ls -l test2.txt
+
+ run_buildah 1 run $from ls -l sub1.txt
+
+ run_buildah 1 run $from ls -l sub2.txt
+
+ run_buildah 1 run $from ls -l subdir/
+}
+
+# Copying a file between containers must preserve extended attributes
+# (security.capability and user.* xattrs), modulo the SELinux label.
+@test "copy-preserving-extended-attributes" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ # if we need to change which image we use, any image that can provide a working setattr/setcap/getfattr will do
+ image="quay.io/libpod/fedora-minimal:34"
+ if ! which setfattr > /dev/null 2> /dev/null; then
+ skip "setfattr not available, unable to check if it'll work in filesystem at ${TEST_SCRATCH_DIR}"
+ fi
+ # probe whether the scratch filesystem supports user xattrs at all
+ run setfattr -n user.yeah -v butno ${TEST_SCRATCH_DIR}/root
+ if [ "$status" -ne 0 ] ; then
+ if [[ "$output" =~ "not supported" ]] ; then
+ skip "setfattr not supported in filesystem at ${TEST_SCRATCH_DIR}"
+ fi
+ skip "$output"
+ fi
+ _prefetch $image
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ first="$output"
+ run_buildah run $first microdnf -y install /usr/bin/setfattr /usr/sbin/setcap
+ run_buildah copy $first ${TEST_SCRATCH_DIR}/randomfile /
+ # set security.capability
+ run_buildah run $first setcap cap_setuid=ep /randomfile
+ # set user.something
+ run_buildah run $first setfattr -n user.yeah -v butno /randomfile
+ # copy the file to a second container
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ second="$output"
+ run_buildah run $second microdnf -y install /usr/bin/getfattr
+ run_buildah copy --from $first $second /randomfile /
+ # compare what the extended attributes look like. if we're on a system with SELinux, there's a label in here, too
+ run_buildah run $first sh -c "getfattr -d -m . --absolute-names /randomfile | grep -v ^security.selinux | sort"
+ expected="$output"
+ run_buildah run $second sh -c "getfattr -d -m . --absolute-names /randomfile | grep -v ^security.selinux | sort"
+ expect_output "$expected"
+}
+
+# copy with a relative --contextdir (".") honors .containerignore /
+# .dockerignore in that directory; only unignored files are copied.
+@test "copy-relative-context-dir" {
+ image=busybox
+ _prefetch $image
+ mkdir -p ${TEST_SCRATCH_DIR}/context
+ createrandom ${TEST_SCRATCH_DIR}/context/excluded_test_file
+ createrandom ${TEST_SCRATCH_DIR}/context/test_file
+ echo excluded_test_file | tee ${TEST_SCRATCH_DIR}/context/.containerignore | tee ${TEST_SCRATCH_DIR}/context/.dockerignore
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ ctr="$output"
+ cd ${TEST_SCRATCH_DIR}/context
+ run_buildah copy --contextdir . $ctr / /opt/
+ run_buildah run $ctr ls -1 /opt/
+ expect_line_count 1
+ assert "$output" = "test_file" "only contents of copied directory"
+}
+
+# A relative source path is resolved against the --contextdir.
+@test "copy-file-relative-context-dir" {
+ image=busybox
+ _prefetch $image
+ mkdir -p ${TEST_SCRATCH_DIR}/context
+ createrandom ${TEST_SCRATCH_DIR}/context/test_file
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ ctr="$output"
+ run_buildah copy --contextdir ${TEST_SCRATCH_DIR}/context $ctr test_file /opt/
+ run_buildah run $ctr ls -1 /opt/
+ expect_line_count 1
+ assert "$output" = "test_file" "only the one file"
+}
+
+# An absolute-looking source path is still interpreted relative to the
+# --contextdir root, not the host's /.
+@test "copy-file-absolute-context-dir" {
+ image=busybox
+ _prefetch $image
+ mkdir -p ${TEST_SCRATCH_DIR}/context/subdir
+ createrandom ${TEST_SCRATCH_DIR}/context/subdir/test_file
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ ctr="$output"
+ run_buildah copy --contextdir ${TEST_SCRATCH_DIR}/context $ctr /subdir/test_file /opt/
+ run_buildah run $ctr ls -1 /opt/
+ expect_line_count 1
+ assert "$output" = "test_file" "only the one file"
+}
+
+# Without --contextdir, a relative source path is resolved against the current
+# working directory: fails (125) elsewhere, succeeds after cd-ing in.
+@test "copy-file-relative-no-context-dir" {
+ image=busybox
+ _prefetch $image
+ mkdir -p ${TEST_SCRATCH_DIR}/context
+ createrandom ${TEST_SCRATCH_DIR}/context/test_file
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ ctr="$output"
+ # we're not in that directory currently
+ run_buildah 125 copy $ctr test_file /opt/
+ # now we are
+ cd ${TEST_SCRATCH_DIR}/context
+ run_buildah copy $ctr test_file /opt/
+ run_buildah run $ctr ls -1 /opt/
+ expect_line_count 1
+ assert "$output" = "test_file" "only the one file"
+}
diff --git a/tests/copy/copy.go b/tests/copy/copy.go
new file mode 100644
index 0000000..5330781
--- /dev/null
+++ b/tests/copy/copy.go
@@ -0,0 +1,160 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/buildah"
+ "github.com/containers/common/libnetwork/network"
+ "github.com/containers/common/pkg/config"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/signature"
+ imageStorage "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/unshare"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+// main implements a small stand-alone image-copy command used by the test
+// suite: it parses two image references, sets up containers/storage and the
+// network backend the way buildah would, and copies source to destination.
+func main() {
+ var storeOptions storage.StoreOptions
+ var systemContext types.SystemContext
+ var logLevel string
+ var maxParallelDownloads uint
+ var compressionFormat string
+ var manifestFormat string
+ // -1 is the "unset" sentinel: only a user-supplied level is propagated
+ // into the system context below.
+ compressionLevel := -1
+
+ if buildah.InitReexec() {
+ return
+ }
+
+ unshare.MaybeReexecUsingUserNamespace(false)
+
+ // Fall back to zero-valued store options if auto-detection fails.
+ storeOptions, err := storage.DefaultStoreOptionsAutoDetectUID()
+ if err != nil {
+ storeOptions = storage.StoreOptions{}
+ }
+
+ rootCmd := &cobra.Command{
+ Use: "copy [flags] source destination",
+ Long: "copies an image",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if err := cobra.ExactArgs(2)(cmd, args); err != nil {
+ return err
+ }
+ if compressionLevel != -1 {
+ systemContext.CompressionLevel = &compressionLevel
+ }
+ if compressionFormat != "" {
+ alg, err := compression.AlgorithmByName(compressionFormat)
+ if err != nil {
+ return err
+ }
+ systemContext.CompressionFormat = &alg
+ }
+ // Accept friendly aliases for the manifest MIME type.
+ switch strings.ToLower(manifestFormat) {
+ case "oci":
+ manifestFormat = v1.MediaTypeImageManifest
+ case "docker", "dockerv2s2":
+ manifestFormat = manifest.DockerV2Schema2MediaType
+ }
+
+ level, err := logrus.ParseLevel(logLevel)
+ if err != nil {
+ return err
+ }
+ logrus.SetLevel(level)
+
+ store, err := storage.GetStore(storeOptions)
+ if err != nil {
+ return err
+ }
+ imageStorage.Transport.SetStore(store)
+ // Shut the store down on every exit path from here on, not only
+ // when the copy succeeds.
+ defer func() {
+ if _, err := store.Shutdown(false); err != nil {
+ logrus.Error(err)
+ }
+ }()
+
+ conf, err := config.Default()
+ if err != nil {
+ return err
+ }
+ _, _, err = network.NetworkBackend(store, conf, false)
+ if err != nil {
+ return err
+ }
+
+ // Defensive checks; ExactArgs(2) above already guarantees both.
+ if len(args) < 1 {
+ return errors.New("no source name provided")
+ }
+ src, err := alltransports.ParseImageName(args[0])
+ if err != nil {
+ return fmt.Errorf("parsing source name: %w", err)
+ }
+ if len(args) < 2 {
+ return errors.New("no destination name provided")
+ }
+ dest, err := alltransports.ParseImageName(args[1])
+ if err != nil {
+ return fmt.Errorf("parsing destination name: %w", err)
+ }
+
+ policy, err := signature.DefaultPolicy(&systemContext)
+ if err != nil {
+ return fmt.Errorf("reading signature policy: %w", err)
+ }
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return fmt.Errorf("creating new signature policy context: %w", err)
+ }
+ defer func() {
+ if err := policyContext.Destroy(); err != nil {
+ logrus.Error(fmt.Errorf("destroying signature policy context: %w", err))
+ }
+ }()
+
+ options := cp.Options{
+ ReportWriter: os.Stdout,
+ SourceCtx: &systemContext,
+ DestinationCtx: &systemContext,
+ MaxParallelDownloads: maxParallelDownloads,
+ ForceManifestMIMEType: manifestFormat,
+ }
+ if _, err = cp.Image(context.TODO(), policyContext, dest, src, &options); err != nil {
+ return err
+ }
+ return nil
+ },
+ }
+
+ rootCmd.PersistentFlags().StringVar(&storeOptions.GraphRoot, "root", "", "storage root")
+ rootCmd.PersistentFlags().StringVar(&storeOptions.RunRoot, "runroot", "", "runtime root")
+ rootCmd.PersistentFlags().StringVar(&storeOptions.GraphDriverName, "storage-driver", "", "storage driver")
+ rootCmd.PersistentFlags().StringSliceVar(&storeOptions.GraphDriverOptions, "storage-opt", nil, "storage option")
+ rootCmd.PersistentFlags().StringVar(&systemContext.SystemRegistriesConfPath, "registries-conf", "", "location of registries.conf")
+ rootCmd.PersistentFlags().StringVar(&systemContext.SystemRegistriesConfDirPath, "registries-conf-dir", "", "location of registries.d")
+ rootCmd.PersistentFlags().StringVar(&systemContext.SignaturePolicyPath, "signature-policy", "", "`pathname` of signature policy file")
+ rootCmd.PersistentFlags().StringVar(&systemContext.UserShortNameAliasConfPath, "short-name-alias-conf", "", "`pathname` of short name alias cache file (not usually used)")
+ rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "warn", "logging level")
+ rootCmd.PersistentFlags().UintVar(&maxParallelDownloads, "max-parallel-downloads", 0, "maximum `number` of blobs to copy at once")
+ rootCmd.PersistentFlags().StringVar(&manifestFormat, "format", "", "image manifest type")
+ rootCmd.PersistentFlags().BoolVar(&systemContext.DirForceCompress, "dest-compress", false, "force compression of layers for dir: destinations")
+ rootCmd.PersistentFlags().BoolVar(&systemContext.DirForceDecompress, "dest-decompress", false, "force decompression of layers for dir: destinations")
+ rootCmd.PersistentFlags().StringVar(&compressionFormat, "dest-compress-format", "", "compression type")
+ // Register with the -1 sentinel as the default so that an unset flag does
+ // not force a compression level of 0.
+ rootCmd.PersistentFlags().IntVar(&compressionLevel, "dest-compress-level", compressionLevel, "compression level")
+ if err := rootCmd.Execute(); err != nil {
+ os.Exit(1)
+ }
+}
diff --git a/tests/deny.json b/tests/deny.json
new file mode 100644
index 0000000..3dcc4d7
--- /dev/null
+++ b/tests/deny.json
@@ -0,0 +1,7 @@
+{
+ "default": [
+ {
+ "type": "reject"
+ }
+ ]
+}
diff --git a/tests/digest.bats b/tests/digest.bats
new file mode 100644
index 0000000..3a63e17
--- /dev/null
+++ b/tests/digest.bats
@@ -0,0 +1,74 @@
+#!/usr/bin/env bats
+
+load helpers
+
+# fromreftest IMAGE - pull IMAGE, sanity-check its manifest schema version if
+# the name encodes one ("_v2sN"), then push/commit it a few ways to make sure
+# buildah doesn't crash handling the reference. Cleans up after itself.
+fromreftest() {
+ local img=$1
+
+ run_buildah from --quiet --pull $WITH_POLICY_JSON $img
+ cid=$output
+
+ # If image includes '_v2sN', verify that image is schema version N
+ local expected_schemaversion=$(expr "$img" : '.*_v2s\([0-9]\)')
+ if [ -n "$expected_schemaversion" ]; then
+ actual_schemaversion=$(imgtype -expected-manifest-type '*' -show-manifest $img | jq .schemaVersion)
+ expect_output --from="$actual_schemaversion" "$expected_schemaversion" \
+ ".schemaversion of $img"
+ fi
+
+ # This is all we test: basically, that buildah doesn't crash when pushing
+ pushdir=${TEST_SCRATCH_DIR}/fromreftest
+ mkdir -p ${pushdir}/{1,2,3}
+ run_buildah push $WITH_POLICY_JSON $img dir:${pushdir}/1
+ run_buildah commit $WITH_POLICY_JSON $cid new-image
+ run_buildah push $WITH_POLICY_JSON new-image dir:${pushdir}/2
+ run_buildah rmi new-image
+ run_buildah commit $WITH_POLICY_JSON $cid dir:${pushdir}/3
+
+ run_buildah rm $cid
+ rm -fr ${pushdir}
+}
+
+# Pull/push a schema-1 image pinned by digest.
+@test "from-by-digest-s1" {
+ skip_if_rootless_environment
+ fromreftest quay.io/libpod/testdigest_v2s1@sha256:816563225d7baae4782653efc9410579341754fe32cbe20f7600b39fc37d8ec7
+}
+
+# Pull/push a schema-1 image (by digest) that contains duplicate layers, then
+# verify the manifest really does have duplicated fsLayers/history entries.
+@test "from-by-digest-s1-a-discarded-layer" {
+ skip_if_rootless_environment
+ IMG=quay.io/libpod/testdigest_v2s1_with_dups@sha256:2c619fffbed29d8677e246798333e7d1b288333cb61c020575f6372c76fdbb52
+
+ fromreftest ${IMG}
+
+ # Verify that image meets our expectations (duplicate layers)
+ # Surprisingly, we do this after fromreftest, not before, because fromreftest
+ # has to pull the image for us.
+ #
+ # Check that the first and second .fsLayers and .history elements are dups
+ local manifest=$(imgtype -expected-manifest-type '*' -show-manifest ${IMG})
+ for element in fsLayers history; do
+ local first=$(jq ".${element}[0]" <<<"$manifest")
+ local second=$(jq ".${element}[1]" <<<"$manifest")
+ expect_output --from="$second" "$first" "${IMG}: .${element}[1] == [0]"
+ done
+}
+
+# Pull/push a schema-1 image referenced by tag.
+@test "from-by-tag-s1" {
+ skip_if_rootless_environment
+ fromreftest quay.io/libpod/testdigest_v2s1:20200210
+}
+
+# Pull/push a schema-2 image pinned by digest.
+@test "from-by-digest-s2" {
+ skip_if_rootless_environment
+ fromreftest quay.io/libpod/testdigest_v2s2@sha256:755f4d90b3716e2bf57060d249e2cd61c9ac089b1233465c5c2cb2d7ee550fdb
+}
+
+# Pull/push a schema-2 image referenced by tag.
+@test "from-by-tag-s2" {
+ skip_if_rootless_environment
+ fromreftest quay.io/libpod/testdigest_v2s2:20200210
+}
+
+# Pull/push a schema-2 image referenced by repository only (implicit :latest).
+@test "from-by-repo-only-s2" {
+ skip_if_rootless_environment
+ fromreftest quay.io/libpod/testdigest_v2s2
+}
diff --git a/tests/digest/README.md b/tests/digest/README.md
new file mode 100644
index 0000000..d306ee4
--- /dev/null
+++ b/tests/digest/README.md
@@ -0,0 +1,28 @@
+This subdirectory contains a script used to create images for testing.
+
+To rephrase: this script is used **before testing**, not used **in** testing.
+_Much_ before testing (days/weeks/months/years), and manually.
+
+The script is `make-v2sN` but it is never invoked as such. Instead,
+various different symlinks point to the script, and the script
+figures out its use by picking apart the name under which it is called.
+
+As of the initial commit on 2020-02-10 there are three symlinks:
+
+* make-v2s1 - Create a schema 1 image
+* make-v2s2 - Create a schema 2 image
+* make-v2s1-with-dups - Create a schema 1 image with two identical layers
+
+If the script is successful, it will emit instructions on how to
+push the images to quay and what else you might need to do.
+
+Updating
+========
+
+Should you need new image types, e.g. schema version 3 or an image
+with purple elephant GIFs in it:
+
+1. Decide on a name. Create a new symlink pointing to `make-v2sN`
+1. Add the relevant code to `make-v2sN`: a conditional check at the top, the actual image-creating code, and if possible a new test to make sure the generated image is good
+1. Run the script. Verify that the generated image is what you expect.
+1. Add new test(s) to `digest.bats`
diff --git a/tests/digest/make-v2s1 b/tests/digest/make-v2s1
new file mode 120000
index 0000000..3dc992e
--- /dev/null
+++ b/tests/digest/make-v2s1
@@ -0,0 +1 @@
+make-v2sN \ No newline at end of file
diff --git a/tests/digest/make-v2s1-with-dups b/tests/digest/make-v2s1-with-dups
new file mode 120000
index 0000000..3dc992e
--- /dev/null
+++ b/tests/digest/make-v2s1-with-dups
@@ -0,0 +1 @@
+make-v2sN \ No newline at end of file
diff --git a/tests/digest/make-v2s2 b/tests/digest/make-v2s2
new file mode 120000
index 0000000..3dc992e
--- /dev/null
+++ b/tests/digest/make-v2s2
@@ -0,0 +1 @@
+make-v2sN \ No newline at end of file
diff --git a/tests/digest/make-v2sN b/tests/digest/make-v2sN
new file mode 100755
index 0000000..eb98814
--- /dev/null
+++ b/tests/digest/make-v2sN
@@ -0,0 +1,180 @@
#!/bin/bash
#
# make-v2sN - create a v2sN image, possibly with dups
#
# This is a helper script used for creating custom images for buildah testing.
# The images are used in the digest.bats test.
#
# The script is never meant to run under its own name: symlinks such as
# make-v2s1, make-v2s2, and make-v2s1-with-dups point at it, and the desired
# schema version and "dups" behavior are derived from the invoked name.
#
ME=$(basename $0)

# die - print a diagnostic to stderr and exit nonzero
die() {
    echo "$ME: $*" >&2
    exit 1
}

###############################################################################
#
# From the script name, determine the desired schema version (1 or 2) and
# whether or not we want duplicate layers.
#
# Capture whatever single character follows "-v2s". The original code
# captured only [12], which made the "invoked as make-v2sN" diagnostic
# unreachable: a direct invocation failed the emptiness test first and
# reported the wrong error. Dispatching on the captured character fixes that.
schemaversion=$(expr "$ME" : ".*-v2s\(.\)")
case "$schemaversion" in
    1|2) ;;
    N)   die "Script must be invoked via symlink" ;;
    *)   die "Could not find 'v2s[12]' in basename" ;;
esac

# A "-dup" component in the name (e.g. make-v2s1-with-dups) requests
# duplicated layers; remember that as a suffix for the image name.
dup=
if expr "$ME" : ".*-dup" &>/dev/null; then
    dup="_with_dups"
fi

# Name under which the final image will be created and (manually) pushed.
IMGNAME=testdigest_v2s${schemaversion}${dup}
+
+###############################################################################
+# Create the image.
+
+# Any failing buildah command from here on aborts the script.
+set -e
+
+# First layer: an empty commit based on scratch.
+cid=$(buildah from scratch)
+buildah commit -q $cid interim1
+
+# Create a second layer containing this script and a README
+cid2=$(buildah from interim1)
+mp=$(buildah mount $cid2)
+cp $0 $mp/
+cat <<EOF >$mp/README
+This is a test image used for buildah testing.
+
+EOF
+
+# In the README include creation timestamp, user, script name, git tree state
+# add_to_readme - append one aligned "key : value" line to the mounted README.
+function add_to_readme() {
+    printf " %-12s : %s\n" "$1" "$2" >>$mp/README
+}
+
+add_to_readme "Created" "$(date --iso-8601=seconds)"
+
+# FIXME: do we really need to know? Will it ever, in practice, be non-root?
+user=$(id -un)
+if [ -n "$user" -a "$user" != "root" ]; then
+    add_to_readme "By (user)" "$user"
+fi
+
+# Record the script's repo-relative path if it is tracked by git;
+# fall back to the invocation path otherwise.
+create_script=$(cd $(dirname $0) && git ls-files --full-name $ME)
+if [ -z "$create_script" ]; then
+    create_script=$0
+fi
+add_to_readme "By (script)" "$create_script"
+
+# Record the git tree state ("-dirty" suffix flags uncommitted changes).
+git_state=$(cd $(dirname $0) && git describe --dirty)
+if [ -n "$git_state" ]; then
+    add_to_readme "git state" "$git_state"
+fi
+
+# Show the operator what will be baked into the image.
+echo "-----------------------------------------------------------------"
+cat $mp/README
+echo "-----------------------------------------------------------------"
+
+buildah umount $cid2
+buildah commit -q $cid2 interim2
+
+# Remember the interim layer names so they can be removed later.
+layers="interim2 interim1"
+buildah tag interim2 my_image
+
+###############################################################################
+#
+# Push/pull the image to/from a tempdir. This is a kludge allowing us to
+# clean up interim layers. It's also necessary for dealing with v2s1 layers.
+
+TMPDIR=$(mktemp --tmpdir -d $(basename $0).XXXXXXX)
+push_flags=
+if [[ $schemaversion -eq 1 ]]; then
+    # buildah can't actually create a v2s1 image; only v2s2. To create v2s1,
+    # dir-push it to a tmpdir using '--format v2s1'; that will be inherited
+    # when we reload it
+    push_flags="--format v2s1"
+fi
+buildah push $push_flags my_image dir:${TMPDIR}/${IMGNAME}
+
+# Clean up containers and images
+buildah rm -a
+buildah rmi -f my_image $layers
+
+# For a -dup image, hand-edit the pushed manifest: duplicate the first
+# fsLayers and history entries, producing two identical layers. The
+# original manifest is kept as manifest.json.BAK for reference.
+if [ -n "$dup" ]; then
+    manifest=${TMPDIR}/${IMGNAME}/manifest.json
+    cat $manifest |
+        jq -c '.fsLayers |= [.[0]] + .' |
+        jq -c '.history |= [.[0]] + .' |
+        tr -d '\012' >$manifest.tmp
+    mv $manifest $manifest.BAK
+    mv $manifest.tmp $manifest
+fi
+
+# Delete possibly-existing image, because 'buildah pull' will not overwrite it
+buildah rmi -f localhost/${IMGNAME}:latest &>/dev/null || true
+
+# Reload the image
+(cd $TMPDIR && buildah pull dir:${IMGNAME})
+
+# Leave the tmpdir behind for the -dup image!
+# (The push instructions emitted below need the on-disk dir: copy,
+# because skopeo must copy the hand-edited manifest, not the loaded image.)
+if [ -z "$dup" ]; then
+    rm -rf ${TMPDIR}
+fi
+
+###############################################################################
+#
+# We should now have a 'localhost/IMGNAME' image with desired SchemaVersion
+# and other features as requested.
+#
+# Now verify that what we have is what we intended.
+echo
+if type -p jq >&/dev/null; then
+    # Manifest is embedded in the image but as a string, not actual JSON;
+    # the eval-echo converts it to usable JSON
+    manifest=$(eval echo $(buildah inspect ${IMGNAME} | jq .Manifest))
+
+    # Check desired schema version:
+    actual_schemaversion=$(jq .schemaVersion <<<"$manifest")
+    if [[ $actual_schemaversion -ne $schemaversion ]]; then
+        die "Expected .schemaVersion $schemaversion, got '$actual_schemaversion'"
+    fi
+
+    # Emit manual follow-up instructions for pushing to quay.io.
+    echo "Image localhost/${IMGNAME} looks OK; feel free to:"
+    echo
+
+    if [ -n "$dup" ]; then
+        echo " \$SKOPEO copy dir:${TMPDIR}/${IMGNAME} docker://quay.io/libpod/${IMGNAME}:\$(date +%Y%m%d)"
+        echo " ^^^^^^^--- must be specially-crafted skopeo(*), see below"
+    else
+        echo " buildah push localhost/${IMGNAME} quay.io/libpod/${IMGNAME}:$(date +%Y%m%d)"
+        echo " buildah push localhost/${IMGNAME} quay.io/libpod/${IMGNAME}:latest"
+    fi
+
+    echo
+    echo "You may then need to log in to the https://quay.io/ web UI"
+    echo "make those images public, then update tags and/or SHAs"
+    echo "in test/digest.bats."
+    echo
+    echo "Note that the Digest SHA on quay.io != the SHA on the locally"
+    echo "created image. You can get the real SHA on quay.io by clicking"
+    echo "on the image name, then the luggage-tag icon on the left,"
+    echo "then the gray box with the text 'SHA256' (not the actual"
+    echo "hash shown in blue to its right), and copy-pasting the SHA"
+    echo "from the popup window."
+    echo
+    echo "NOTE: the first push to quay.io sometimes fails with some sort of"
+    echo "500 error, trying to reuse blob, blah blah. Just ignore it and"
+    echo "retry. IME it works the second time."
+
+    if [ -n "$dup" ]; then
+        echo
+        echo "(*) skopeo WILL NOT push an image with dup layers. To get it to"
+        echo "    do that, build a custom skopeo using the patch here:"
+        echo "    https://gist.github.com/nalind/b491204ff05c3c3f3b6ef014b333a60c"
+        echo "    ...then use that skopeo in the above 'copy' command."
+        # And, for posterity should the gist ever disappear:
+        #    vendor/github.com/containers/image/v5/manifest/docker_schema1.go
+        #    - remove lines 66-68 ('if ... s1.fixManifestLayers()...')
+    fi
+else
+    # Without jq we cannot parse the manifest; warn but do not fail.
+    echo "WARNING: 'jq' not found; unable to verify built image" >&2
+fi
diff --git a/tests/docker.json b/tests/docker.json
new file mode 100644
index 0000000..ca1d5c6
--- /dev/null
+++ b/tests/docker.json
@@ -0,0 +1,6 @@
+{
+ "default": [ { "type": "reject" } ],
+ "transports": {
+ "docker": { "": [ { "type": "insecureAcceptAnything" } ] }
+ }
+}
diff --git a/tests/e2e/buildah_suite_test.go b/tests/e2e/buildah_suite_test.go
new file mode 100644
index 0000000..95d5bbb
--- /dev/null
+++ b/tests/e2e/buildah_suite_test.go
@@ -0,0 +1,325 @@
+package integration
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/containers/buildah"
+ "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/directory"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
+ sstorage "github.com/containers/storage"
+ "github.com/containers/storage/pkg/reexec"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/onsi/gomega/gexec"
+)
+
+const (
+ artifactDir = "/tmp/.artifacts"
+)
+
+var (
+ integrationRoot string
+ cacheImages = []string{"alpine", "busybox", "quay.io/libpod/fedora-minimal:34"}
+ restoreImages = []string{"alpine", "busybox"}
+ defaultWaitTimeout = 90
+)
+
+// BuildAhSession wraps the gexec.session so we can extend it
+type BuildAhSession struct {
+ *gexec.Session
+}
+
+// BuildAhTest struct for command line options
+type BuildAhTest struct {
+ BuildAhBinary string
+ RunRoot string
+ StorageOptions string
+ ArtifactPath string
+ TempDir string
+ SignaturePath string
+ Root string
+ RegistriesConf string
+}
+
+// TestBuildAh ginkgo master function
+//
+// Standard 'go test' entry point: it gives containers/storage a chance to
+// run its reexec initializers, registers the gomega fail handler, and then
+// runs the ginkgo suite.
+func TestBuildAh(t *testing.T) {
+	if reexec.Init() {
+		// This process was a reexec child; it must not run the tests.
+		os.Exit(1)
+	}
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Buildah Suite")
+}
+
+var _ = BeforeSuite(func() {
+ //Cache images
+ cwd, _ := os.Getwd()
+ integrationRoot = filepath.Join(cwd, "../../")
+ buildah := BuildahCreate("/tmp")
+ buildah.ArtifactPath = artifactDir
+ if _, err := os.Stat(artifactDir); errors.Is(err, os.ErrNotExist) {
+ if err = os.Mkdir(artifactDir, 0777); err != nil {
+ fmt.Printf("%q\n", err)
+ os.Exit(1)
+ }
+ }
+ for _, image := range cacheImages {
+ fmt.Printf("Caching %s...\n", image)
+ if err := buildah.CreateArtifact(image); err != nil {
+ fmt.Printf("%q\n", err)
+ os.Exit(1)
+ }
+ }
+
+})
+
// CreateTempDirInTempDir creates a uniquely-named scratch directory
// (prefix "buildah_test") under the default temp dir and returns its path.
func CreateTempDirInTempDir() (string, error) {
	dir, err := os.MkdirTemp("", "buildah_test")
	if err != nil {
		return "", err
	}
	return dir, nil
}
+
+// BuildahCreate a BuildAhTest instance for the tests
+func BuildahCreate(tempDir string) BuildAhTest {
+ cwd, _ := os.Getwd()
+
+ buildAhBinary := filepath.Join(cwd, "../../bin/buildah")
+ if os.Getenv("BUILDAH_BINARY") != "" {
+ buildAhBinary = os.Getenv("BUILDAH_BINARY")
+ }
+ storageOpts := "--storage-driver vfs"
+ if os.Getenv("STORAGE_DRIVER") != "" {
+ storageOpts = fmt.Sprintf("--storage-driver %s", os.Getenv("STORAGE_DRIVER"))
+ }
+
+ return BuildAhTest{
+ BuildAhBinary: buildAhBinary,
+ RunRoot: filepath.Join(tempDir, "runroot"),
+ Root: filepath.Join(tempDir, "root"),
+ StorageOptions: storageOpts,
+ ArtifactPath: artifactDir,
+ TempDir: tempDir,
+ SignaturePath: "../../tests/policy.json",
+ RegistriesConf: "../../tests/registries.conf",
+ }
+}
+
+//MakeOptions assembles all the buildah main options
+func (p *BuildAhTest) MakeOptions() []string {
+ return strings.Split(fmt.Sprintf("--root %s --runroot %s --registries-conf %s",
+ p.Root, p.RunRoot, p.RegistriesConf), " ")
+}
+
+// BuildAh is the exec call to buildah on the filesystem
+//
+// It prepends the global options (roots, registries conf) and the
+// space-separated StorageOptions string to args, starts the binary
+// asynchronously under gexec with output mirrored to the ginkgo writer,
+// and returns the running session. Callers must Wait on the result.
+func (p *BuildAhTest) BuildAh(args []string) *BuildAhSession {
+	buildAhOptions := p.MakeOptions()
+	// StorageOptions is a flag string such as "--storage-driver vfs".
+	buildAhOptions = append(buildAhOptions, strings.Split(p.StorageOptions, " ")...)
+	buildAhOptions = append(buildAhOptions, args...)
+	fmt.Printf("Running: %s %s\n", p.BuildAhBinary, strings.Join(buildAhOptions, " "))
+	command := exec.Command(p.BuildAhBinary, buildAhOptions...)
+	session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
+	if err != nil {
+		Fail(fmt.Sprintf("unable to run buildah command: %s", strings.Join(buildAhOptions, " ")))
+	}
+	return &BuildAhSession{session}
+}
+
+// Cleanup cleans up the temporary store
+func (p *BuildAhTest) Cleanup() {
+ // Nuke tempdir
+ if err := os.RemoveAll(p.TempDir); err != nil {
+ fmt.Printf("%q\n", err)
+ }
+}
+
+// GrepString takes session output and behaves like grep. it returns a bool
+// if successful and an array of strings on positive matches
+func (s *BuildAhSession) GrepString(term string) (bool, []string) {
+ var (
+ greps []string
+ matches bool
+ )
+
+ for _, line := range strings.Split(s.OutputToString(), "\n") {
+ if strings.Contains(line, term) {
+ matches = true
+ greps = append(greps, line)
+ }
+ }
+ return matches, greps
+}
+
+// OutputToString formats session output to string
+func (s *BuildAhSession) OutputToString() string {
+ fields := bytes.Fields(s.Out.Contents())
+ return string(bytes.Join(fields, []byte{' '}))
+}
+
+// OutputToStringArray returns the output as a []string
+// where each array item is a line split by newline
+//
+// Unlike OutputToString, no whitespace normalization is performed; if the
+// output ends with a newline the final element will be an empty string.
+func (s *BuildAhSession) OutputToStringArray() []string {
+	return strings.Split(string(s.Out.Contents()), "\n")
+}
+
+// IsJSONOutputValid attempts to unmarshall the session buffer
+// and if successful, returns true, else false
+func (s *BuildAhSession) IsJSONOutputValid() bool {
+ var i interface{}
+ if err := json.Unmarshal(s.Out.Contents(), &i); err != nil {
+ fmt.Println(err)
+ return false
+ }
+ return true
+}
+
+// WaitWithDefaultTimeout blocks until the session's process exits or
+// defaultWaitTimeout (seconds) elapses.
+func (s *BuildAhSession) WaitWithDefaultTimeout() {
+	s.Wait(defaultWaitTimeout)
+}
+
+// SystemExec is used to exec a system command to check its exit code or output
+func (p *BuildAhTest) SystemExec(command string, args []string) *BuildAhSession {
+ c := exec.Command(command, args...)
+ session, err := gexec.Start(c, GinkgoWriter, GinkgoWriter)
+ if err != nil {
+ Fail(fmt.Sprintf("unable to run command: %s %s", command, strings.Join(args, " ")))
+ }
+ return &BuildAhSession{session}
+}
+
+// CreateArtifact creates a cached image in the artifact dir
+func (p *BuildAhTest) CreateArtifact(image string) error {
+ systemContext := types.SystemContext{
+ SignaturePolicyPath: p.SignaturePath,
+ }
+ policy, err := signature.DefaultPolicy(&systemContext)
+ if err != nil {
+ return fmt.Errorf("loading signature policy: %w", err)
+ }
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return fmt.Errorf("loading signature policy: %w", err)
+ }
+ defer func() {
+ _ = policyContext.Destroy()
+ }()
+ options := &copy.Options{}
+
+ importRef, err := docker.ParseReference("//" + image)
+ if err != nil {
+ return fmt.Errorf("parsing image name %v: %w", image, err)
+ }
+
+ imageDir := strings.Replace(image, "/", "_", -1)
+ exportDir := filepath.Join(p.ArtifactPath, imageDir)
+ exportRef, err := directory.NewReference(exportDir)
+ if err != nil {
+ return fmt.Errorf("creating image reference for %v: %w", exportDir, err)
+ }
+
+ _, err = copy.Image(context.Background(), policyContext, exportRef, importRef, options)
+ return err
+}
+
+// RestoreArtifact puts the cached image into our test store
+func (p *BuildAhTest) RestoreArtifact(image string) error {
+ storeOptions, _ := sstorage.DefaultStoreOptions(false, 0)
+ storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
+ if storeOptions.GraphDriverName == "" {
+ storeOptions.GraphDriverName = "vfs"
+ }
+ storeOptions.GraphRoot = p.Root
+ storeOptions.RunRoot = p.RunRoot
+ store, err := sstorage.GetStore(storeOptions)
+
+ options := &copy.Options{}
+ if err != nil {
+ return fmt.Errorf("opening storage: %w", err)
+ }
+ defer func() {
+ _, _ = store.Shutdown(false)
+ }()
+
+ storage.Transport.SetStore(store)
+ ref, err := storage.Transport.ParseStoreReference(store, image)
+ if err != nil {
+ return fmt.Errorf("parsing image name: %w", err)
+ }
+
+ imageDir := strings.Replace(image, "/", "_", -1)
+ importDir := filepath.Join(p.ArtifactPath, imageDir)
+ importRef, err := directory.NewReference(importDir)
+ if err != nil {
+ return fmt.Errorf("creating image reference for %v: %w", image, err)
+ }
+ systemContext := types.SystemContext{
+ SignaturePolicyPath: p.SignaturePath,
+ }
+ policy, err := signature.DefaultPolicy(&systemContext)
+ if err != nil {
+ return fmt.Errorf("loading signature policy: %w", err)
+ }
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return fmt.Errorf("loading signature policy: %w", err)
+ }
+ defer func() {
+ _ = policyContext.Destroy()
+ }()
+ _, err = copy.Image(context.Background(), policyContext, ref, importRef, options)
+ if err != nil {
+ return fmt.Errorf("importing %s: %w", importDir, err)
+ }
+ return nil
+}
+
+// RestoreAllArtifacts unpacks all cached images
+func (p *BuildAhTest) RestoreAllArtifacts() error {
+ for _, image := range restoreImages {
+ if err := p.RestoreArtifact(image); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//LineInOutputStartsWith returns true if a line in a
+// session output starts with the supplied string
+func (s *BuildAhSession) LineInOutputStartsWith(term string) bool {
+ for _, i := range s.OutputToStringArray() {
+ if strings.HasPrefix(i, term) {
+ return true
+ }
+ }
+ return false
+}
+
+// LineInOutputContains returns true if a line in a
+// session output contains the supplied string
+// (the previous comment wrongly said "starts with").
+func (s *BuildAhSession) LineInOutputContains(term string) bool {
+	for _, i := range s.OutputToStringArray() {
+		if strings.Contains(i, term) {
+			return true
+		}
+	}
+	return false
+}
+
+// InspectImageJSON unmarshals the session output of 'buildah inspect'
+// into a buildah.BuilderInfo; the current spec fails if the output is
+// not valid JSON for that structure.
+func (s *BuildAhSession) InspectImageJSON() buildah.BuilderInfo {
+	var i buildah.BuilderInfo
+	err := json.Unmarshal(s.Out.Contents(), &i)
+	Expect(err).To(BeNil())
+	return i
+}
diff --git a/tests/e2e/inspect_test.go b/tests/e2e/inspect_test.go
new file mode 100644
index 0000000..6a62368
--- /dev/null
+++ b/tests/e2e/inspect_test.go
@@ -0,0 +1,92 @@
+package integration
+
+import (
+ "os"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman load", func() {
+ var (
+ tempdir string
+ err error
+ buildahtest BuildAhTest
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ buildahtest = BuildahCreate(tempdir)
+ })
+
+ AfterEach(func() {
+ buildahtest.Cleanup()
+ })
+
+ It("buildah inspect json", func() {
+ b := buildahtest.BuildAh([]string{"from", "--pull=false", "scratch"})
+ b.WaitWithDefaultTimeout()
+ Expect(b.ExitCode()).To(Equal(0))
+ cid := b.OutputToString()
+ result := buildahtest.BuildAh([]string{"inspect", cid})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(result.IsJSONOutputValid()).To(BeTrue())
+ })
+
+ It("buildah inspect format", func() {
+ b := buildahtest.BuildAh([]string{"from", "--pull=false", "scratch"})
+ b.WaitWithDefaultTimeout()
+ Expect(b.ExitCode()).To(Equal(0))
+ cid := b.OutputToString()
+ result := buildahtest.BuildAh([]string{"inspect", "--format", "\"{{.}}\"", cid})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ })
+
+ It("buildah inspect image", func() {
+ b := buildahtest.BuildAh([]string{"from", "--pull=false", "scratch"})
+ b.WaitWithDefaultTimeout()
+ Expect(b.ExitCode()).To(Equal(0))
+ cid := b.OutputToString()
+ commit := buildahtest.BuildAh([]string{"commit", cid, "scratchy-image"})
+ commit.WaitWithDefaultTimeout()
+ Expect(commit.ExitCode()).To(Equal(0))
+
+ result := buildahtest.BuildAh([]string{"inspect", "--type", "image", "scratchy-image"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(result.IsJSONOutputValid()).To(BeTrue())
+
+ result = buildahtest.BuildAh([]string{"inspect", "--type", "image", "scratchy-image:latest"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(result.IsJSONOutputValid()).To(BeTrue())
+ })
+
+ It("buildah HTML escaped", func() {
+ b := buildahtest.BuildAh([]string{"from", "--pull=false", "scratch"})
+ b.WaitWithDefaultTimeout()
+ Expect(b.ExitCode()).To(Equal(0))
+ cid := b.OutputToString()
+
+ config := buildahtest.BuildAh([]string{"config", "--label", "maintainer=\"Darth Vader <dvader@darkside.io>\"", cid})
+ config.WaitWithDefaultTimeout()
+ Expect(config.ExitCode()).To(Equal(0))
+
+ commit := buildahtest.BuildAh([]string{"commit", cid, "darkside-image"})
+ commit.WaitWithDefaultTimeout()
+ Expect(commit.ExitCode()).To(Equal(0))
+
+ result := buildahtest.BuildAh([]string{"inspect", "--type", "image", "darkside-image"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+
+ data := result.InspectImageJSON()
+ Expect(data.Docker.Config.Labels["maintainer"]).To(Equal("\"Darth Vader <dvader@darkside.io>\""))
+
+ })
+})
diff --git a/tests/formats.bats b/tests/formats.bats
new file mode 100644
index 0000000..afa9dbc
--- /dev/null
+++ b/tests/formats.bats
@@ -0,0 +1,70 @@
+#!/usr/bin/env bats
+
+load helpers
+
###################
#  check_imgtype  #  shortcut for running 'imgtype' and verifying image
###################
function check_imgtype() {
    # First argument: image name
    image="$1"

    # Second argument: expected image type, 'oci' or 'docker'
    imgtype_oci="application/vnd.oci.image.manifest.v1+json"
    imgtype_dkr="application/vnd.docker.distribution.manifest.v2+json"

    # (removed unused 'expect=""' local; 'want'/'reject' carry the state)
    case "$2" in
        oci)    want=$imgtype_oci; reject=$imgtype_dkr;;
        docker) want=$imgtype_dkr; reject=$imgtype_oci;;
        *)      die "Internal error: unknown image type '$2'";;
    esac

    # First test: run imgtype with expected type, confirm exit 0 + no output
    echo "\$ imgtype -expected-manifest-type $want $image"
    run imgtype -expected-manifest-type $want $image
    echo "$output"
    if [[ $status -ne 0 ]]; then
        die "exit status is $status (expected 0)"
    fi
    expect_output "" "Checking imagetype($image) == $2"

    # Second test: the converse. Run imgtype with the WRONG expected type,
    # confirm error message and exit status 1
    echo "\$ imgtype -expected-manifest-type $reject $image [opposite test]"
    run imgtype -expected-manifest-type $reject $image
    echo "$output"
    if [[ $status -ne 1 ]]; then
        die "exit status is $status (expected 1)"
    fi

    # Can't embed entire string because the '+' sign is interpreted as regex
    expect_output --substring \
        "level=error msg=\"expected .* type \\\\\".*, got " \
        "Checking imagetype($image) == $2"
}
+
+
+# 'buildah commit' must honor --format, defaulting to OCI.
+@test "write-formats" {
+  skip_if_rootless_environment
+  run_buildah from --pull=false $WITH_POLICY_JSON scratch
+  cid=$output
+  run_buildah commit $WITH_POLICY_JSON $cid scratch-image-default
+  run_buildah commit --format docker $WITH_POLICY_JSON $cid scratch-image-docker
+  run_buildah commit --format oci $WITH_POLICY_JSON $cid scratch-image-oci
+
+  check_imgtype scratch-image-default oci
+  check_imgtype scratch-image-oci oci
+  check_imgtype scratch-image-docker docker
+}
+
+# 'buildah build-using-dockerfile' must honor --format, defaulting to OCI.
+@test "bud-formats" {
+  skip_if_rootless_environment
+  run_buildah build-using-dockerfile $WITH_POLICY_JSON -t scratch-image-default -f Containerfile $BUDFILES/from-scratch
+  run_buildah build-using-dockerfile --format docker $WITH_POLICY_JSON -t scratch-image-docker -f Containerfile $BUDFILES/from-scratch
+  run_buildah build-using-dockerfile --format oci $WITH_POLICY_JSON -t scratch-image-oci -f Containerfile $BUDFILES/from-scratch
+
+  check_imgtype scratch-image-default oci
+  check_imgtype scratch-image-oci oci
+  check_imgtype scratch-image-docker docker
+}
diff --git a/tests/from.bats b/tests/from.bats
new file mode 100644
index 0000000..274c87d
--- /dev/null
+++ b/tests/from.bats
@@ -0,0 +1,670 @@
+#!/usr/bin/env bats
+
+load helpers
+
+# Options must precede the image name; each misplaced flag must make
+# 'buildah from' fail with exit status 125 and a usage error.
+@test "from-flags-order-verification" {
+  run_buildah 125 from scratch -q
+  check_options_flag_err "-q"
+
+  run_buildah 125 from scratch --pull
+  check_options_flag_err "--pull"
+
+  run_buildah 125 from scratch --ulimit=1024
+  check_options_flag_err "--ulimit=1024"
+
+  run_buildah 125 from scratch --name container-name-irrelevant
+  check_options_flag_err "--name"
+
+  run_buildah 125 from scratch --cred="fake fake" --name small
+  check_options_flag_err "--cred=fake fake"
+}
+
+# NOTE(review): the value captured here is .FromImageID, then used as
+# "sha256:$digest"; this relies on the image ID being acceptable as a
+# sha256 reference — confirm that is intentional.
+@test "from-with-digest" {
+  run_buildah pull alpine
+  run_buildah inspect --format "{{.FromImageID}}" alpine
+  digest=$output
+
+  # A known ID must work as sha256:<id>...
+  run_buildah from "sha256:$digest"
+  run_buildah rm $output
+
+  # ...and a well-formed but unknown digest must fail with 125.
+  run_buildah 125 from sha256:1111111111111111111111111111111111111111111111111111111111111111
+  expect_output --substring "1111111111111111111111111111111111111111111111111111111111111111: image not known"
+}
+
+# Commit to non-registry transports (dir, oci-archive, docker-archive) and
+# create new containers from each; the container name must derive from the
+# transport, not the path.
+@test "commit-to-from-elsewhere" {
+  elsewhere=${TEST_SCRATCH_DIR}/elsewhere-img
+  mkdir -p ${elsewhere}
+
+  # dir: transport (also exercises --retry/--retry-delay on 'from')
+  run_buildah from --retry 4 --retry-delay 4s --pull $WITH_POLICY_JSON scratch
+  cid=$output
+  run_buildah commit $WITH_POLICY_JSON $cid dir:${elsewhere}
+  run_buildah rm $cid
+
+  run_buildah from --quiet --pull=false $WITH_POLICY_JSON dir:${elsewhere}
+  expect_output "dir-working-container"
+  run_buildah rm $output
+
+  run_buildah from --quiet --pull-always $WITH_POLICY_JSON dir:${elsewhere}
+  expect_output "dir-working-container"
+
+  # oci-archive: transport
+  run_buildah from --pull $WITH_POLICY_JSON scratch
+  cid=$output
+  run_buildah commit $WITH_POLICY_JSON $cid oci-archive:${elsewhere}.oci
+  run_buildah rm $cid
+
+  run_buildah from --quiet --pull=false $WITH_POLICY_JSON oci-archive:${elsewhere}.oci
+  expect_output "oci-archive-working-container"
+  run_buildah rm $output
+
+  # docker-archive: transport
+  run_buildah from --pull $WITH_POLICY_JSON scratch
+  cid=$output
+  run_buildah commit $WITH_POLICY_JSON $cid docker-archive:${elsewhere}.docker
+  run_buildah rm $cid
+
+  run_buildah from --quiet --pull=false $WITH_POLICY_JSON docker-archive:${elsewhere}.docker
+  expect_output "docker-archive-working-container"
+  run_buildah rm $output
+}
+
+# Container names must be derived from the tag actually used, and scratch
+# images must carry no base-image annotations.
+@test "from-tagged-image" {
+  # GitHub #396: Make sure the container name starts with the correct image even when it's tagged.
+  run_buildah from --pull=false $WITH_POLICY_JSON scratch
+  cid=$output
+  run_buildah commit $WITH_POLICY_JSON "$cid" scratch2
+  # Also check for base-image annotations.
+  run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' scratch2
+  expect_output "" "no base digest for scratch"
+  run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' scratch2
+  expect_output "" "no base name for scratch"
+  run_buildah rm $cid
+  run_buildah tag scratch2 scratch3
+  # Set --pull=false to prevent looking for a newer scratch3 image.
+  run_buildah from --pull=false $WITH_POLICY_JSON scratch3
+  expect_output --substring "scratch3-working-container"
+  run_buildah rm $output
+  run_buildah rmi scratch2 scratch3
+
+  # GitHub https://github.com/containers/buildah/issues/396#issuecomment-360949396
+  run_buildah from --quiet --pull=true $WITH_POLICY_JSON alpine
+  cid=$output
+  run_buildah rm $cid
+  run_buildah tag alpine alpine2
+  run_buildah from --quiet $WITH_POLICY_JSON localhost/alpine2
+  expect_output "alpine2-working-container"
+  run_buildah rm $output
+  # --suffix overrides the default "-working-container" suffix.
+  tmp=$RANDOM
+  run_buildah from --suffix $tmp --quiet $WITH_POLICY_JSON localhost/alpine2
+  expect_output "alpine2-$tmp"
+  run_buildah rm $output
+  run_buildah rmi alpine alpine2
+
+  # Fully-qualified names and explicit tags must also resolve.
+  run_buildah from --quiet --pull=true $WITH_POLICY_JSON docker.io/alpine
+  run_buildah rm $output
+  run_buildah rmi docker.io/alpine
+
+  run_buildah from --quiet --pull=true $WITH_POLICY_JSON docker.io/alpine:latest
+  run_buildah rm $output
+  run_buildah rmi docker.io/alpine:latest
+
+  run_buildah from --quiet --pull=true $WITH_POLICY_JSON docker.io/centos:7
+  run_buildah rm $output
+  run_buildah rmi docker.io/centos:7
+
+  run_buildah from --quiet --pull=true $WITH_POLICY_JSON docker.io/centos:latest
+  run_buildah rm $output
+  run_buildah rmi docker.io/centos:latest
+}
+
+# 'buildah from' must work against archives and dir: layouts created by
+# 'buildah push', with a reference embedded in the archive.
+@test "from the following transports: docker-archive, oci-archive, and dir" {
+  _prefetch alpine
+  run_buildah from --quiet --pull=true $WITH_POLICY_JSON alpine
+  run_buildah rm $output
+
+  run_buildah from --quiet --pull=true $WITH_POLICY_JSON docker:latest
+  run_buildah rm $output
+
+  # Export alpine to each transport, embedding the name "alpine".
+  run_buildah push $WITH_POLICY_JSON alpine docker-archive:${TEST_SCRATCH_DIR}/docker-alp.tar:alpine
+  run_buildah push $WITH_POLICY_JSON alpine oci-archive:${TEST_SCRATCH_DIR}/oci-alp.tar:alpine
+  run_buildah push $WITH_POLICY_JSON alpine dir:${TEST_SCRATCH_DIR}/alp-dir
+  run_buildah rmi alpine
+
+  # Archives carrying a name yield "<name>-working-container";
+  # the dir: transport has no embedded name.
+  run_buildah from --quiet $WITH_POLICY_JSON docker-archive:${TEST_SCRATCH_DIR}/docker-alp.tar
+  expect_output "alpine-working-container"
+  run_buildah rm ${output}
+  run_buildah rmi alpine
+
+  run_buildah from --quiet $WITH_POLICY_JSON oci-archive:${TEST_SCRATCH_DIR}/oci-alp.tar
+  expect_output "alpine-working-container"
+  run_buildah rm ${output}
+  run_buildah rmi alpine
+
+  run_buildah from --quiet $WITH_POLICY_JSON dir:${TEST_SCRATCH_DIR}/alp-dir
+  expect_output "dir-working-container"
+}
+
+# Same as above, but with no reference embedded in the archives: the
+# container name then falls back to the archive type (for oci-archive).
+@test "from the following transports: docker-archive and oci-archive with no image reference" {
+  _prefetch alpine
+  run_buildah from --quiet --pull=true $WITH_POLICY_JSON alpine
+  run_buildah rm $output
+
+  run_buildah push $WITH_POLICY_JSON alpine docker-archive:${TEST_SCRATCH_DIR}/docker-alp.tar
+  run_buildah push $WITH_POLICY_JSON alpine oci-archive:${TEST_SCRATCH_DIR}/oci-alp.tar
+  run_buildah rmi alpine
+
+  run_buildah from --quiet $WITH_POLICY_JSON docker-archive:${TEST_SCRATCH_DIR}/docker-alp.tar
+  expect_output "alpine-working-container"
+  run_buildah rm $output
+  run_buildah rmi -a
+
+  run_buildah from --quiet $WITH_POLICY_JSON oci-archive:${TEST_SCRATCH_DIR}/oci-alp.tar
+  expect_output "oci-archive-working-container"
+  run_buildah rm $output
+  run_buildah rmi -a
+}
+
+# The four tests below verify that CPU resource flags on 'buildah from'
+# are reflected in the container's cgroup, reading the v2 unified files
+# or the v1 controller files as appropriate.
+@test "from cpu-period test" {
+  skip_if_rootless_environment
+  skip_if_chroot
+  skip_if_rootless_and_cgroupv1
+  skip_if_no_runtime
+
+  _prefetch alpine
+  run_buildah from --quiet --cpu-period=5000 --pull $WITH_POLICY_JSON alpine
+  cid=$output
+  if is_cgroupsv2; then
+    # cpu.max holds "<quota> <period>"; field 2 is the period.
+    run_buildah run $cid /bin/sh -c "cut -d ' ' -f 2 /sys/fs/cgroup/\$(awk -F: '{print \$NF}' /proc/self/cgroup)/cpu.max"
+  else
+    run_buildah run $cid cat /sys/fs/cgroup/cpu/cpu.cfs_period_us
+  fi
+  expect_output "5000"
+}
+
+@test "from cpu-quota test" {
+  skip_if_rootless_environment
+  skip_if_chroot
+  skip_if_rootless_and_cgroupv1
+  skip_if_no_runtime
+
+  _prefetch alpine
+  run_buildah from --quiet --cpu-quota=5000 --pull=false $WITH_POLICY_JSON alpine
+  cid=$output
+  if is_cgroupsv2; then
+    # cpu.max holds "<quota> <period>"; field 1 is the quota.
+    run_buildah run $cid /bin/sh -c "cut -d ' ' -f 1 /sys/fs/cgroup/\$(awk -F: '{print \$NF}' /proc/self/cgroup)/cpu.max"
+  else
+    run_buildah run $cid cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us
+  fi
+  expect_output "5000"
+}
+
+@test "from cpu-shares test" {
+  skip_if_rootless_environment
+  skip_if_chroot
+  skip_if_rootless_and_cgroupv1
+  skip_if_no_runtime
+
+  _prefetch alpine
+  shares=2
+  run_buildah from --quiet --cpu-shares=${shares} --pull $WITH_POLICY_JSON alpine
+  cid=$output
+  if is_cgroupsv2; then
+    run_buildah run $cid /bin/sh -c "cat /sys/fs/cgroup/\$(awk -F : '{print \$NF}' /proc/self/cgroup)/cpu.weight"
+    # v1 shares (range 2-262144) are converted to v2 weight (range 1-10000)
+    # by this linear mapping.
+    expect_output "$((1 + ((${shares} - 2) * 9999) / 262142))"
+  else
+    run_buildah run $cid cat /sys/fs/cgroup/cpu/cpu.shares
+    expect_output "${shares}"
+  fi
+}
+
+@test "from cpuset-cpus test" {
+  skip_if_rootless_environment
+  skip_if_chroot
+  skip_if_rootless_and_cgroupv1
+  skip_if_no_runtime
+
+  _prefetch alpine
+  run_buildah from --quiet --cpuset-cpus=0 --pull=false $WITH_POLICY_JSON alpine
+  cid=$output
+  if is_cgroupsv2; then
+    run_buildah run $cid /bin/sh -c "cat /sys/fs/cgroup/\$(awk -F : '{print \$NF}' /proc/self/cgroup)/cpuset.cpus"
+  else
+    run_buildah run $cid cat /sys/fs/cgroup/cpuset/cpuset.cpus
+  fi
+  expect_output "0"
+}
+
+@test "from cpuset-mems test" {
+  skip_if_rootless_environment
+  skip_if_chroot
+  skip_if_rootless_and_cgroupv1
+  skip_if_no_runtime
+
+  _prefetch alpine
+  run_buildah from --quiet --cpuset-mems=0 --pull $WITH_POLICY_JSON alpine
+  cid=$output
+  if is_cgroupsv2; then
+    run_buildah run $cid /bin/sh -c "cat /sys/fs/cgroup/\$(awk -F : '{print \$NF}' /proc/self/cgroup)/cpuset.mems"
+  else
+    run_buildah run $cid cat /sys/fs/cgroup/cpuset/cpuset.mems
+  fi
+  expect_output "0"
+}
+
+# --memory/--memory-swap must land in the cgroup. Note the semantics differ:
+# cgroup v1 memsw.limit is memory+swap (70m = 73400320), while cgroup v2
+# swap.max is swap only (70m - 40m = 30m = 31457280).
+@test "from memory test" {
+  skip_if_rootless_environment
+  skip_if_chroot
+  skip_if_rootless_and_cgroupv1
+
+  _prefetch alpine
+  run_buildah from --quiet --memory=40m --memory-swap=70m --pull=false $WITH_POLICY_JSON alpine
+  cid=$output
+
+  # Life is much more complicated under cgroups v2
+  mpath='/sys/fs/cgroup/memory/memory.limit_in_bytes'
+  spath='/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes'
+  expect_sw=73400320
+  if is_cgroupsv2; then
+    mpath="/sys/fs/cgroup\$(awk -F: '{print \$3}' /proc/self/cgroup)/memory.max"
+    spath="/sys/fs/cgroup\$(awk -F: '{print \$3}' /proc/self/cgroup)/memory.swap.max"
+    expect_sw=31457280
+  fi
+  run_buildah run $cid sh -c "cat $mpath"
+  expect_output "41943040" "$mpath"
+  run_buildah run $cid sh -c "cat $spath"
+  expect_output "$expect_sw" "$spath"
+}
+
+# --volume mounts (read-write and read-only) must show up in the
+# container's mount table.
+@test "from volume test" {
+  skip_if_no_runtime
+
+  _prefetch alpine
+  run_buildah from --quiet --volume=${TEST_SCRATCH_DIR}:/myvol --pull $WITH_POLICY_JSON alpine
+  cid=$output
+  run_buildah run $cid -- cat /proc/mounts
+  expect_output --substring " /myvol "
+}
+
+@test "from volume ro test" {
+  skip_if_chroot
+  skip_if_no_runtime
+
+  _prefetch alpine
+  run_buildah from --quiet --volume=${TEST_SCRATCH_DIR}:/myvol:ro --pull=false $WITH_POLICY_JSON alpine
+  cid=$output
+  run_buildah run $cid -- cat /proc/mounts
+  expect_output --substring " /myvol "
+}
+
+# The U volume flag must chown the mounted volume to the container's
+# mapped root, so files appear as 0:0 inside the user namespace.
+@test "from --volume with U flag" {
+  skip_if_rootless_environment
+  skip_if_no_runtime
+
+  # Check if we're running in an environment that can even test this.
+  run readlink /proc/self/ns/user
+  echo "readlink /proc/self/ns/user -> $output"
+  [ $status -eq 0 ] || skip "user namespaces not supported"
+
+  # Generate mappings for using a user namespace.
+  uidbase=$((${RANDOM}+1024))
+  gidbase=$((${RANDOM}+1024))
+  uidsize=$((${RANDOM}+1024))
+  gidsize=$((${RANDOM}+1024))
+
+  # Create source volume.
+  mkdir ${TEST_SCRATCH_DIR}/testdata
+  touch ${TEST_SCRATCH_DIR}/testdata/testfile1.txt
+
+  # Create a container that uses that mapping and U volume flag.
+  _prefetch alpine
+  run_buildah from --pull=false $WITH_POLICY_JSON --userns-uid-map 0:$uidbase:$uidsize --userns-gid-map 0:$gidbase:$gidsize --volume ${TEST_SCRATCH_DIR}/testdata:/mnt:z,U alpine
+  ctr="$output"
+
+  # Test mounted volume has correct UID and GID ownership.
+  run_buildah run "$ctr" stat -c "%u:%g" /mnt/testfile1.txt
+  expect_output "0:0"
+
+  # Test user can create file in the mounted volume.
+  run_buildah run "$ctr" touch /mnt/testfile2.txt
+
+  # Test created file has correct UID and GID ownership.
+  run_buildah run "$ctr" stat -c "%u:%g" /mnt/testfile2.txt
+  expect_output "0:0"
+}
+
+@test "from shm-size test" {
+ skip_if_chroot
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --shm-size=80m --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid -- df -h /dev/shm
+ expect_output --substring " 80.0M "
+}
+
+@test "from add-host test" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --add-host=localhost:127.0.0.1 --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run --net=container $cid -- cat /etc/hosts
+ expect_output --substring "127.0.0.1[[:blank:]]*localhost"
+}
+
+@test "from name test" {
+ _prefetch alpine
+ container_name=mycontainer
+ run_buildah from --quiet --name=${container_name} --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect --format '{{.Container}}' ${container_name}
+}
+
+@test "from cidfile test" {
+ _prefetch alpine
+ run_buildah from --cidfile ${TEST_SCRATCH_DIR}/output.cid --pull=false $WITH_POLICY_JSON alpine
+ cid=$(< ${TEST_SCRATCH_DIR}/output.cid)
+ run_buildah containers -f id=${cid}
+}
+
+@test "from pull never" {
+ run_buildah 125 from $WITH_POLICY_JSON --pull-never busybox
+ echo "$output"
+ expect_output --substring "busybox: image not known"
+
+ run_buildah from $WITH_POLICY_JSON --pull=false busybox
+ echo "$output"
+ expect_output --substring "busybox-working-container"
+
+ run_buildah from $WITH_POLICY_JSON --pull=never busybox
+ echo "$output"
+ expect_output --substring "busybox-working-container"
+}
+
+@test "from pull false no local image" {
+ _prefetch busybox
+ target=my-busybox
+ run_buildah from $WITH_POLICY_JSON --pull=false busybox
+ echo "$output"
+ expect_output --substring "busybox-working-container"
+}
+
+@test "from with nonexistent authfile: fails" {
+ run_buildah 125 from --authfile /no/such/file --pull $WITH_POLICY_JSON alpine
+ expect_output "Error: credential file is not accessible: stat /no/such/file: no such file or directory"
+}
+
+@test "from --pull-always: emits 'Getting' even if image is cached" {
+ _prefetch docker.io/busybox
+ run_buildah inspect --format "{{.FromImageDigest}}" docker.io/busybox
+ fromDigest="$output"
+ run_buildah pull $WITH_POLICY_JSON docker.io/busybox
+ run_buildah from $WITH_POLICY_JSON --name busyboxc --pull-always docker.io/busybox
+ expect_output --substring "Getting"
+ run_buildah commit $WITH_POLICY_JSON busyboxc fakename-img
+ run_buildah 125 from $WITH_POLICY_JSON --pull=always fakename-img
+
+ # Also check for base-image annotations.
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.digest" }}' fakename-img
+ expect_output "$fromDigest" "base digest from busybox"
+ run_buildah inspect --format '{{index .ImageAnnotations "org.opencontainers.image.base.name" }}' fakename-img
+ expect_output "docker.io/library/busybox:latest" "base name from busybox"
+}
+
+@test "from --quiet: should not emit progress messages" {
+ # Force a pull. Normally this would say 'Getting image ...' and other
+ # progress messages. With --quiet, we should see only the container name.
+ run_buildah '?' rmi busybox
+ run_buildah from $WITH_POLICY_JSON --quiet docker.io/busybox
+ expect_output "busybox-working-container"
+}
+
+@test "from encrypted local image" {
+ _prefetch busybox
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 1024
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey2.pem 1024
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ run_buildah push $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub busybox oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+
+ # Try encrypted image without key should fail
+ run_buildah 1 from oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+ expect_output --substring "archive/tar: invalid tar header"
+
+ # Try encrypted image with wrong key should fail
+ run_buildah 125 from --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey2.pem oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+ expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+
+ # Providing the right key should succeed
+ run_buildah from --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey.pem oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "from encrypted registry image" {
+ _prefetch busybox
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 2048
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey2.pem 2048
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ start_registry
+ run_buildah push $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+ # Try encrypted image without key should fail
+ run_buildah 1 from --tls-verify=false --creds testuser:testpassword docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ expect_output --substring "archive/tar: invalid tar header"
+
+ # Try encrypted image with wrong key should fail
+ run_buildah 125 from --tls-verify=false --creds testuser:testpassword --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey2.pem docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+
+ # Providing the right key should succeed
+ run_buildah from --tls-verify=false --creds testuser:testpassword --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey.pem docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ run_buildah rm -a
+ run_buildah rmi localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "from with non buildah container" {
+ skip_if_in_container
+ skip_if_no_podman
+
+ _prefetch busybox
+ podman create --net=host --name busyboxc-podman busybox top
+ run_buildah from $WITH_POLICY_JSON --name busyboxc busybox
+ expect_output --substring "busyboxc"
+ podman rm -f busyboxc-podman
+ run_buildah rm busyboxc
+}
+
+@test "from --arch test" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON --arch=arm64 alpine
+ other=$output
+ run_buildah from --quiet --pull $WITH_POLICY_JSON --arch=$(go env GOARCH) alpine
+ cid=$output
+ run_buildah copy --from $other $cid /etc/apk/arch /root/other-arch
+ run_buildah run $cid cat /root/other-arch
+ expect_output "aarch64"
+
+ run_buildah from --quiet --pull $WITH_POLICY_JSON --arch=s390x alpine
+ other=$output
+ run_buildah copy --from $other $cid /etc/apk/arch /root/other-arch
+ run_buildah run $cid cat /root/other-arch
+ expect_output "s390x"
+}
+
+@test "from --platform test" {
+ skip_if_no_runtime
+
+ run_buildah version
+ platform=$(grep ^BuildPlatform: <<< "$output")
+ echo "$platform"
+ platform=${platform##* }
+ echo "$platform"
+
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON --platform=linux/arm64 alpine
+ other=$output
+ run_buildah from --quiet --pull $WITH_POLICY_JSON --platform=${platform} alpine
+ cid=$output
+ run_buildah copy --from $other $cid /etc/apk/arch /root/other-arch
+ run_buildah run $cid cat /root/other-arch
+ expect_output "aarch64"
+
+ run_buildah from --quiet --pull $WITH_POLICY_JSON --platform=linux/s390x alpine
+ other=$output
+ run_buildah copy --from $other $cid /etc/apk/arch /root/other-arch
+ run_buildah run $cid cat /root/other-arch
+ expect_output "s390x"
+}
+
+@test "from --authfile test" {
+ _prefetch busybox
+ start_registry
+ run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT}
+ run_buildah push $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ target=busybox-image
+ run_buildah from -q $WITH_POLICY_JSON --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth docker://localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ run_buildah rm $output
+ run_buildah rmi localhost:${REGISTRY_PORT}/buildah/busybox:latest
+}
+
+@test "from --cap-add/--cap-drop test" {
+ _prefetch alpine
+ CAP_DAC_OVERRIDE=2 # unlikely to change
+
+ # Try with default caps.
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid awk '/^CapEff/{print $2;}' /proc/self/status
+ defaultcaps="$output"
+ run_buildah rm $cid
+
+ if ((0x$defaultcaps & 0x$CAP_DAC_OVERRIDE)); then
+ run_buildah from --quiet --cap-drop CAP_DAC_OVERRIDE --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid awk '/^CapEff/{print $2;}' /proc/self/status
+ droppedcaps="$output"
+ run_buildah rm $cid
+ if ((0x$droppedcaps & 0x$CAP_DAC_OVERRIDE)); then
+ die "--cap-drop did not drop DAC_OVERRIDE: $droppedcaps"
+ fi
+ else
+ run_buildah from --quiet --cap-add CAP_DAC_OVERRIDE --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid awk '/^CapEff/{print $2;}' /proc/self/status
+ addedcaps="$output"
+ run_buildah rm $cid
+ if (( !(0x$addedcaps & 0x$CAP_DAC_OVERRIDE) )); then
+ die "--cap-add did not add DAC_OVERRIDE: $addedcaps"
+ fi
+ fi
+}
+
+@test "from ulimit test" {
+ _prefetch alpine
+ run_buildah from -q --ulimit cpu=300 $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid /bin/sh -c "ulimit -t"
+ expect_output "300" "ulimit -t"
+}
+
+@test "from isolation test" {
+ skip_if_rootless_environment
+ _prefetch alpine
+ run_buildah from -q --isolation chroot $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect $cid
+ expect_output --substring '"Isolation": "chroot"'
+
+ if [ -z "${BUILDAH_ISOLATION}" ]; then
+ run readlink /proc/self/ns/pid
+ host_pidns=$output
+ run_buildah run --pid private $cid readlink /proc/self/ns/pid
+ # chroot isolation doesn't make a new PID namespace.
+ expect_output "${host_pidns}"
+ fi
+}
+
+@test "from cgroup-parent test" {
+ skip_if_rootless_environment
+ skip_if_chroot
+
+ _prefetch alpine
+ # with cgroup-parent
+ run_buildah from -q --cgroupns=host --cgroup-parent test-cgroup $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah --cgroup-manager cgroupfs run $cid /bin/sh -c 'cat /proc/$$/cgroup'
+ expect_output --substring "test-cgroup"
+
+ # without cgroup-parent
+ run_buildah from -q $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah --cgroup-manager cgroupfs run $cid /bin/sh -c 'cat /proc/$$/cgroup'
+ if [ -n "$(grep "test-cgroup" <<< "$output")" ]; then
+ die "Unexpected cgroup."
+ fi
+}
+
+@test "from cni config test" {
+ _prefetch alpine
+
+ cni_config_dir=${TEST_SCRATCH_DIR}/no-cni-configs
+ cni_plugin_path=${TEST_SCRATCH_DIR}/no-cni-plugin
+ mkdir -p ${cni_config_dir}
+ mkdir -p ${cni_plugin_path}
+ run_buildah from -q --cni-config-dir=${cni_config_dir} --cni-plugin-path=${cni_plugin_path} $WITH_POLICY_JSON alpine
+ cid=$output
+
+ run_buildah inspect --format '{{.CNIConfigDir}}' $cid
+ expect_output "${cni_config_dir}"
+ run_buildah inspect --format '{{.CNIPluginPath}}' $cid
+ expect_output "${cni_plugin_path}"
+}
+
+@test "from-image-with-zstd-compression" {
+ copy --format oci --dest-compress --dest-compress-format zstd docker://quay.io/libpod/alpine_nginx:latest dir:${TEST_SCRATCH_DIR}/base-image
+ run_buildah from dir:${TEST_SCRATCH_DIR}/base-image
+}
+
+@test "from proxy test" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ tmp=$RANDOM
+ run_buildah from --quiet --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ FTP_PROXY=$tmp run_buildah run $cid printenv FTP_PROXY
+ expect_output "$tmp"
+ ftp_proxy=$tmp run_buildah run $cid printenv ftp_proxy
+ expect_output "$tmp"
+ HTTP_PROXY=$tmp run_buildah run $cid printenv HTTP_PROXY
+ expect_output "$tmp"
+ https_proxy=$tmp run_buildah run $cid printenv https_proxy
+ expect_output "$tmp"
+ BOGUS_PROXY=$tmp run_buildah 1 run $cid printenv BOGUS_PROXY
+}
+
+@test "from-image-by-id" {
+ skip_if_chroot
+ skip_if_no_runtime
+
+ _prefetch busybox
+ run_buildah from --cidfile ${TEST_SCRATCH_DIR}/cid busybox
+ cid=$(cat ${TEST_SCRATCH_DIR}/cid)
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah copy ${cid} ${TEST_SCRATCH_DIR}/randomfile /
+ run_buildah commit --iidfile ${TEST_SCRATCH_DIR}/iid ${cid}
+ iid=$(cat ${TEST_SCRATCH_DIR}/iid)
+ run_buildah from --cidfile ${TEST_SCRATCH_DIR}/cid2 ${iid}
+ cid2=$(cat ${TEST_SCRATCH_DIR}/cid2)
+ run_buildah run ${cid2} cat /etc/hosts
+ truncated=${iid##*:}
+ truncated="${truncated:0:12}"
+ expect_output --substring ${truncated}-working-container
+ run_buildah run ${cid2} hostname -f
+ expect_output "${cid2:0:12}"
+}
diff --git a/tests/git-daemon/release-1.11-rhel.tar.gz b/tests/git-daemon/release-1.11-rhel.tar.gz
new file mode 100644
index 0000000..47474f8
--- /dev/null
+++ b/tests/git-daemon/release-1.11-rhel.tar.gz
Binary files differ
diff --git a/tests/git-daemon/repo-with-containerfile-on-old-commit.tar.gz b/tests/git-daemon/repo-with-containerfile-on-old-commit.tar.gz
new file mode 100644
index 0000000..a002f14
--- /dev/null
+++ b/tests/git-daemon/repo-with-containerfile-on-old-commit.tar.gz
Binary files differ
diff --git a/tests/git-daemon/repo.tar.gz b/tests/git-daemon/repo.tar.gz
new file mode 100644
index 0000000..ff69ea7
--- /dev/null
+++ b/tests/git-daemon/repo.tar.gz
Binary files differ
diff --git a/tests/git-daemon/subdirectory.tar.gz b/tests/git-daemon/subdirectory.tar.gz
new file mode 100644
index 0000000..f76cd62
--- /dev/null
+++ b/tests/git-daemon/subdirectory.tar.gz
Binary files differ
diff --git a/tests/help.bats b/tests/help.bats
new file mode 100644
index 0000000..4d57a7f
--- /dev/null
+++ b/tests/help.bats
@@ -0,0 +1,95 @@
+#!/usr/bin/env bats
+
+load helpers
+
+# run 'buildah help', parse the output looking for 'Available Commands';
+# return that list.
+function buildah_commands() {
+ run_buildah help "$@" |\
+ awk '/^Available Commands:/{ok=1;next}/^Flags:/{ok=0}ok { print $1 }' |\
+ grep .
+}
+
+function check_help() {
+ local count=0
+ local -A found
+
+ for cmd in $(buildah_commands "$@"); do
+ # Human-readable buildah command string, with multiple spaces collapsed
+ command_string="buildah $* $cmd"
+ command_string=${command_string// / } # 'buildah x' -> 'buildah x'
+
+ # help command and --help flag have the same output
+ run_buildah help "$@" $cmd
+ local full_help=$output
+
+ # The line immediately after 'Usage:' gives us a 1-line synopsis
+ usage=$(echo "$output" | grep -A1 '^Usage:' | tail -1)
+ [ -n "$usage" ] || die "$command_string: no Usage message found"
+ expr "$usage" : "^ $command_string" > /dev/null || die "$command_string: Usage string doesn't match command"
+
+ # If usage ends in '[command]', recurse into subcommands
+ if expr "$usage" : '.*\[command\]$' >/dev/null; then
+ found[subcommands]=1
+ check_help "$@" $cmd
+ continue
+ fi
+
+ # Cross-check: if usage includes '[flags]', there must be a
+ # longer 'Flags:' section in the full --help output; vice-versa,
+ # if 'Flags:' is in full output, usage line must have '[flags]'.
+ if expr "$usage" : '.*\[flags' >/dev/null; then
+ if ! expr "$full_help" : ".*Flags:" >/dev/null; then
+ die "$command_string: Usage includes '[flags]' but has no 'Flags:' subsection"
+ fi
+ elif expr "$full_help" : ".*Flags:" >/dev/null; then
+ die "$command_string: --help has 'Flags:' section but no '[flags]' in synopsis"
+ fi
+
+ count=$(expr $count + 1)
+
+ done
+
+ run_buildah "$@" --help
+ full_usage=$output
+
+ # Any command that takes subcommands, must show usage if called without one.
+ run_buildah "$@"
+ expect_output "$full_usage"
+
+ # 'NoSuchCommand' subcommand shows usage unless the command is root 'buildah' command.
+ if [ -n "$*" ]; then
+ run_buildah "$@" NoSuchCommand
+ expect_output "$full_usage"
+ else
+ run_buildah 125 "$@" NoSuchCommand
+ expect_output --substring "unknown command"
+ fi
+
+ # This can happen if the output of --help changes, such as between
+ # the old command parser and cobra.
+ assert "$count" -gt 0 \
+ "Internal error: no commands found in 'buildah help $@' list"
+
+ # Sanity check: make sure the special loops above triggered at least once.
+ # (We've had situations where a typo makes the conditional never run in podman)
+ if [ -z "$*" ]; then
+ # This loop is copied from podman test and redundant for buildah now.
+ # But this is kept for future extension.
+ for i in subcommands; do
+ if [[ -z ${found[$i]} ]]; then
+ die "Internal error: '$i' subtest did not trigger"
+ fi
+ done
+ fi
+
+ # This can happen if the output of --help changes, such as between
+ # the old command parser and cobra.
+ assert "$count" -gt 0 \
+ "Internal error: no commands found in 'buildah help list"
+
+}
+
+@test "buildah help - basic tests" {
+ check_help
+}
diff --git a/tests/helpers.bash b/tests/helpers.bash
new file mode 100644
index 0000000..fa52d56
--- /dev/null
+++ b/tests/helpers.bash
@@ -0,0 +1,760 @@
+#!/usr/bin/env bash
+
+# Directory in which tests live
+TEST_SOURCES=${TEST_SOURCES:-$(dirname ${BASH_SOURCE})}
+
+BUILDAH_BINARY=${BUILDAH_BINARY:-$TEST_SOURCES/../bin/buildah}
+IMGTYPE_BINARY=${IMGTYPE_BINARY:-$TEST_SOURCES/../bin/imgtype}
+COPY_BINARY=${COPY_BINARY:-$TEST_SOURCES/../bin/copy}
+TUTORIAL_BINARY=${TUTORIAL_BINARY:-$TEST_SOURCES/../bin/tutorial}
+STORAGE_DRIVER=${STORAGE_DRIVER:-vfs}
+PATH=$(dirname ${BASH_SOURCE})/../bin:${PATH}
+OCI=$(${BUILDAH_BINARY} info --format '{{.host.OCIRuntime}}' || command -v runc || command -v crun)
+# Default timeout for a buildah command.
+BUILDAH_TIMEOUT=${BUILDAH_TIMEOUT:-300}
+
+# Safe reliable unchanging test image
+SAFEIMAGE_REGISTRY=${SAFEIMAGE_REGISTRY:-quay.io}
+SAFEIMAGE_USER=${SAFEIMAGE_USER:-libpod}
+SAFEIMAGE_NAME=${SAFEIMAGE_NAME:-testimage}
+SAFEIMAGE_TAG=${SAFEIMAGE_TAG:-20221018}
+SAFEIMAGE="${SAFEIMAGE:-$SAFEIMAGE_REGISTRY/$SAFEIMAGE_USER/$SAFEIMAGE_NAME:$SAFEIMAGE_TAG}"
+
+# Prompt to display when logging buildah commands; distinguish root/rootless
+_LOG_PROMPT='$'
+if [ $(id -u) -eq 0 ]; then
+ _LOG_PROMPT='#'
+fi
+
+# Shortcut for directory containing Containerfiles for bud.bats
+BUDFILES=${TEST_SOURCES}/bud
+
+# Used hundreds of times throughout all the tests
+WITH_POLICY_JSON="--signature-policy ${TEST_SOURCES}/policy.json"
+
+# We don't invoke gnupg directly in many places, but this avoids ENOTTY errors
+# when we invoke it directly in batch mode, and CI runs us without a terminal
+# attached.
+export GPG_TTY=/dev/null
+
+function setup(){
+ setup_tests
+}
+
+function setup_tests() {
+ pushd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+ # $TEST_SCRATCH_DIR is a custom scratch directory for each @test,
+ # but it is NOT EMPTY! It is the caller's responsibility to make
+ # empty subdirectories as needed. All of it will be deleted upon
+ # test completion.
+ #
+ # buildah/podman: "repository name must be lowercase".
+ # me: "but it's a local file path, not a repository name!"
+ # buildah/podman: "i dont care. no caps anywhere!"
+ TEST_SCRATCH_DIR=$(mktemp -d --dry-run --tmpdir=${BATS_TMPDIR:-${TMPDIR:-/tmp}} buildah_tests.XXXXXX | tr A-Z a-z)
+ mkdir --mode=0700 $TEST_SCRATCH_DIR
+
+ mkdir -p ${TEST_SCRATCH_DIR}/{root,runroot,sigstore,registries.d}
+ cat >${TEST_SCRATCH_DIR}/registries.d/default.yaml <<EOF
+default-docker:
+ sigstore-staging: file://${TEST_SCRATCH_DIR}/sigstore
+docker:
+ registry.access.redhat.com:
+ sigstore: https://access.redhat.com/webassets/docker/content/sigstore
+ registry.redhat.io:
+ sigstore: https://registry.redhat.io/containers/sigstore
+EOF
+
+ # Common options for all buildah and podman invocations
+ ROOTDIR_OPTS="--root ${TEST_SCRATCH_DIR}/root --runroot ${TEST_SCRATCH_DIR}/runroot --storage-driver ${STORAGE_DRIVER}"
+ BUILDAH_REGISTRY_OPTS="--registries-conf ${TEST_SOURCES}/registries.conf --registries-conf-dir ${TEST_SCRATCH_DIR}/registries.d --short-name-alias-conf ${TEST_SCRATCH_DIR}/cache/shortnames.conf"
+ PODMAN_REGISTRY_OPTS="--registries-conf ${TEST_SOURCES}/registries.conf"
+}
+
+function starthttpd() {
+ pushd ${2:-${TEST_SCRATCH_DIR}} > /dev/null
+ go build -o serve ${TEST_SOURCES}/serve/serve.go
+ portfile=$(mktemp)
+ if test -z "${portfile}"; then
+ echo error creating temporaty file
+ exit 1
+ fi
+ ./serve ${1:-${BATS_TMPDIR}} 0 ${portfile} &
+ HTTP_SERVER_PID=$!
+ waited=0
+ while ! test -s ${portfile} ; do
+ sleep 0.1
+ if test $((++waited)) -ge 300 ; then
+ echo test http server did not start within timeout
+ exit 1
+ fi
+ done
+ HTTP_SERVER_PORT=$(cat ${portfile})
+ rm -f ${portfile}
+ popd > /dev/null
+}
+
+function stophttpd() {
+ if test -n "$HTTP_SERVER_PID" ; then
+ kill -HUP ${HTTP_SERVER_PID}
+ unset HTTP_SERVER_PID
+ unset HTTP_SERVER_PORT
+ fi
+ true
+}
+
+function teardown(){
+ teardown_tests
+}
+
+function teardown_tests() {
+ stophttpd
+ stop_git_daemon
+ stop_registry
+
+ # Workaround for #1991 - buildah + overlayfs leaks mount points.
+ # Many tests leave behind /var/tmp/.../root/overlay and sub-mounts;
+ # let's find those and clean them up, otherwise 'rm -rf' fails.
+ # 'sort -r' guarantees that we umount deepest subpaths first.
+ mount |\
+ awk '$3 ~ testdir { print $3 }' testdir="^${TEST_SCRATCH_DIR}/" |\
+ sort -r |\
+ xargs --no-run-if-empty --max-lines=1 umount
+
+ rm -fr ${TEST_SCRATCH_DIR}
+
+ popd
+}
+
+function normalize_image_name() {
+ for img in "$@"; do
+ if [[ "${img##*/}" == "$img" ]] ; then
+ echo -n docker.io/library/"$img"
+ elif [[ docker.io/"${img##*/}" == "$img" ]] ; then
+ echo -n docker.io/library/"${img##*/}"
+ else
+ echo -n "$img"
+ fi
+ done
+}
+
+function _prefetch() {
+ if [ -z "${_BUILDAH_IMAGE_CACHEDIR}" ]; then
+ _pgid=$(sed -ne 's/^NSpgid:\s*//p' /proc/$$/status)
+ export _BUILDAH_IMAGE_CACHEDIR=${BATS_TMPDIR}/buildah-image-cache.$_pgid
+ mkdir -p ${_BUILDAH_IMAGE_CACHEDIR}
+ fi
+
+ local storage=
+ for img in "$@"; do
+ if [[ "$img" =~ '[vfs@' ]] ; then
+ storage="$img"
+ continue
+ fi
+ img=$(normalize_image_name "$img")
+ echo "# [checking for: $img]" >&2
+ fname=$(tr -c a-zA-Z0-9.- - <<< "$img")
+ if [ -d $_BUILDAH_IMAGE_CACHEDIR/$fname ]; then
+ echo "# [restoring from cache: $_BUILDAH_IMAGE_CACHEDIR / $img]" >&2
+ copy dir:$_BUILDAH_IMAGE_CACHEDIR/$fname containers-storage:"$storage""$img"
+ else
+ rm -fr $_BUILDAH_IMAGE_CACHEDIR/$fname
+ echo "# [copy docker://$img dir:$_BUILDAH_IMAGE_CACHEDIR/$fname]" >&2
+ for attempt in $(seq 3) ; do
+ if copy docker://"$img" dir:$_BUILDAH_IMAGE_CACHEDIR/$fname ; then
+ break
+ fi
+ sleep 5
+ done
+ echo "# [copy dir:$_BUILDAH_IMAGE_CACHEDIR/$fname containers-storage:$storage$img]" >&2
+ copy dir:$_BUILDAH_IMAGE_CACHEDIR/$fname containers-storage:"$storage""$img"
+ fi
+ done
+}
+
+function createrandom() {
+ dd if=/dev/urandom bs=1 count=${2:-256} of=${1:-${BATS_TMPDIR}/randomfile} status=none
+}
+
+###################
+# random_string # Returns a pseudorandom human-readable string
+###################
+#
+# Numeric argument, if present, is desired length of string
+#
+function random_string() {
+ local length=${1:-10}
+
+ head /dev/urandom | tr -dc a-zA-Z0-9 | head -c$length
+}
+
+function buildah() {
+ ${BUILDAH_BINARY} ${BUILDAH_REGISTRY_OPTS} ${ROOTDIR_OPTS} "$@"
+}
+
+function imgtype() {
+ ${IMGTYPE_BINARY} ${ROOTDIR_OPTS} "$@"
+}
+
+function copy() {
+ ${COPY_BINARY} --max-parallel-downloads=1 ${ROOTDIR_OPTS} ${BUILDAH_REGISTRY_OPTS} "$@"
+}
+
+function podman() {
+ command ${PODMAN_BINARY:-podman} ${PODMAN_REGISTRY_OPTS} ${ROOTDIR_OPTS} "$@"
+}
+
+# There are various scenarios where we would like to execute `tests` as rootless user, however certain commands like `buildah mount`
+# do not work in rootless session since a normal user cannot mount a filesystem unless they're in a user namespace along with its
+# own mount namespace. In order to run such specific commands from a rootless session we must perform `buildah unshare`.
+# Following function makes sure that invoked command is triggered inside a `buildah unshare` session if env is rootless.
+function run_unshared() {
+ if is_rootless; then
+ $BUILDAH_BINARY unshare "$@"
+ else
+ command "$@"
+ fi
+}
+
+function mkdir() {
+ run_unshared mkdir "$@"
+}
+
+function touch() {
+ run_unshared touch "$@"
+}
+
+function cp() {
+ run_unshared cp "$@"
+}
+
+function rm() {
+ run_unshared rm "$@"
+}
+
+
+#################
+# run_buildah # Invoke buildah, with timeout, using BATS 'run'
+#################
+#
+# This is the preferred mechanism for invoking buildah:
+#
+# * we use 'timeout' to abort (with a diagnostic) if something
+# takes too long; this is preferable to a CI hang.
+# * we log the command run and its output. This doesn't normally
+# appear in BATS output, but it will if there's an error.
+# * we check exit status. Since the normal desired code is 0,
+# that's the default; but the first argument can override:
+#
+# run_buildah 125 nonexistent-subcommand
+# run_buildah '?' some-other-command # let our caller check status
+#
+# Since we use the BATS 'run' mechanism, $output and $status will be
+# defined for our caller.
+#
+function run_buildah() {
+ # Number as first argument = expected exit code; default 0
+ # --retry as first argument = retry 3 times on error (eg registry flakes)
+ local expected_rc=0
+ local retry=1
+ case "$1" in
+ [0-9]) expected_rc=$1; shift;;
+ [1-9][0-9]) expected_rc=$1; shift;;
+ [12][0-9][0-9]) expected_rc=$1; shift;;
+ '?') expected_rc= ; shift;; # ignore exit code
+ --retry) retry=3; shift;; # retry network flakes
+ esac
+
+ # Remember command args, for possible use in later diagnostic messages
+ MOST_RECENT_BUILDAH_COMMAND="buildah $*"
+
+ # If session is rootless and `buildah mount` is invoked, perform unshare,
+ # since normal user cannot mount a filesystem unless they're in a user namespace along with its own mount namespace.
+ if is_rootless; then
+ if [[ "$1" =~ mount ]]; then
+ set "unshare" "$BUILDAH_BINARY" ${BUILDAH_REGISTRY_OPTS} ${ROOTDIR_OPTS} "$@"
+ fi
+ fi
+
+ while [ $retry -gt 0 ]; do
+ retry=$(( retry - 1 ))
+
+ # stdout is only emitted upon error; this echo is to help a debugger
+ echo "${_LOG_PROMPT} $BUILDAH_BINARY $*"
+ run env CONTAINERS_CONF=${CONTAINERS_CONF:-$(dirname ${BASH_SOURCE})/containers.conf} timeout --foreground --kill=10 $BUILDAH_TIMEOUT ${BUILDAH_BINARY} ${BUILDAH_REGISTRY_OPTS} ${ROOTDIR_OPTS} "$@"
+ # without "quotes", multiple lines are glommed together into one
+ if [ -n "$output" ]; then
+ echo "$output"
+ fi
+ if [ "$status" -ne 0 ]; then
+ echo -n "[ rc=$status ";
+ if [ -n "$expected_rc" ]; then
+ if [ "$status" -eq "$expected_rc" ]; then
+ echo -n "(expected) ";
+ else
+ echo -n "(** EXPECTED $expected_rc **) ";
+ fi
+ fi
+ echo "]"
+ fi
+
+ if [ "$status" -eq 124 -o "$status" -eq 137 ]; then
+ # FIXME: 'timeout -v' requires coreutils-8.29; travis seems to have
+ # an older version. If/when travis updates, please add -v
+ # to the 'timeout' command above, and un-comment this out:
+ # if expr "$output" : ".*timeout: sending" >/dev/null; then
+ echo "*** TIMED OUT ***"
+ # This does not get the benefit of a retry
+ false
+ fi
+
+ if [ -n "$expected_rc" ]; then
+ if [ "$status" -eq "$expected_rc" ]; then
+ return
+ elif [ $retry -gt 0 ]; then
+ echo "[ RETRYING ]" >&2
+ sleep 30
+ else
+ die "exit code is $status; expected $expected_rc"
+ fi
+ fi
+ done
+}
+
+#########
+# die # Abort with helpful message
+#########
+function die() {
+ echo "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv" >&2
+ echo "#| FAIL: $*" >&2
+ echo "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" >&2
+ false
+}
+
+############
+# assert # Compare actual vs expected string; fail if mismatch
+############
+#
+# Compares string (default: $output) against the given string argument.
+# By default we do an exact-match comparison against $output, but there
+# are two different ways to invoke us, each with an optional description:
+#
+# assert "EXPECT" [DESCRIPTION]
+# assert "RESULT" "OP" "EXPECT" [DESCRIPTION]
+#
+# The first form (one or two arguments) does an exact-match comparison
+# of "$output" against "EXPECT". The second (three or four args) compares
+# the first parameter against EXPECT, using the given OPerator. If present,
+# DESCRIPTION will be displayed on test failure.
+#
+# Examples:
+#
+# assert "this is exactly what we expect"
+# assert "${lines[0]}" =~ "^abc" "first line begins with abc"
+#
+function assert() {
+ local actual_string="$output"
+ local operator='=='
+ local expect_string="$1"
+ local testname="$2"
+
+ case "${#*}" in
+ 0) die "Internal error: 'assert' requires one or more arguments" ;;
+ 1|2) ;;
+ 3|4) actual_string="$1"
+ operator="$2"
+ expect_string="$3"
+ testname="$4"
+ ;;
+ *) die "Internal error: too many arguments to 'assert" ;;
+ esac
+
+ # Comparisons.
+ # Special case: there is no !~ operator, so fake it via '! x =~ y'
+ local not=
+ local actual_op="$operator"
+ if [[ $operator == '!~' ]]; then
+ not='!'
+ actual_op='=~'
+ fi
+ if [[ $operator == '=' || $operator == '==' ]]; then
+ # Special case: we can't use '=' or '==' inside [[ ... ]] because
+ # the right-hand side is treated as a pattern... and '[xy]' will
+ # not compare literally. There seems to be no way to turn that off.
+ if [ "$actual_string" = "$expect_string" ]; then
+ return
+ fi
+ elif [[ $operator == '!=' ]]; then
+ # Same special case as above
+ if [ "$actual_string" != "$expect_string" ]; then
+ return
+ fi
+ else
+ if eval "[[ $not \$actual_string $actual_op \$expect_string ]]"; then
+ return
+ elif [ $? -gt 1 ]; then
+ die "Internal error: could not process 'actual' $operator 'expect'"
+ fi
+ fi
+
+ # Test has failed. Get a descriptive test name.
+ if [ -z "$testname" ]; then
+ testname="${MOST_RECENT_BUILDAH_COMMAND:-[no test name given]}"
+ fi
+
+ # Display optimization: the typical case for 'expect' is an
+ # exact match ('='), but there are also '=~' or '!~' or '-ge'
+ # and the like. Omit the '=' but show the others; and always
+ # align subsequent output lines for ease of comparison.
+ local op=''
+ local ws=''
+ if [ "$operator" != '==' ]; then
+ op="$operator "
+ ws=$(printf "%*s" ${#op} "")
+ fi
+
+ # This is a multi-line message, which may in turn contain multi-line
+ # output, so let's format it ourself, readably
+ local actual_split
+ IFS=$'\n' read -rd '' -a actual_split <<<"$actual_string" || true
+ printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
+ printf "#| FAIL: %s\n" "$testname" >&2
+ printf "#| expected: %s'%s'\n" "$op" "$expect_string" >&2
+ printf "#| actual: %s'%s'\n" "$ws" "${actual_split[0]}" >&2
+ local line
+ for line in "${actual_split[@]:1}"; do
+ printf "#| > %s'%s'\n" "$ws" "$line" >&2
+ done
+ printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
+ false
+}
+
+###################
+# expect_output # [obsolete; kept for compatibility]
+###################
+#
+# An earlier version of assert().
+#
+function expect_output() {
+  # Compare the output of the most recent run_buildah command (or an
+  # explicitly supplied string) against an expected value, via assert().
+  #
+  # Options:
+  #   --from=STRING   examine STRING instead of $output
+  #   --substring     match as a substring (assert's '=~') instead of equality
+  #
+  # All remaining arguments are passed through to assert().
+
+  # By default we examine $output, the result of run_buildah
+  local actual="$output"
+  local operator='=='
+
+  # option processing: recognize --from="...", --substring
+  local opt
+  for opt; do
+    # strip everything up to and including the first '=' to get the value
+    local value=$(expr "$opt" : '[^=]*=\(.*\)')
+    case "$opt" in
+      --from=*) actual="$value"; shift;;
+      --substring) operator='=~'; shift;;
+      --) shift; break;;
+      -*) die "Invalid option '$opt'" ;;
+      *) break;;
+    esac
+  done
+
+  assert "$actual" "$operator" "$@"
+}
+
+#######################
+# expect_line_count # Check the expected number of output lines
+#######################
+#
+# ...from the most recent run_buildah command
+#
+function expect_line_count() {
+  # Check that the most recent run_buildah command produced exactly $1
+  # lines of output (as captured in bats' $lines array). $2 is an
+  # optional test name; it defaults to the most recent buildah command.
+  local expect="$1"
+  local testname="${2:-${MOST_RECENT_BUILDAH_COMMAND:-[no test name given]}}"
+
+  local actual="${#lines[@]}"
+  if [ "$actual" -eq "$expect" ]; then
+    return
+  fi
+
+  # Test has failed: emit a framed, readable failure report on stderr.
+  # Pass $testname as a printf ARGUMENT, not as part of the format string:
+  # a literal '%' in the buildah command line must not be interpreted.
+  # (This matches how assert() formats its FAIL line.)
+  printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
+  printf "#| FAIL: %s\n" "$testname" >&2
+  printf "#| Expected %d lines of output, got %d\n" "$expect" "$actual" >&2
+  printf "#| Output was:\n" >&2
+  local line
+  for line in "${lines[@]}"; do
+    printf "#| >%s\n" "$line" >&2
+  done
+  printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
+  false
+}
+
+# Verify that the last buildah invocation rejected an option ($1) that
+# appeared after positional arguments: exit status 125 and the standard
+# "no options ... can be specified after" diagnostic.
+function check_options_flag_err() {
+  flag="$1"
+  [[ $status -eq 125 ]]
+  [[ $output == *"no options ($flag) can be specified after"* ]]
+}
+
+#################
+# is_rootless # Check if we run as normal user
+#################
+# Succeed when we are running as an ordinary (non-root) user.
+function is_rootless() {
+  test "$(id -u)" -ne 0
+}
+
+#################
+# has_supplemental_groups # Check that account has additional groups
+#################
+# Succeed when the current account belongs to groups beyond its primary
+# one (i.e. the full group list differs from the primary gid alone).
+function has_supplemental_groups() {
+  test "$(id -g)" != "$(id -G)"
+}
+
+#################################
+# skip_if_rootless_environment # `mount` or its variant needs unshare
+#################################
+# Skip the current test when running rootless: 'mount' and its variants
+# may need an unshare wrapper there.
+function skip_if_rootless_environment() {
+  ! is_rootless || skip "${1:-test is being invoked from rootless environment and might need unshare}"
+}
+
+#################################
+# skip_if_root_environment #
+#################################
+# Skip the current test when running as root.
+function skip_if_root_environment() {
+  is_rootless || skip "${1:-test is being invoked from root environment}"
+}
+
+####################
+# skip_if_chroot #
+####################
+# Skip the current test under chroot isolation.
+function skip_if_chroot() {
+  [ "$BUILDAH_ISOLATION" != chroot ] || skip "${1:-test does not work when \$BUILDAH_ISOLATION = chroot}"
+}
+
+######################
+# skip_if_rootless #
+######################
+# Skip the current test under rootless isolation.
+function skip_if_rootless() {
+  [ "$BUILDAH_ISOLATION" != rootless ] || skip "${1:-test does not work when \$BUILDAH_ISOLATION = rootless}"
+}
+
+##################################
+# skip_if_rootless_and_cgroupv1 #
+##################################
+# Skip the current test when BOTH conditions hold: rootless isolation
+# and a cgroups-v1 host.
+function skip_if_rootless_and_cgroupv1() {
+  if test "$BUILDAH_ISOLATION" = rootless && ! is_cgroupsv2; then
+    skip "${1:-test does not work when \$BUILDAH_ISOLATION = rootless} and not cgroupv2"
+  fi
+}
+
+########################
+# skip_if_no_runtime # 'buildah run' can't work without a runtime
+########################
+# Skip unless an OCI runtime binary ($OCI) is available on $PATH;
+# 'buildah run' cannot work without one.
+function skip_if_no_runtime() {
+  if ! type -p "${OCI}" &> /dev/null; then
+    skip "runtime \"$OCI\" not found"
+  fi
+}
+
+#######################
+# skip_if_no_podman # we need 'podman' to test how we interact with podman
+#######################
+# Skip unless podman is installed; some tests exercise buildah/podman
+# interaction.
+function skip_if_no_podman() {
+  run which ${PODMAN_BINARY:-podman}
+  if [ "$status" -ne 0 ]; then
+    skip "podman is not installed"
+  fi
+}
+
+##################
+# is_cgroupsv2 # Returns true if host system has cgroupsv2 enabled
+##################
+# Succeed when the host has cgroups v2 enabled (i.e. /sys/fs/cgroup is
+# a cgroup2 filesystem).
+function is_cgroupsv2() {
+  test "$(stat -f -c %T /sys/fs/cgroup)" = cgroup2fs
+}
+
+#######################
+# skip_if_cgroupsv2 # Some tests don't work with cgroupsv2
+#######################
+# Skip tests that are incompatible with cgroups v2.
+function skip_if_cgroupsv2() {
+  ! is_cgroupsv2 || skip "${1:-test does not work with cgroups v2}"
+}
+
+#######################
+# skip_if_cgroupsv1 # Some tests don't work with cgroupsv1
+#######################
+# Skip tests that are incompatible with cgroups v1.
+function skip_if_cgroupsv1() {
+  is_cgroupsv2 || skip "${1:-test does not work with cgroups v1}"
+}
+
+##########################
+# skip_if_in_container #
+##########################
+function skip_if_in_container() {
+ if test "$CONTAINER" = "podman"; then
+ skip "This test is not working inside a container"
+ fi
+}
+
+#######################
+# skip_if_no_docker #
+#######################
+function skip_if_no_docker() {
+ which docker || skip "docker is not installed"
+ systemctl -q is-active docker || skip "docker.service is not active"
+
+ # Confirm that this is really truly docker, not podman.
+ docker_version=$(docker --version)
+ if [[ $docker_version =~ podman ]]; then
+ skip "this test needs actual docker, not podman-docker"
+ fi
+}
+
+function skip_if_no_unshare() {
+  # Skip unless unshare(1) is present and can create the user, mount,
+  # and pid namespaces that rootless tests rely on.
+  run which ${UNSHARE_BINARY:-unshare}
+  if [[ $status -ne 0 ]]; then
+    skip "unshare is not installed"
+  fi
+  # -U: new user namespace; -r: map the current user to root inside it
+  if ! unshare -Ur true ; then
+    skip "unshare was not able to create a user namespace"
+  fi
+  # -m: additionally a mount namespace
+  if ! unshare -Urm true ; then
+    skip "unshare was not able to create a mount namespace"
+  fi
+  # -p -f: additionally a pid namespace, forking the child into it
+  if ! unshare -Urmpf true ; then
+    skip "unshare was not able to create a pid namespace"
+  fi
+}
+
+function start_git_daemon() {
+  # Unpack a canned git repository (optionally overridden via $1, a
+  # gzipped tarball) and serve it with 'git daemon' on a random port.
+  # Sets the global $GITPORT for callers; the daemon's pid is written to
+  # ${TEST_SCRATCH_DIR}/git-daemon/pid for stop_git_daemon.
+  daemondir=${TEST_SCRATCH_DIR}/git-daemon
+  mkdir -p ${daemondir}/repo
+  gzip -dc < ${1:-${TEST_SOURCES}/git-daemon/repo.tar.gz} | tar x -C ${daemondir}/repo
+  # $RANDOM is 0..32767, so this picks a port in 32768..65535
+  GITPORT=$(($RANDOM + 32768))
+  # NOTE(review): there is no check that the daemon actually bound the
+  # port; a collision with an already-used port would go unnoticed here.
+  git daemon --detach --pid-file=${TEST_SCRATCH_DIR}/git-daemon/pid --reuseaddr --port=${GITPORT} --base-path=${daemondir} ${daemondir}
+}
+
+# Stop the daemon started by start_git_daemon, if its pid file exists
+# and is non-empty, and clean up the pid file.
+function stop_git_daemon() {
+  local pidfile=${TEST_SCRATCH_DIR}/git-daemon/pid
+  if test -s "$pidfile" ; then
+    kill $(cat "$pidfile")
+    rm -f "$pidfile"
+  fi
+}
+
+# Bring up a registry server using buildah with vfs and chroot as a cheap
+# substitute for podman, accessible only to user $1 using password $2 on the
+# local system at a dynamically-allocated port.
+# Requires openssl.
+# A user name and password can be supplied as the two parameters, or default
+# values of "testuser" and "testpassword" will be used.
+# Sets REGISTRY_PID, REGISTRY_PORT (to append to "localhost:"), and
+# REGISTRY_DIR (where the CA cert can be found) on success.
+function start_registry() {
+  local testuser="${1:-testuser}"
+  local testpassword="${2:-testpassword}"
+  local REGISTRY_IMAGE=quay.io/libpod/registry:2.8
+  # Registry configuration: TLS with a throwaway self-signed cert,
+  # htpasswd auth, and "addr: :0" so the registry picks a free port
+  # which we then parse back out of its log.
+  local config='
+version: 0.1
+log:
+  fields:
+    service: registry
+storage:
+  cache:
+    blobdescriptor: inmemory
+  filesystem:
+    rootdirectory: /var/lib/registry
+http:
+  addr: :0
+  headers:
+    X-Content-Type-Options: [nosniff]
+  tls:
+    certificate: /etc/docker/registry/localhost.crt
+    key: /etc/docker/registry/localhost.key
+health:
+  storagedriver:
+    enabled: true
+    interval: 10s
+    threshold: 3
+auth:
+  htpasswd:
+    realm: buildah-realm
+    path: /etc/docker/registry/htpasswd
+'
+  # roughly equivalent to "htpasswd -nbB testuser testpassword", the registry uses
+  # the same package this does for verifying passwords against hashes in htpasswd files
+  # NOTE(review): htpasswd is assigned without 'local', so it leaks into
+  # the caller's scope — confirm nothing depends on that.
+  htpasswd=${testuser}:$(buildah passwd ${testpassword})
+
+  # generate the htpasswd and config.yml files for the registry
+  mkdir -p "${TEST_SCRATCH_DIR}"/registry/root "${TEST_SCRATCH_DIR}"/registry/run "${TEST_SCRATCH_DIR}"/registry/certs "${TEST_SCRATCH_DIR}"/registry/config
+  cat > "${TEST_SCRATCH_DIR}"/registry/config/htpasswd <<< "$htpasswd"
+  cat > "${TEST_SCRATCH_DIR}"/registry/config/config.yml <<< "$config"
+  chmod 644 "${TEST_SCRATCH_DIR}"/registry/config/htpasswd "${TEST_SCRATCH_DIR}"/registry/config/config.yml
+
+  # generate a new key and certificate
+  if ! openssl req -newkey rsa:4096 -nodes -sha256 -keyout "${TEST_SCRATCH_DIR}"/registry/certs/localhost.key -x509 -days 2 -addext "subjectAltName = DNS:localhost" -out "${TEST_SCRATCH_DIR}"/registry/certs/localhost.crt -subj "/CN=localhost" ; then
+    die error creating new key and certificate
+  fi
+  chmod 644 "${TEST_SCRATCH_DIR}"/registry/certs/localhost.crt
+  chmod 600 "${TEST_SCRATCH_DIR}"/registry/certs/localhost.key
+  # use a copy of the server's certificate for validation from a client
+  cp "${TEST_SCRATCH_DIR}"/registry/certs/localhost.crt "${TEST_SCRATCH_DIR}"/registry/
+
+  # create a container in its own storage
+  _prefetch "[vfs@${TEST_SCRATCH_DIR}/registry/root+${TEST_SCRATCH_DIR}/registry/run]" ${REGISTRY_IMAGE}
+  ctr=$(${BUILDAH_BINARY} --storage-driver vfs --root "${TEST_SCRATCH_DIR}"/registry/root --runroot "${TEST_SCRATCH_DIR}"/registry/run from --quiet --pull-never ${REGISTRY_IMAGE})
+  ${BUILDAH_BINARY} --storage-driver vfs --root "${TEST_SCRATCH_DIR}"/registry/root --runroot "${TEST_SCRATCH_DIR}"/registry/run copy $ctr "${TEST_SCRATCH_DIR}"/registry/config/htpasswd "${TEST_SCRATCH_DIR}"/registry/config/config.yml "${TEST_SCRATCH_DIR}"/registry/certs/localhost.key "${TEST_SCRATCH_DIR}"/registry/certs/localhost.crt /etc/docker/registry/
+
+  # fire it up: run the registry as a bash coprocess, with its stderr
+  # (where it logs) captured for the port-parsing loop below
+  coproc ${BUILDAH_BINARY} --storage-driver vfs --root "${TEST_SCRATCH_DIR}"/registry/root --runroot "${TEST_SCRATCH_DIR}"/registry/run run --net host "$ctr" /entrypoint.sh /etc/docker/registry/config.yml 2> "${TEST_SCRATCH_DIR}"/registry/registry.log
+
+  # record the coprocess's ID and try to parse the listening port from the log
+  # we're separating all of this from the storage for any test that might call
+  # this function and using vfs to minimize the cleanup required
+  REGISTRY_PID="${COPROC_PID}"
+  REGISTRY_DIR="${TEST_SCRATCH_DIR}"/registry
+  REGISTRY_PORT=
+  local waited=0
+  # poll once per second, up to $BUILDAH_TIMEOUT seconds, for the
+  # kernel-assigned port to appear in the registry's log
+  while [ -z "${REGISTRY_PORT}" ] ; do
+    if [ $waited -ge $BUILDAH_TIMEOUT ] ; then
+      echo Could not determine listening port from log:
+      sed -e 's/^/ >/' ${TEST_SCRATCH_DIR}/registry/registry.log
+      stop_registry
+      # NOTE(review): under bats' errexit this 'false' aborts the calling
+      # test; it does not merely break out of the loop — confirm intent.
+      false
+    fi
+    waited=$((waited+1))
+    sleep 1
+    REGISTRY_PORT=$(sed -ne 's^.*listening on.*:\([0-9]\+\),.*^\1^p' ${TEST_SCRATCH_DIR}/registry/registry.log)
+  done
+
+  # push the registry image we just started... to itself, as a confidence check
+  if ! ${BUILDAH_BINARY} --storage-driver vfs --root "${REGISTRY_DIR}"/root --runroot "${REGISTRY_DIR}"/run push --cert-dir "${REGISTRY_DIR}" --creds "${testuser}":"${testpassword}" "${REGISTRY_IMAGE}" localhost:"${REGISTRY_PORT}"/registry; then
+    echo error pushing to /registry repository at localhost:$REGISTRY_PORT
+    stop_registry
+    false
+  fi
+}
+
+function stop_registry() {
+  # Tear down the registry started by start_registry: kill the buildah
+  # coprocess, delete its private vfs storage, and clear the REGISTRY_*
+  # globals. Safe to call even if start_registry only partially ran.
+  if test -n "${REGISTRY_PID}" ; then
+    kill "${REGISTRY_PID}"
+    # reap the coprocess; ignore its (likely nonzero) exit status
+    wait "${REGISTRY_PID}" || true
+  fi
+  unset REGISTRY_PID
+  unset REGISTRY_PORT
+  if test -n "${REGISTRY_DIR}" ; then
+    # remove images from the registry's dedicated store before deleting it
+    ${BUILDAH_BINARY} --storage-driver vfs --root "${REGISTRY_DIR}"/root --runroot "${REGISTRY_DIR}"/run rmi -a -f
+    rm -fr "${REGISTRY_DIR}"
+  fi
+  unset REGISTRY_DIR
+}
diff --git a/tests/helpers.bash.t b/tests/helpers.bash.t
new file mode 100755
index 0000000..ec49519
--- /dev/null
+++ b/tests/helpers.bash.t
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+# tests for helpers.bash
+#
+
+. $(dirname ${BASH_SOURCE})/helpers.bash
+
+INDEX=1
+RC=0
+
+# t (true) : tests that should pass
+# Runs assert() with the given arguments and emits one TAP-style line;
+# on unexpected failure also prints assert's diagnostics and sets RC=1.
+function t() {
+  result=$(assert "$@" 2>&1)
+  status=$?
+
+  if [[ $status -ne 0 ]]; then
+    echo "not ok $INDEX $*"
+    echo "$result"
+    RC=1
+  else
+    echo "ok $INDEX $*"
+  fi
+
+  INDEX=$((INDEX + 1))
+}
+
+# f (false) : tests that should fail
+# Runs assert() with the given arguments, expecting it to FAIL; emits one
+# TAP-style line and sets RC=1 if the assertion unexpectedly passed.
+function f() {
+  result=$(assert "$@" 2>&1)
+  status=$?
+
+  if [[ $status -eq 0 ]]; then
+    echo "not ok $INDEX ! $* [passed, should have failed]"
+    RC=1
+  else
+    echo "ok $INDEX ! $*"
+  fi
+
+  INDEX=$((INDEX + 1))
+}
+
+
+
+t "" = ""
+t "a" != ""
+t "" != "a"
+
+t "a" = "a"
+t "aa" == "aa"
+t "a[b]{c}" = "a[b]{c}"
+
+t "abcde" =~ "a"
+t "abcde" =~ "b"
+t "abcde" =~ "c"
+t "abcde" =~ "d"
+t "abcde" =~ "e"
+t "abcde" =~ "ab"
+t "abcde" =~ "abc"
+t "abcde" =~ "abcd"
+t "abcde" =~ "bcde"
+t "abcde" =~ "cde"
+t "abcde" =~ "de"
+
+t "foo" =~ "foo"
+t "foobar" =~ "foo"
+t "barfoo" =~ "foo"
+
+t 'a "AB \"CD": ef' = 'a "AB \"CD": ef'
+t 'a "AB \"CD": ef' =~ 'a "AB \\"CD": ef'
+
+t 'abcdef' !~ 'efg'
+t 'abcdef' !~ 'x'
+
+###########
+
+f "a" = "b"
+f "a" == "b"
+
+f "abcde" =~ "x"
+
+f "abcde" !~ "a"
+f "abcde" !~ "ab"
+f "abcde" !~ "abc"
+
+f "" != ""
+
+exit $RC
diff --git a/tests/history.bats b/tests/history.bats
new file mode 100644
index 0000000..859e3ff
--- /dev/null
+++ b/tests/history.bats
@@ -0,0 +1,152 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function testconfighistory() {
+  # Run 'buildah config $1 --add-history' on a fresh scratch container,
+  # commit it, and verify the committed image's Docker-format history
+  # contains $2. Unless $3 is "not-oci", the OCI-format history is
+  # checked for the same string as well.
+  config="$1"
+  expected="$2"
+  # derive container/image names from the config string: lowercase it
+  # and replace characters that are not valid in image names
+  container=$(echo "c$config" | sed -E -e 's|[[:blank:]]|_|g' -e "s,[-=/:'],_,g" | tr '[A-Z]' '[a-z]')
+  image=$(echo "i$config" | sed -E -e 's|[[:blank:]]|_|g' -e "s,[-=/:'],_,g" | tr '[A-Z]' '[a-z]')
+  run_buildah from --name "$container" --format docker scratch
+  # $config is intentionally unquoted so multi-word configs split into args
+  run_buildah config $config --add-history "$container"
+  run_buildah commit $WITH_POLICY_JSON "$container" "$image"
+  run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' "$image"
+  # NOTE(review): inspect is run twice back to back; expect_output only
+  # examines the second run. The repetition looks redundant — confirm intent.
+  run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' "$image"
+  expect_output --substring "$expected"
+  if test "$3" != "not-oci" ; then
+    run_buildah inspect --format '{{range .OCIv1.History}}{{println .CreatedBy}}{{end}}' "$image"
+    expect_output --substring "$expected"
+  fi
+}
+
+@test "history-cmd" {
+ testconfighistory "--cmd /foo" "CMD /foo"
+}
+
+@test "history-entrypoint" {
+ testconfighistory "--entrypoint /foo" "ENTRYPOINT /foo"
+}
+
+@test "history-env" {
+ testconfighistory "--env FOO=BAR" "ENV FOO=BAR"
+}
+
+@test "history-healthcheck" {
+ run_buildah from --name healthcheckctr --format docker scratch
+ run_buildah config --healthcheck "CMD /foo" --healthcheck-timeout=10s --healthcheck-interval=20s --healthcheck-retries=7 --healthcheck-start-period=30s --add-history healthcheckctr
+ run_buildah commit $WITH_POLICY_JSON healthcheckctr healthcheckimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' healthcheckimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' healthcheckimg
+ expect_output --substring "HEALTHCHECK --interval=20s --retries=7 --start-period=30s --timeout=10s CMD /foo"
+}
+
+@test "history-label" {
+ testconfighistory "--label FOO=BAR" "LABEL FOO=BAR"
+}
+
+@test "history-onbuild" {
+ run_buildah from --name onbuildctr --format docker scratch
+ run_buildah config --onbuild "CMD /foo" --add-history onbuildctr
+ run_buildah commit $WITH_POLICY_JSON onbuildctr onbuildimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' onbuildimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' onbuildimg
+ expect_output --substring "ONBUILD CMD /foo"
+}
+
+@test "commit-with-omit-history-set-to-true" {
+ run_buildah from --name onbuildctr --format docker scratch
+ run_buildah config --onbuild "CMD /foo" --add-history onbuildctr
+ run_buildah commit --omit-history $WITH_POLICY_JSON onbuildctr onbuildimg
+ run_buildah inspect --format "{{index .Docker.History}}" onbuildimg
+ expect_output "[]"
+}
+
+@test "history-port" {
+ testconfighistory "--port 80/tcp" "EXPOSE 80/tcp"
+}
+
+@test "history-shell" {
+ testconfighistory "--shell /bin/wish" "SHELL /bin/wish"
+}
+
+@test "history-stop-signal" {
+ testconfighistory "--stop-signal SIGHUP" "STOPSIGNAL SIGHUP" not-oci
+}
+
+@test "history-user" {
+ testconfighistory "--user 10:10" "USER 10:10"
+}
+
+@test "history-volume" {
+ testconfighistory "--volume /foo" "VOLUME /foo"
+}
+
+@test "history-workingdir" {
+ testconfighistory "--workingdir /foo" "WORKDIR /foo"
+}
+
+@test "history-add" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --name addctr --format docker scratch
+ run_buildah add --add-history addctr ${TEST_SCRATCH_DIR}/randomfile
+ digest="$output"
+ run_buildah commit $WITH_POLICY_JSON addctr addimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' addimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' addimg
+ expect_output --substring "ADD file:$digest"
+}
+
+@test "history-copy" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --name copyctr --format docker scratch
+ run_buildah copy --add-history copyctr ${TEST_SCRATCH_DIR}/randomfile
+ digest="$output"
+ run_buildah commit $WITH_POLICY_JSON copyctr copyimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' copyimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' copyimg
+ expect_output --substring "COPY file:$digest"
+}
+
+@test "history-run" {
+ _prefetch busybox
+ run_buildah from --name runctr --format docker $WITH_POLICY_JSON busybox
+ run_buildah run --add-history runctr -- uname -a
+ run_buildah commit $WITH_POLICY_JSON runctr runimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' runimg
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' runimg
+ expect_output --substring "/bin/sh -c uname -a"
+}
+
+@test "history should not contain vars in allowlist unless set in ARG" {
+ _prefetch busybox
+ ctxdir=${TEST_SCRATCH_DIR}/bud
+ mkdir -p $ctxdir
+ cat >$ctxdir/Dockerfile <<EOF
+FROM busybox
+RUN echo \$HTTP_PROXY
+EOF
+
+ run_buildah build $WITH_POLICY_JSON -t test --build-arg HTTP_PROXY="helloworld" ${ctxdir}
+ expect_output --substring 'helloworld'
+ run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' test
+ # history should not contain value for HTTP_PROXY since it was not in Containerfile
+ assert "$output" !~ 'HTTP_PROXY=helloworld'
+ assert "$output" !~ 'helloworld'
+}
+
+@test "history should contain vars in allowlist when set in ARG" {
+  _prefetch busybox
+  ctxdir=${TEST_SCRATCH_DIR}/bud
+  mkdir -p $ctxdir
+  cat >$ctxdir/Dockerfile <<EOF
+FROM busybox
+ARG HTTP_PROXY
+RUN echo \$HTTP_PROXY
+EOF
+
+  run_buildah build $WITH_POLICY_JSON -t test --build-arg HTTP_PROXY="helloworld" ${ctxdir}
+  expect_output --substring 'helloworld'
+  run_buildah inspect --format '{{range .Docker.History}}{{println .CreatedBy}}{{end}}' test
+  # history SHOULD contain the HTTP_PROXY value here, because the
+  # Containerfile declares it with ARG (contrast with the previous test,
+  # where it is absent and must stay out of the history)
+  expect_output --substring 'HTTP_PROXY=helloworld'
+}
diff --git a/tests/images.bats b/tests/images.bats
new file mode 100644
index 0000000..f8cf923
--- /dev/null
+++ b/tests/images.bats
@@ -0,0 +1,272 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "images-flags-order-verification" {
+ run_buildah images --all
+
+ run_buildah 125 images img1 -n
+ check_options_flag_err "-n"
+
+ run_buildah 125 images img1 --filter="service=redis" img2
+ check_options_flag_err "--filter=service=redis"
+
+ run_buildah 125 images img1 img2 img3 -q
+ check_options_flag_err "-q"
+}
+
+@test "images" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+ run_buildah images
+ expect_line_count 3
+}
+
+@test "images all test" {
+ _prefetch alpine
+ run_buildah bud $WITH_POLICY_JSON --layers -t test $BUDFILES/use-layers
+ run_buildah images
+ expect_line_count 3
+
+ run_buildah images -a
+ expect_line_count 8
+
+ # create a no name image which should show up when doing buildah images without the --all flag
+ run_buildah bud $WITH_POLICY_JSON $BUDFILES/use-layers
+ run_buildah images
+ expect_line_count 4
+}
+
+@test "images filter test" {
+ _prefetch registry.k8s.io/pause busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON registry.k8s.io/pause
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+
+ run_buildah 125 images --noheading --filter since registry.k8s.io/pause
+ expect_output 'Error: invalid image filter "since": must be in the format "filter=value or filter!=value"'
+
+
+ run_buildah images --noheading --filter since=registry.k8s.io/pause
+ expect_line_count 1
+
+ # pause* and u* should only give us pause image not busybox since its a AND between
+ # two filters
+ run_buildah images --noheading --filter "reference=pause*" --filter "reference=u*"
+ expect_line_count 1
+}
+
+@test "images format test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+ run_buildah images --format "{{.Name}}"
+ expect_line_count 2
+}
+
+@test "images noheading test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+ run_buildah images --noheading
+ expect_line_count 2
+}
+
+@test "images quiet test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+ run_buildah images --quiet
+ expect_line_count 2
+}
+
+@test "images no-trunc test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+ run_buildah images -q --no-trunc
+ expect_line_count 2
+ expect_output --substring --from="${lines[0]}" "sha256"
+}
+
+@test "images json test" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+
+ for img in '' alpine busybox; do
+ # e.g. [ { "id": "xx", ... },{ "id": "yy", ... } ]
+ # We check for the presence of some keys, but not (yet) their values.
+ # FIXME: once we can rely on 'jq' tool being present, improve this test!
+ run_buildah images --json $img
+ expect_output --from="${lines[0]}" "[" "first line of JSON output: array"
+ for key in id names digest createdat size readonly history; do
+ expect_output --substring "\"$key\": "
+ done
+ done
+}
+
+@test "images json dup test" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON $cid test
+ run_buildah tag test new-name
+
+ run_buildah images --json
+ expect_output --substring '"id": '
+}
+
+@test "images json valid" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid1=$output
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid2=$output
+ run_buildah commit $WITH_POLICY_JSON $cid1 test
+ run_buildah commit $WITH_POLICY_JSON $cid2 test2
+
+ run_buildah images --json
+ run python3 -m json.tool <<< "$output"
+ assert "$status" -eq 0 "status from python json.tool"
+}
+
+@test "specify an existing image" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+ run_buildah images alpine
+ expect_line_count 2
+}
+
+@test "specify a nonexistent image" {
+ run_buildah 125 images alpine
+ expect_output --from="${lines[0]}" "Error: alpine: image not known"
+ expect_line_count 1
+}
+
+@test "Test dangling images" {
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON $cid test
+ run_buildah commit $WITH_POLICY_JSON $cid test
+ run_buildah images
+ expect_line_count 3
+
+ run_buildah images --filter dangling=true
+ expect_output --substring " <none> "
+ expect_line_count 2
+
+ run_buildah images --filter dangling=false
+ expect_output --substring " latest "
+ expect_line_count 2
+}
+
+@test "image digest test" {
+ _prefetch busybox
+ run_buildah pull $WITH_POLICY_JSON busybox
+ run_buildah images --digests
+ expect_output --substring "sha256:"
+}
+
+@test "images in OCI format with no creation dates" {
+ mkdir -p $TEST_SCRATCH_DIR/blobs/sha256
+
+ # Create a layer.
+ dd if=/dev/zero bs=512 count=2 of=$TEST_SCRATCH_DIR/blob
+ layerdigest=$(sha256sum $TEST_SCRATCH_DIR/blob | awk '{print $1}')
+ layersize=$(stat -c %s $TEST_SCRATCH_DIR/blob)
+ mv $TEST_SCRATCH_DIR/blob $TEST_SCRATCH_DIR/blobs/sha256/${layerdigest}
+
+ # Create a configuration blob that doesn't include a "created" date.
+ now=$(TZ=UTC date +%Y-%m-%dT%H:%M:%S.%NZ)
+ arch=$(go env GOARCH)
+ cat > $TEST_SCRATCH_DIR/blob << EOF
+ {
+ "architecture": "$arch",
+ "os": "linux",
+ "config": {
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": [
+ "sh"
+ ]
+ },
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:${layerdigest}"
+ ]
+ },
+ "history": [
+ {
+ "created": "${now}",
+ "created_by": "/bin/sh -c #(nop) ADD file:${layerdigest} in / "
+ }
+ ]
+ }
+EOF
+ configdigest=$(sha256sum $TEST_SCRATCH_DIR/blob | awk '{print $1}')
+ configsize=$(stat -c %s $TEST_SCRATCH_DIR/blob)
+ mv $TEST_SCRATCH_DIR/blob $TEST_SCRATCH_DIR/blobs/sha256/${configdigest}
+
+ # Create a manifest for that configuration blob and layer.
+ cat > $TEST_SCRATCH_DIR/blob << EOF
+ {
+ "schemaVersion": 2,
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:${configdigest}",
+ "size": ${configsize}
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar",
+ "digest": "sha256:${layerdigest}",
+ "size": ${layersize}
+ }
+ ]
+ }
+EOF
+ manifestdigest=$(sha256sum $TEST_SCRATCH_DIR/blob | awk '{print $1}')
+ manifestsize=$(stat -c %s $TEST_SCRATCH_DIR/blob)
+ mv $TEST_SCRATCH_DIR/blob $TEST_SCRATCH_DIR/blobs/sha256/${manifestdigest}
+
+ # Add the manifest to the image index.
+ cat > $TEST_SCRATCH_DIR/index.json << EOF
+ {
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:${manifestdigest}",
+ "size": ${manifestsize}
+ }
+ ]
+ }
+EOF
+
+ # Mark the directory as a layout directory.
+ echo -n '{"imageLayoutVersion": "1.0.0"}' > $TEST_SCRATCH_DIR/oci-layout
+
+ # Import the image.
+ run_buildah pull oci:$TEST_SCRATCH_DIR
+
+ # Inspect the image. We shouldn't crash.
+ run_buildah inspect ${configdigest}
+ # List images. We shouldn't crash.
+ run_buildah images
+}
diff --git a/tests/imgtype/imgtype.go b/tests/imgtype/imgtype.go
new file mode 100644
index 0000000..b775cfb
--- /dev/null
+++ b/tests/imgtype/imgtype.go
@@ -0,0 +1,259 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/docker"
+ "github.com/containers/buildah/util"
+ "github.com/containers/image/v5/manifest"
+ is "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/unshare"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// main implements the "imgtype" test helper: for each image named on the
+// command line it opens the image (preferring local storage, falling back
+// to any supported image transport), reads its manifest and configuration,
+// checks that their MIME types match the expected ones, and optionally
+// prints or re-serializes the manifest/config JSON. It exits nonzero if
+// any image fails a check.
+func main() {
+	if buildah.InitReexec() {
+		return
+	}
+	unshare.MaybeReexecUsingUserNamespace(false)
+
+	// fall back to zero-value (all defaults) store options if detection fails
+	storeOptions, err := storage.DefaultStoreOptionsAutoDetectUID()
+	if err != nil {
+		storeOptions = storage.StoreOptions{}
+	}
+
+	expectedManifestType := ""
+	expectedConfigType := ""
+
+	debug := flag.Bool("debug", false, "turn on debug logging")
+	root := flag.String("root", storeOptions.GraphRoot, "storage root directory")
+	runroot := flag.String("runroot", storeOptions.RunRoot, "storage runtime directory")
+	driver := flag.String("storage-driver", storeOptions.GraphDriverName, "storage driver")
+	opts := flag.String("storage-opts", "", "storage option list (comma separated)")
+	policy := flag.String("signature-policy", "", "signature policy file")
+	mtype := flag.String("expected-manifest-type", define.OCIv1ImageManifest, "expected manifest type")
+	showm := flag.Bool("show-manifest", false, "output the manifest JSON")
+	rebuildm := flag.Bool("rebuild-manifest", false, "rebuild the manifest JSON")
+	showc := flag.Bool("show-config", false, "output the configuration JSON")
+	rebuildc := flag.Bool("rebuild-config", false, "rebuild the configuration JSON")
+	flag.Parse()
+	logrus.SetLevel(logrus.ErrorLevel)
+	if debug != nil && *debug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	// Map the requested manifest type to the config type that should
+	// accompany it; "*" disables both checks.
+	switch *mtype {
+	case define.OCIv1ImageManifest:
+		expectedManifestType = *mtype
+		expectedConfigType = v1.MediaTypeImageConfig
+	case define.Dockerv2ImageManifest:
+		expectedManifestType = *mtype
+		expectedConfigType = manifest.DockerV2Schema2ConfigMediaType
+	case "*":
+		expectedManifestType = ""
+		expectedConfigType = ""
+	default:
+		logrus.Errorf("unknown -expected-manifest-type value, expected either %q or %q or %q",
+			define.OCIv1ImageManifest, define.Dockerv2ImageManifest, "*")
+		return
+	}
+	// NOTE(review): flag.String/flag.Bool never return nil, so these nil
+	// checks are always true — harmless, but redundant.
+	if root != nil {
+		storeOptions.GraphRoot = *root
+	}
+	if runroot != nil {
+		storeOptions.RunRoot = *runroot
+	}
+	if driver != nil {
+		storeOptions.GraphDriverName = *driver
+		storeOptions.GraphDriverOptions = nil
+	}
+	if opts != nil && *opts != "" {
+		storeOptions.GraphDriverOptions = strings.Split(*opts, ",")
+	}
+	systemContext := &types.SystemContext{
+		SignaturePolicyPath: *policy,
+	}
+	args := flag.Args()
+	if len(args) == 0 {
+		flag.Usage()
+		return
+	}
+	store, err := storage.GetStore(storeOptions)
+	if err != nil {
+		logrus.Errorf("error opening storage: %v", err)
+		os.Exit(1)
+	}
+	is.Transport.SetStore(store)
+
+	// Accumulate failures across all images; the deferred function turns
+	// them into a nonzero exit status after the store is shut down.
+	errors := false
+	defer func() {
+		store.Shutdown(false) // nolint:errcheck
+		if errors {
+			os.Exit(1)
+		}
+	}()
+	for _, image := range args {
+		var ref types.ImageReference
+		oImage := v1.Image{}
+		dImage := docker.V2Image{}
+		oManifest := v1.Manifest{}
+		dManifest := docker.V2S2Manifest{}
+		manifestType := ""
+		configType := ""
+
+		// Prefer resolving the name in local storage; fall back to
+		// parsing it as a transport-qualified image reference.
+		ref, _, err := util.FindImage(store, "", systemContext, image)
+		if err != nil {
+			ref2, err2 := alltransports.ParseImageName(image)
+			if err2 != nil {
+				// report the original (storage) lookup error
+				logrus.Errorf("error parsing reference %q to an image: %v", image, err)
+				errors = true
+				continue
+			}
+			ref = ref2
+		}
+
+		ctx := context.Background()
+		img, err := ref.NewImage(ctx, systemContext)
+		if err != nil {
+			logrus.Errorf("error opening image %q: %v", image, err)
+			errors = true
+			continue
+		}
+		// NOTE(review): defer inside a loop — every opened image stays
+		// open until main returns. Acceptable for a short-lived test
+		// tool, but worth confirming.
+		defer img.Close()
+
+		config, err := img.ConfigBlob(ctx)
+		if err != nil {
+			logrus.Errorf("error reading configuration from %q: %v", image, err)
+			errors = true
+			continue
+		}
+
+		// note: this local shadows the imported "manifest" package for
+		// the remainder of the loop body
+		manifest, manifestType, err := img.Manifest(ctx)
+		if err != nil {
+			logrus.Errorf("error reading manifest from %q: %v", image, err)
+			errors = true
+			continue
+		}
+
+		if expectedManifestType != "" && manifestType != expectedManifestType {
+			logrus.Errorf("expected manifest type %q in %q, got %q", expectedManifestType, image, manifestType)
+			errors = true
+			continue
+		}
+
+		// Parse manifest and config in the expected format, and take the
+		// authoritative media types from the parsed documents themselves.
+		switch expectedManifestType {
+		case define.OCIv1ImageManifest:
+			err = json.Unmarshal(manifest, &oManifest)
+			if err != nil {
+				logrus.Errorf("error parsing manifest from %q: %v", image, err)
+				errors = true
+				continue
+			}
+			err = json.Unmarshal(config, &oImage)
+			if err != nil {
+				logrus.Errorf("error parsing config from %q: %v", image, err)
+				errors = true
+				continue
+			}
+			manifestType = oManifest.MediaType
+			configType = oManifest.Config.MediaType
+		case define.Dockerv2ImageManifest:
+			err = json.Unmarshal(manifest, &dManifest)
+			if err != nil {
+				logrus.Errorf("error parsing manifest from %q: %v", image, err)
+				errors = true
+				continue
+			}
+			err = json.Unmarshal(config, &dImage)
+			if err != nil {
+				logrus.Errorf("error parsing config from %q: %v", image, err)
+				errors = true
+				continue
+			}
+			manifestType = dManifest.MediaType
+			configType = dManifest.Config.MediaType
+		}
+
+		// Optionally round-trip (unmarshal + marshal) the manifest and/or
+		// config through the typed structs before displaying them.
+		switch manifestType {
+		case define.OCIv1ImageManifest:
+			if rebuildm != nil && *rebuildm {
+				err = json.Unmarshal(manifest, &oManifest)
+				if err != nil {
+					logrus.Errorf("error parsing manifest from %q: %v", image, err)
+					errors = true
+					continue
+				}
+				manifest, err = json.Marshal(oManifest)
+				if err != nil {
+					logrus.Errorf("error rebuilding manifest from %q: %v", image, err)
+					errors = true
+					continue
+				}
+			}
+			if rebuildc != nil && *rebuildc {
+				err = json.Unmarshal(config, &oImage)
+				if err != nil {
+					logrus.Errorf("error parsing config from %q: %v", image, err)
+					errors = true
+					continue
+				}
+				config, err = json.Marshal(oImage)
+				if err != nil {
+					logrus.Errorf("error rebuilding config from %q: %v", image, err)
+					errors = true
+					continue
+				}
+			}
+		case define.Dockerv2ImageManifest:
+			if rebuildm != nil && *rebuildm {
+				err = json.Unmarshal(manifest, &dManifest)
+				if err != nil {
+					logrus.Errorf("error parsing manifest from %q: %v", image, err)
+					errors = true
+					continue
+				}
+				manifest, err = json.Marshal(dManifest)
+				if err != nil {
+					logrus.Errorf("error rebuilding manifest from %q: %v", image, err)
+					errors = true
+					continue
+				}
+			}
+			if rebuildc != nil && *rebuildc {
+				err = json.Unmarshal(config, &dImage)
+				if err != nil {
+					logrus.Errorf("error parsing config from %q: %v", image, err)
+					errors = true
+					continue
+				}
+				config, err = json.Marshal(dImage)
+				if err != nil {
+					logrus.Errorf("error rebuilding config from %q: %v", image, err)
+					errors = true
+					continue
+				}
+			}
+		}
+		if expectedConfigType != "" && configType != expectedConfigType {
+			logrus.Errorf("expected config type %q in %q, got %q", expectedConfigType, image, configType)
+			errors = true
+			continue
+		}
+		if showm != nil && *showm {
+			fmt.Println(string(manifest))
+		}
+		if showc != nil && *showc {
+			fmt.Println(string(config))
+		}
+	}
+}
diff --git a/tests/info.bats b/tests/info.bats
new file mode 100644
index 0000000..9b4eee4
--- /dev/null
+++ b/tests/info.bats
@@ -0,0 +1,29 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "info" {
+ run_buildah info
+ expect_output --substring "host"
+
+ run_buildah info --format='{{.store}}'
+ # All of the following keys must be present in results. Order
+ # isn't guaranteed, nor is their value, but they must all exist.
+ for key in ContainerStore GraphDriverName GraphRoot RunRoot;do
+ expect_output --substring "map.*$key:"
+ done
+}
+
+@test "logging levels" {
+ # check that these logging levels are recognized
+ run_buildah --log-level=trace info
+ run_buildah --log-level=debug info
+ run_buildah --log-level=warn info
+ run_buildah --log-level=info info
+ run_buildah --log-level=error info
+ run_buildah --log-level=fatal info
+ run_buildah --log-level=panic info
+ # check that we reject bogus logging levels
+ run_buildah 125 --log-level=telepathic info
+ expect_output --substring "unable to parse log level: not a valid logrus Level"
+}
diff --git a/tests/inspect.bats b/tests/inspect.bats
new file mode 100644
index 0000000..d0977ba
--- /dev/null
+++ b/tests/inspect.bats
@@ -0,0 +1,138 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "inspect-flags-order-verification" {
+ run_buildah 125 inspect img1 -f "{{.ContainerID}}" -t="container"
+ check_options_flag_err "-f"
+
+ run_buildah 125 inspect img1 --format="{{.ContainerID}}"
+ check_options_flag_err "--format={{.ContainerID}}"
+
+ run_buildah 125 inspect img1 -t="image"
+ check_options_flag_err "-t=image"
+}
+
+@test "inspect" {
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON "$cid" alpine-image
+
+ # e.g. { map[] [PATH=/....] [] [/bin/sh] map[] map[] }
+ run_buildah inspect --format '{{.OCIv1.Config}}' alpine
+ expect_output --substring "map.*PATH=.*/bin/sh.*map"
+ inspect_basic=$output
+
+ # Now inspect the committed image. Output should be _mostly_ the same...
+ run_buildah inspect --type image --format '{{.OCIv1.Config}}' alpine-image
+ inspect_after_commit=$output
+
+ # ...except that at some point in November 2019 buildah-inspect started
+ # including version. Strip it out,
+ run_buildah --version
+ local -a output_fields=($output)
+ buildah_version=${output_fields[2]}
+ inspect_cleaned=$(echo "$inspect_after_commit" | sed "s/io.buildah.version:${buildah_version}//g")
+ expect_output --from="$inspect_cleaned" "$inspect_basic"
+
+ run_buildah images -q alpine-image
+ imageid=$output
+ run_buildah containers -q
+ containerid=$output
+
+ # This one should not include buildah version
+ run_buildah inspect --format '{{.OCIv1.Config}}' $containerid
+ expect_output "$inspect_basic"
+
+ # This one should.
+ run_buildah inspect --type image --format '{{.OCIv1.Config}}' $imageid
+ expect_output "$inspect_after_commit"
+}
+
+@test "inspect-config-is-json" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect alpine
+ expect_output --substring 'Config.*\{'
+}
+
+@test "inspect-manifest-is-json" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect alpine
+ expect_output --substring 'Manifest.*\{'
+}
+
+@test "inspect-ociv1-is-json" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect alpine
+ expect_output --substring 'OCIv1.*\{'
+}
+
+@test "inspect-docker-is-json" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect alpine
+ expect_output --substring 'Docker.*\{'
+}
+
+@test "inspect-format-config-is-json" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect --format "{{.Config}}" alpine
+ expect_output --substring '\{'
+}
+
+@test "inspect-format-manifest-is-json" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect --format "{{.Manifest}}" alpine
+ expect_output --substring '\{'
+}
+
+@test "inspect-format-ociv1-is-json" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect --format "{{.OCIv1}}" alpine
+ expect_output --substring '\{'
+}
+
+@test "inspect-format-docker-is-json" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect --format "{{.Docker}}" alpine
+ expect_output --substring '\{'
+}
+
+@test "inspect-format-docker-variant" {
+ # libimage.Normalize() converts Arch:"armhf" to Arch:"arm" and variant: "v7",
+ # so check that platform normalization happens at least for that one
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON --arch=armhf scratch
+ cid=$output
+ run_buildah inspect --format "{{.Docker.Architecture}}" $cid
+ [[ "$output" == "arm" ]]
+ run_buildah inspect --format "{{.Docker.Variant}}" $cid
+ [[ "$output" == "v7" ]]
+}
+
+@test "inspect manifest and verify OCI annotation" {
+ run_buildah manifest create foobar
+ run_buildah manifest add foobar busybox
+ # get digest of added instance
+ sha=$(echo $output | awk '{print $2}')
+ run_buildah manifest annotate --annotation hello=world foobar "$sha"
+ run_buildah manifest inspect foobar
+ # Must contain annotation key and value
+ expect_output --substring "hello"
+ expect_output --substring "world"
+}
diff --git a/tests/lists.bats b/tests/lists.bats
new file mode 100644
index 0000000..6c85d5b
--- /dev/null
+++ b/tests/lists.bats
@@ -0,0 +1,229 @@
+#!/usr/bin/env bats
+
+load helpers
+
+IMAGE_LIST=docker://registry.k8s.io/pause:3.1
+IMAGE_LIST_DIGEST=docker://registry.k8s.io/pause@sha256:f78411e19d84a252e53bff71a4407a5686c46983a2c2eeed83929b888179acea
+IMAGE_LIST_INSTANCE=docker://registry.k8s.io/pause@sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39
+IMAGE_LIST_AMD64_INSTANCE_DIGEST=sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
+IMAGE_LIST_ARM_INSTANCE_DIGEST=sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53
+IMAGE_LIST_ARM64_INSTANCE_DIGEST=sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39
+IMAGE_LIST_PPC64LE_INSTANCE_DIGEST=sha256:bcf9771c0b505e68c65440474179592ffdfa98790eb54ffbf129969c5e429990
+IMAGE_LIST_S390X_INSTANCE_DIGEST=sha256:882a20ee0df7399a445285361d38b711c299ca093af978217112c73803546d5e
+
+@test "manifest-create" {
+ run_buildah manifest create foo
+ listid="$output"
+ run_buildah 125 manifest create foo
+ assert "$output" =~ "that name is already in use"
+ run_buildah manifest create --amend foo
+ assert "$output" == "$listid"
+ # since manifest exists in local storage this should exit with `0`
+ run_buildah manifest exists foo
+ # since manifest does not exist in local storage this should exit with `1`
+ run_buildah 1 manifest exists foo2
+}
+
+@test "manifest-inspect-id" {
+ run_buildah manifest create foo
+ cid=$output
+ run_buildah manifest inspect $cid
+}
+
+@test "manifest-add" {
+ run_buildah manifest create foo
+ run_buildah manifest add foo ${IMAGE_LIST}
+ # since manifest exists in local storage this should exit with `0`
+ run_buildah manifest exists foo
+ # since manifest does not exist in local storage this should exit with `1`
+ run_buildah 1 manifest exists foo2
+ run_buildah manifest rm foo
+}
+
+@test "manifest-add local image" {
+ target=scratch-image
+ run_buildah bud $WITH_POLICY_JSON -t ${target} $BUDFILES/from-scratch
+ run_buildah manifest create foo
+ run_buildah manifest add foo ${target}
+ run_buildah manifest rm foo
+}
+
+@test "manifest-add-one" {
+ run_buildah manifest create foo
+ run_buildah manifest add --arch=arm64 foo ${IMAGE_LIST_INSTANCE}
+ run_buildah manifest inspect foo
+ expect_output --substring ${IMAGE_LIST_ARM64_INSTANCE_DIGEST}
+ run_buildah 125 inspect --type image foo
+ expect_output --substring "no image found"
+ run_buildah inspect foo
+ expect_output --substring ${IMAGE_LIST_ARM64_INSTANCE_DIGEST}
+}
+
+@test "manifest-add-all" {
+ run_buildah manifest create foo
+ run_buildah manifest add --all foo ${IMAGE_LIST}
+ run_buildah manifest inspect foo
+ expect_output --substring ${IMAGE_LIST_AMD64_INSTANCE_DIGEST}
+ expect_output --substring ${IMAGE_LIST_ARM_INSTANCE_DIGEST}
+ expect_output --substring ${IMAGE_LIST_ARM64_INSTANCE_DIGEST}
+ expect_output --substring ${IMAGE_LIST_PPC64LE_INSTANCE_DIGEST}
+ expect_output --substring ${IMAGE_LIST_S390X_INSTANCE_DIGEST}
+}
+
+@test "manifest-remove" {
+ run_buildah manifest create foo
+ run_buildah manifest add --all foo ${IMAGE_LIST}
+ run_buildah manifest inspect foo
+ expect_output --substring ${IMAGE_LIST_ARM64_INSTANCE_DIGEST}
+ run_buildah manifest remove foo ${IMAGE_LIST_ARM64_INSTANCE_DIGEST}
+ run_buildah manifest inspect foo
+ expect_output --substring ${IMAGE_LIST_AMD64_INSTANCE_DIGEST}
+ expect_output --substring ${IMAGE_LIST_ARM_INSTANCE_DIGEST}
+ expect_output --substring ${IMAGE_LIST_PPC64LE_INSTANCE_DIGEST}
+ expect_output --substring ${IMAGE_LIST_S390X_INSTANCE_DIGEST}
+
+ # ARM64 should now be gone
+ arm64=$(grep ${IMAGE_LIST_ARM64_INSTANCE_DIGEST} <<< "$output" || true)
+ assert "$arm64" = "" "arm64 instance digest found in manifest list"
+}
+
+@test "manifest-remove-not-found" {
+ run_buildah manifest create foo
+ run_buildah manifest add foo ${IMAGE_LIST}
+ run_buildah 125 manifest remove foo sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+}
+
+@test "manifest-rm failures" {
+ run_buildah 125 manifest rm foo1
+ expect_output --substring "foo1: image not known"
+}
+
+@test "manifest-push" {
+ run_buildah manifest create foo
+ run_buildah manifest add --all foo ${IMAGE_LIST}
+ run_buildah manifest push $WITH_POLICY_JSON foo dir:${TEST_SCRATCH_DIR}/pushed
+ case "$(go env GOARCH 2> /dev/null)" in
+ amd64) IMAGE_LIST_EXPECTED_INSTANCE_DIGEST=${IMAGE_LIST_AMD64_INSTANCE_DIGEST} ;;
+ arm64) IMAGE_LIST_EXPECTED_INSTANCE_DIGEST=${IMAGE_LIST_ARM64_INSTANCE_DIGEST} ;;
+ arm) IMAGE_LIST_EXPECTED_INSTANCE_DIGEST=${IMAGE_LIST_ARM_INSTANCE_DIGEST} ;;
+ ppc64le) IMAGE_LIST_EXPECTED_INSTANCE_DIGEST=${IMAGE_LIST_PPC64LE_INSTANCE_DIGEST} ;;
+ s390x) IMAGE_LIST_EXPECTED_INSTANCE_DIGEST=${IMAGE_LIST_S390X_INSTANCE_DIGEST} ;;
+ *) skip "current arch \"$(go env GOARCH 2> /dev/null)\" not present in manifest list" ;;
+ esac
+
+ run grep ${IMAGE_LIST_EXPECTED_INSTANCE_DIGEST##sha256} ${TEST_SCRATCH_DIR}/pushed/manifest.json
+ assert "$status" -eq 0 "status code of grep for expected instance digest"
+}
+
+@test "manifest-push-all" {
+ run_buildah manifest create foo
+ run_buildah manifest add --all foo ${IMAGE_LIST}
+ run_buildah manifest push $WITH_POLICY_JSON --all foo dir:${TEST_SCRATCH_DIR}/pushed
+ run sha256sum ${TEST_SCRATCH_DIR}/pushed/*
+ expect_output --substring ${IMAGE_LIST_AMD64_INSTANCE_DIGEST##sha256:}
+ expect_output --substring ${IMAGE_LIST_ARM_INSTANCE_DIGEST##sha256:}
+ expect_output --substring ${IMAGE_LIST_ARM64_INSTANCE_DIGEST##sha256:}
+ expect_output --substring ${IMAGE_LIST_PPC64LE_INSTANCE_DIGEST##sha256:}
+ expect_output --substring ${IMAGE_LIST_S390X_INSTANCE_DIGEST##sha256:}
+}
+
+@test "manifest-push-purge" {
+ run_buildah manifest create foo
+ run_buildah manifest add --arch=arm64 foo ${IMAGE_LIST}
+ run_buildah manifest inspect foo
+ run_buildah manifest push $WITH_POLICY_JSON --purge foo dir:${TEST_SCRATCH_DIR}/pushed
+ run_buildah 125 manifest inspect foo
+}
+
+@test "manifest-push-rm" {
+ run_buildah manifest create foo
+ run_buildah manifest add --arch=arm64 foo ${IMAGE_LIST}
+ run_buildah manifest inspect foo
+ run_buildah manifest push $WITH_POLICY_JSON --rm foo dir:${TEST_SCRATCH_DIR}/pushed
+ run_buildah 125 manifest inspect foo
+}
+
+@test "manifest-push should fail with nonexistent authfile" {
+ run_buildah manifest create foo
+ run_buildah manifest add --arch=arm64 foo ${IMAGE_LIST}
+ run_buildah manifest inspect foo
+ run_buildah 125 manifest push --authfile /tmp/nonexistent $WITH_POLICY_JSON --purge foo dir:${TEST_SCRATCH_DIR}/pushed
+
+}
+
+@test "manifest-from-tag" {
+ run_buildah from $WITH_POLICY_JSON --name test-container ${IMAGE_LIST}
+ run_buildah inspect --format '{{.OCIv1.Architecture}}' ${IMAGE_LIST}
+ expect_output --substring $(go env GOARCH)
+ run_buildah inspect --format '{{.OCIv1.Architecture}}' test-container
+ expect_output --substring $(go env GOARCH)
+}
+
+@test "manifest-from-digest" {
+ run_buildah from $WITH_POLICY_JSON --name test-container ${IMAGE_LIST_DIGEST}
+ run_buildah inspect --format '{{.OCIv1.Architecture}}' ${IMAGE_LIST_DIGEST}
+ expect_output --substring $(go env GOARCH)
+ run_buildah inspect --format '{{.OCIv1.Architecture}}' test-container
+ expect_output --substring $(go env GOARCH)
+}
+
+@test "manifest-from-instance" {
+ run_buildah from $WITH_POLICY_JSON --name test-container ${IMAGE_LIST_INSTANCE}
+ run_buildah inspect --format '{{.OCIv1.Architecture}}' ${IMAGE_LIST_INSTANCE}
+ expect_output --substring arm64
+ run_buildah inspect --format '{{.OCIv1.Architecture}}' test-container
+ expect_output --substring arm64
+}
+
+@test "manifest-no-matching-instance" {
+ # Check that local images which we can't load the config and history for
+ # don't just break multi-layer builds.
+ #
+ # Create a test list with some stuff in it.
+ run_buildah manifest create test-list
+ run_buildah manifest add --all test-list ${IMAGE_LIST}
+ # Remove the entry for the current arch from the list.
+ arch=$(go env GOARCH)
+ run_buildah manifest inspect test-list
+ archinstance=$(jq -r '.manifests|map(select(.platform.architecture=="'$arch'"))[].digest' <<< "$output")
+ run_buildah manifest remove test-list $archinstance
+ # Try to build using the build cache.
+ mkdir ${TEST_SCRATCH_DIR}/build
+ echo 'much content, wow.' > ${TEST_SCRATCH_DIR}/build/content.txt
+ echo 'FROM scratch' > ${TEST_SCRATCH_DIR}/build/Dockerfile
+ echo 'ADD content.txt /' >> ${TEST_SCRATCH_DIR}/build/Dockerfile
+ run_buildah bud --layers --iidfile ${TEST_SCRATCH_DIR}/image-id.txt ${TEST_SCRATCH_DIR}/build
+ # Make sure we can add the new image to the list.
+ run_buildah manifest add test-list $(< ${TEST_SCRATCH_DIR}/image-id.txt)
+}
+
+@test "manifest-add-to-list-from-storage" {
+ run_buildah pull --arch=amd64 busybox
+ run_buildah tag busybox test:amd64
+ run_buildah pull --arch=arm64 busybox
+ run_buildah tag busybox test:arm64
+ run_buildah manifest create test
+ run_buildah manifest add test test:amd64
+ run_buildah manifest add --variant=variant-something test test:arm64
+ run_buildah manifest inspect test
+ # must contain amd64
+ expect_output --substring "amd64"
+ # must contain arm64
+ expect_output --substring "arm64"
+ # must contain the custom variant string set above via --variant
+ expect_output --substring "variant-something"
+}
+
+@test "manifest-create-list-from-storage" {
+ run_buildah from --quiet --arch amd64 busybox
+ cid=$output
+ run_buildah commit $cid "$cid-committed:latest"
+ run_buildah manifest create test:latest "$cid-committed:latest"
+ run_buildah manifest inspect test
+ # must contain amd64
+ expect_output --substring "amd64"
+ # since manifest exists in local storage this should exit with `0`
+ run_buildah manifest exists test:latest
+ # since manifest does not exist in local storage this should exit with `1`
+ run_buildah 1 manifest exists test2
+}
diff --git a/tests/loglevel.bats b/tests/loglevel.bats
new file mode 100644
index 0000000..15da326
--- /dev/null
+++ b/tests/loglevel.bats
@@ -0,0 +1,28 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "log-level set to debug" {
+ run_buildah --log-level=debug images -q
+ expect_output --substring "level=debug "
+}
+
+@test "log-level set to info" {
+ run_buildah --log-level=info images -q
+ expect_output ""
+}
+
+@test "log-level set to warn" {
+ run_buildah --log-level=warn images -q
+ expect_output ""
+}
+
+@test "log-level set to error" {
+ run_buildah --log-level=error images -q
+ expect_output ""
+}
+
+@test "log-level set to invalid" {
+ run_buildah 125 --log-level=invalid images -q
+ expect_output --substring "unable to parse log level"
+}
diff --git a/tests/mkcw.bats b/tests/mkcw.bats
new file mode 100644
index 0000000..1aa32c1
--- /dev/null
+++ b/tests/mkcw.bats
@@ -0,0 +1,97 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function mkcw_check_image() {
+ local imageID="$1"
+ local expectedEnv="$2"
+ # Mount the container and take a look at what it got from the image.
+ run_buildah from "$imageID"
+ local ctrID="$output"
+ run_buildah mount "$ctrID"
+ local mountpoint="$output"
+ # Should have a /disk.img file.
+ test -s "$mountpoint"/disk.img
+ # Should have a krun-sev.json file.
+ test -s "$mountpoint"/krun-sev.json
+ # Should have an executable entrypoint binary.
+ test -s "$mountpoint"/entrypoint
+ test -x "$mountpoint"/entrypoint
+ # Should have a sticky /tmp directory.
+ test -d "$mountpoint"/tmp
+ test -k "$mountpoint"/tmp
+
+ # Decrypt, mount, and take a look around.
+ uuid=$(cryptsetup luksUUID "$mountpoint"/disk.img)
+ cryptsetup luksOpen --key-file "$TEST_SCRATCH_DIR"/key "$mountpoint"/disk.img "$uuid"
+ mkdir -p "$TEST_SCRATCH_DIR"/mount
+ mount /dev/mapper/"$uuid" "$TEST_SCRATCH_DIR"/mount
+ # Should have a not-empty config file with parts of an image's config.
+ test -s "$TEST_SCRATCH_DIR"/mount/.krun_config.json
+ if test -n "$expectedEnv" ; then
+ grep -q "$expectedEnv" "$TEST_SCRATCH_DIR"/mount/.krun_config.json
+ fi
+ # Should have a /tmp directory, at least.
+ test -d "$TEST_SCRATCH_DIR"/mount/tmp
+ # Should have a /bin/sh file from the base image, at least.
+ test -s "$TEST_SCRATCH_DIR"/mount/bin/sh || test -L "$TEST_SCRATCH_DIR"/mount/bin/sh
+
+ # Clean up.
+ umount "$TEST_SCRATCH_DIR"/mount
+ cryptsetup luksClose "$uuid"
+ buildah umount "$ctrID"
+}
+
+@test "mkcw-convert" {
+ skip_if_in_container
+ skip_if_rootless_environment
+ if ! which cryptsetup > /dev/null 2> /dev/null ; then
+ skip "cryptsetup not found"
+ fi
+ _prefetch busybox
+
+ echo -n mkcw-convert > "$TEST_SCRATCH_DIR"/key
+ run_buildah mkcw --ignore-attestation-errors --type snp --passphrase=mkcw-convert busybox busybox-cw
+ mkcw_check_image busybox-cw
+ run_buildah mkcw --ignore-attestation-errors --type SNP --passphrase=mkcw-convert busybox busybox-cw
+ mkcw_check_image busybox-cw
+}
+
+@test "mkcw-commit" {
+ skip_if_in_container
+ skip_if_rootless_environment
+ if ! which cryptsetup > /dev/null 2> /dev/null ; then
+ skip "cryptsetup not found"
+ fi
+ _prefetch busybox
+
+ echo -n "mkcw commit" > "$TEST_SCRATCH_DIR"/key
+ run_buildah from busybox
+ ctrID="$output"
+ run_buildah commit --iidfile "$TEST_SCRATCH_DIR"/iid --cw type=SEV,ignore_attestation_errors,passphrase="mkcw commit" "$ctrID"
+ mkcw_check_image $(cat "$TEST_SCRATCH_DIR"/iid)
+ run_buildah commit --iidfile "$TEST_SCRATCH_DIR"/iid --cw type=sev,ignore_attestation_errors,passphrase="mkcw commit" "$ctrID"
+ mkcw_check_image $(cat "$TEST_SCRATCH_DIR"/iid)
+}
+
+@test "mkcw build" {
+ skip_if_in_container
+ skip_if_rootless_environment
+ if ! which cryptsetup > /dev/null 2> /dev/null ; then
+ skip "cryptsetup not found"
+ fi
+ _prefetch alpine
+
+ echo -n "mkcw build" > "$TEST_SCRATCH_DIR"/key
+ run_buildah build --iidfile "$TEST_SCRATCH_DIR"/iid --cw type=SEV,ignore_attestation_errors,passphrase="mkcw build" -f bud/env/Dockerfile.check-env bud/env
+ mkcw_check_image $(cat "$TEST_SCRATCH_DIR"/iid)
+
+ run_buildah build --iidfile "$TEST_SCRATCH_DIR"/iid --cw type=sev,ignore_attestation_errors,passphrase="mkcw build" -f bud/env/Dockerfile.check-env bud/env
+ mkcw_check_image $(cat "$TEST_SCRATCH_DIR"/iid)
+
+ # the key thing about this next bit is mixing --layers with a final
+ # instruction in the Dockerfile that normally wouldn't produce a layer
+ echo -n "mkcw build --layers" > "$TEST_SCRATCH_DIR"/key
+ run_buildah build --iidfile "$TEST_SCRATCH_DIR"/iid --cw type=SEV,ignore_attestation_errors,passphrase="mkcw build --layers" --layers -f bud/env/Dockerfile.check-env bud/env
+ mkcw_check_image $(cat "$TEST_SCRATCH_DIR"/iid)
+}
diff --git a/tests/mount.bats b/tests/mount.bats
new file mode 100644
index 0000000..d267ce9
--- /dev/null
+++ b/tests/mount.bats
@@ -0,0 +1,65 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "mount-flags-order-verification" {
+ run_buildah 125 mount cnt1 --notruncate path1
+ check_options_flag_err "--notruncate"
+
+ run_buildah 125 mount cnt1 --notruncate
+ check_options_flag_err "--notruncate"
+
+ run_buildah 125 mount cnt1 path1 --notruncate
+ check_options_flag_err "--notruncate"
+}
+
+@test "mount one container" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah mount "$cid"
+}
+
+@test "mount bad container" {
+ run_buildah 125 mount badcontainer
+}
+
+@test "mount multi images" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid3=$output
+ run_buildah mount "$cid1" "$cid2" "$cid3"
+}
+
+@test "mount multi images one bad" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid3=$output
+ run_buildah 125 mount "$cid1" badcontainer "$cid2" "$cid3"
+}
+
+@test "list currently mounted containers" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah mount "$cid1"
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah mount "$cid2"
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid3=$output
+ run_buildah mount "$cid3"
+ run_buildah mount
+ expect_line_count 3
+ expect_output --from="${lines[0]}" --substring "/tmp" "mount line 1 of 3"
+ expect_output --from="${lines[1]}" --substring "/tmp" "mount line 2 of 3"
+ expect_output --from="${lines[2]}" --substring "/tmp" "mount line 3 of 3"
+}
diff --git a/tests/namespaces.bats b/tests/namespaces.bats
new file mode 100644
index 0000000..7e8384f
--- /dev/null
+++ b/tests/namespaces.bats
@@ -0,0 +1,522 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "already-in-userns" {
+ if test "$BUILDAH_ISOLATION" != "rootless" -o $UID == 0 ; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION"
+ fi
+
+ _prefetch alpine
+ run_buildah from $WITH_POLICY_JSON --quiet alpine
+ expect_output "alpine-working-container"
+ ctr="$output"
+
+ run_buildah unshare buildah run --isolation=oci "$ctr" echo hello
+ expect_output "hello"
+}
+
+@test "user-and-network-namespace" {
+ skip_if_rootless_environment
+ skip_if_chroot
+ skip_if_rootless
+
+ mkdir -p $TEST_SCRATCH_DIR/no-cni-configs
+ RUNOPTS="--cni-config-dir=${TEST_SCRATCH_DIR}/no-cni-configs ${RUNC_BINARY:+--runtime $RUNC_BINARY}"
+ # Check if we're running in an environment that can even test this.
+ run readlink /proc/self/ns/user
+ echo "readlink /proc/self/ns/user -> $output"
+ [ $status -eq 0 ] || skip "user namespaces not supported"
+ run readlink /proc/self/ns/net
+ echo "readlink /proc/self/ns/net -> $output"
+ [ $status -eq 0 ] || skip "network namespaces not supported"
+ mynetns="$output"
+
+ # Generate the mappings to use for using-a-user-namespace cases.
+ uidbase=$((${RANDOM}+1024))
+ gidbase=$((${RANDOM}+1024))
+ uidsize=$((${RANDOM}+1024))
+ gidsize=$((${RANDOM}+1024))
+
+ # Create a container that uses that mapping.
+ _prefetch alpine
+ run_buildah from $WITH_POLICY_JSON --quiet --userns-uid-map 0:$uidbase:$uidsize --userns-gid-map 0:$gidbase:$gidsize alpine
+ ctr="$output"
+
+ # Check that with settings that require a user namespace, we also get a new network namespace by default.
+ run_buildah run $RUNOPTS "$ctr" readlink /proc/self/ns/net
+ assert "$output" != "$mynetns" "we should get a new network namespace"
+
+ # Check that with settings that require a user namespace, we can still try to use the host's network namespace.
+ run_buildah run $RUNOPTS --net=host "$ctr" readlink /proc/self/ns/net
+ expect_output "$mynetns"
+
+ # Check that we are not bind mounting /sys from the host with --net=container
+ host_sys=$(grep "/sys " /proc/self/mountinfo | cut -d ' ' -f 3)
+ run_buildah run $RUNOPTS --net=container "$ctr" sh -c 'grep "/sys " /proc/self/mountinfo | cut -d " " -f 3'
+ assert "$output" != "$host_sys"
+
+ # Create a container that doesn't use that mapping.
+ run_buildah from $WITH_POLICY_JSON --quiet alpine
+ ctr="$output"
+
+ run_buildah run $RUNOPTS --net=host "$ctr" readlink /proc/self/ns/net
+ expect_output "$mynetns"
+
+ # Check that with settings that don't require a user namespace, we can request to use a per-container network namespace.
+ run_buildah run $RUNOPTS --net=container "$ctr" readlink /proc/self/ns/net
+ assert "$output" != "$mynetns" \
+ "[/proc/self/ns/net (--net=container) should not be '$mynetns']"
+
+ run_buildah run $RUNOPTS --net=private "$ctr" readlink /proc/self/ns/net
+ assert "$output" != "$mynetns" \
+ "[/proc/self/ns/net (--net=private) should not be '$mynetns']"
+
+ run_buildah run $RUNOPTS "$ctr" readlink /proc/self/ns/net
+ assert "$output" != "$mynetns" \
+ "[/proc/self/ns/net (--net="") should not be '$mynetns']"
+}
+
+# Helper for idmapping test: check UID or GID mapping
+# NOTE SIDE EFFECT: sets $rootxid for possible use by caller
+idmapping_check_map() {
+ local _output_idmap=$1
+ local _expect_idmap=$2
+ local _testname=$3
+
+ assert "$_output_idmap" != "" "Internal error: output_idmap is empty"
+ local _idmap=$(sed -E -e 's, +, ,g' -e 's,^ +,,g' <<< "${_output_idmap}")
+ expect_output --from="$_idmap" "${_expect_idmap}" "$_testname"
+
+ # SIDE EFFECT: Global: our caller may want this
+ rootxid=$(sed -E -e 's,^([^ ]*) (.*) ([^ ]*),\2,' <<< "$_idmap")
+}
+
+# Helper for idmapping test: check file permissions
+idmapping_check_permission() {
+ local _output_file_stat=$1
+ local _output_dir_stat=$2
+
+ expect_output --from="${_output_file_stat}" "1:1" "Check if a copied file gets the right permissions"
+ expect_output --from="${_output_dir_stat}" "0:0" "Check if a copied directory gets the right permissions"
+}
+
+@test "idmapping" {
+ skip_if_rootless_environment
+ mkdir -p $TEST_SCRATCH_DIR/no-cni-configs
+ RUNOPTS="--cni-config-dir=${TEST_SCRATCH_DIR}/no-cni-configs ${RUNC_BINARY:+--runtime $RUNC_BINARY}"
+
+ # Check if we're running in an environment that can even test this.
+ run readlink /proc/self/ns/user
+ echo "readlink /proc/self/ns/user -> $output"
+ [ $status -eq 0 ] || skip "user namespaces not supported"
+ mynamespace="$output"
+
+ # Generate the mappings to use.
+ uidbase=$((${RANDOM}+1024))
+ gidbase=$((${RANDOM}+1024))
+ uidsize=$((${RANDOM}+1024))
+ gidsize=$((${RANDOM}+1024))
+ # Test with no mappings.
+ uidmapargs[0]=
+ gidmapargs[0]=
+ uidmaps[0]="0 0 4294967295"
+ gidmaps[0]="0 0 4294967295"
+ # Test with both UID and GID maps specified.
+ uidmapargs[1]="--userns-uid-map=0:$uidbase:$uidsize"
+ gidmapargs[1]="--userns-gid-map=0:$gidbase:$gidsize"
+ uidmaps[1]="0 $uidbase $uidsize"
+ gidmaps[1]="0 $gidbase $gidsize"
+ # Conditionalize some tests on the subuid and subgid files being present.
+ if test -s /etc/subuid ; then
+ if test -s /etc/subgid ; then
+ # Look for a name that's in both the subuid and subgid files.
+ for candidate in $(sed -e 's,:.*,,g' /etc/subuid); do
+ if test $(sed -e 's,:.*,,g' -e "/$candidate/!d" /etc/subgid) == "$candidate"; then
+ # Read the start of the subuid/subgid ranges. Assume length=65536.
+ userbase=$(sed -e "/^${candidate}:/!d" -e 's,^[^:]*:,,g' -e 's,:[^:]*,,g' /etc/subuid)
+ groupbase=$(sed -e "/^${candidate}:/!d" -e 's,^[^:]*:,,g' -e 's,:[^:]*,,g' /etc/subgid)
+ # Test specifying both the user and group names.
+ uidmapargs[${#uidmaps[*]}]=--userns-uid-map-user=$candidate
+ gidmapargs[${#gidmaps[*]}]=--userns-gid-map-group=$candidate
+ uidmaps[${#uidmaps[*]}]="0 $userbase 65536"
+ gidmaps[${#gidmaps[*]}]="0 $groupbase 65536"
+ # Test specifying just the user name.
+ uidmapargs[${#uidmaps[*]}]=--userns-uid-map-user=$candidate
+ uidmaps[${#uidmaps[*]}]="0 $userbase 65536"
+ gidmaps[${#gidmaps[*]}]="0 $groupbase 65536"
+ # Test specifying just the group name.
+ gidmapargs[${#gidmaps[*]}]=--userns-gid-map-group=$candidate
+ uidmaps[${#uidmaps[*]}]="0 $userbase 65536"
+ gidmaps[${#gidmaps[*]}]="0 $groupbase 65536"
+ break
+ fi
+ done
+ # Choose different names from the files.
+ for candidateuser in $(sed -e 's,:.*,,g' /etc/subuid); do
+ for candidategroup in $(sed -e 's,:.*,,g' /etc/subgid); do
+ if test "$candidateuser" == "$candidate" ; then
+ continue
+ fi
+ if test "$candidategroup" == "$candidate" ; then
+ continue
+ fi
+ if test "$candidateuser" == "$candidategroup" ; then
+ continue
+ fi
+ # Read the start of the ranges. Assume length=65536.
+ userbase=$(sed -e "/^${candidateuser}:/!d" -e 's,^[^:]*:,,g' -e 's,:[^:]*,,g' /etc/subuid)
+ groupbase=$(sed -e "/^${candidategroup}:/!d" -e 's,^[^:]*:,,g' -e 's,:[^:]*,,g' /etc/subgid)
+ # Test specifying both the user and group names.
+ uidmapargs[${#uidmaps[*]}]=--userns-uid-map-user=$candidateuser
+ gidmapargs[${#gidmaps[*]}]=--userns-gid-map-group=$candidategroup
+ uidmaps[${#uidmaps[*]}]="0 $userbase 65536"
+ gidmaps[${#gidmaps[*]}]="0 $groupbase 65536"
+ break
+ done
+ done
+ fi
+ fi
+
+ touch ${TEST_SCRATCH_DIR}/somefile
+ mkdir ${TEST_SCRATCH_DIR}/somedir
+ touch ${TEST_SCRATCH_DIR}/somedir/someotherfile
+ chmod 700 ${TEST_SCRATCH_DIR}/somedir/someotherfile
+ chmod u+s ${TEST_SCRATCH_DIR}/somedir/someotherfile
+
+ for i in $(seq 0 "$((${#uidmaps[*]}-1))") ; do
+ # local helper function for checking /proc/self/ns/user
+ function idmapping_check_namespace() {
+ local _output=$1
+ local _testname=$2
+
+ assert "$_output" != "" "Internal error: _output is empty"
+ if [ -z "${uidmapargs[$i]}${gidmapargs[$i]}" ]; then
+ if test "$BUILDAH_ISOLATION" != "chroot" -a "$BUILDAH_ISOLATION" != "rootless" ; then
+ expect_output --from="$_output" "$mynamespace" "/proc/self/ns/user ($_testname)"
+ fi
+ else
+ assert "$_output" != "$mynamespace" "_output vs mynamespace"
+ fi
+ }
+
+ # Create a container using these mappings.
+ echo "Building container with $WITH_POLICY_JSON --quiet ${uidmapargs[$i]} ${gidmapargs[$i]} alpine"
+ _prefetch alpine
+ run_buildah from $WITH_POLICY_JSON --quiet ${uidmapargs[$i]} ${gidmapargs[$i]} alpine
+ ctr="$output"
+
+ # If we specified mappings, expect to be in a different namespace by default.
+ run_buildah run $RUNOPTS "$ctr" readlink /proc/self/ns/user
+ idmapping_check_namespace "$output" "container"
+ # Check that we got the UID and GID mappings that we expected.
+ # rootuid/rootgid are obtained (side effect) from helper function
+ run_buildah run $RUNOPTS "$ctr" cat /proc/self/uid_map
+ idmapping_check_map "$output" "${uidmaps[$i]}" "uid_map"
+ rootuid=$rootxid
+
+ run_buildah run $RUNOPTS "$ctr" cat /proc/self/gid_map
+ idmapping_check_map "$output" "${gidmaps[$i]}" "gid_map"
+ rootgid=$rootxid
+
+ # Check that if we copy a file into the container, it gets the right permissions.
+ run_buildah copy --chown 1:1 "$ctr" ${TEST_SCRATCH_DIR}/somefile /
+ run_buildah run $RUNOPTS "$ctr" stat -c '%u:%g' /somefile
+ output_file_stat="$output"
+ # Check that if we copy a directory into the container, its contents get the right permissions.
+ run_buildah copy "$ctr" ${TEST_SCRATCH_DIR}/somedir /somedir
+ run_buildah run $RUNOPTS "$ctr" stat -c '%u:%g' /somedir
+ output_dir_stat="$output"
+ idmapping_check_permission "$output_file_stat" "$output_dir_stat"
+
+ run_buildah run $RUNOPTS "$ctr" stat -c '%u:%g %a' /somedir/someotherfile
+ expect_output "0:0 4700" "stat(someotherfile), in container test"
+
+ # Check that the copied file has the right permissions on host.
+ run_buildah mount "$ctr"
+ mnt="$output"
+ run stat -c '%u:%g %a' "$mnt"/somedir/someotherfile
+ assert "$status" -eq 0 "status of stat $mnt/somedir/someotherfile"
+ expect_output "$rootuid:$rootgid 4700"
+
+ # Check that a container with mapped-layer can be committed.
+ run_buildah commit "$ctr" localhost/alpine-working:$i
+
+
+ # Also test bud command
+ # Build an image using these mappings.
+ echo "Building image with ${uidmapargs[$i]} ${gidmapargs[$i]}"
+ run_buildah bud ${uidmapargs[$i]} ${gidmapargs[$i]} $RUNOPTS $WITH_POLICY_JSON \
+ -t localhost/alpine-bud:$i -f $BUDFILES/namespaces/Containerfile $TEST_SCRATCH_DIR
+ # If we specified mappings, expect to be in a different namespace by default.
+ output_namespace="$(grep -A1 'ReadlinkResult' <<< "$output" | tail -n1)"
+ idmapping_check_namespace "${output_namespace}" "bud"
+ # Check that we got the mappings that we expected.
+ output_uidmap="$(grep -A1 'UidMapResult' <<< "$output" | tail -n1)"
+ output_gidmap="$(grep -A1 'GidMapResult' <<< "$output" | tail -n1)"
+ idmapping_check_map "$output_uidmap" "${uidmaps[$i]}" "UidMapResult"
+ idmapping_check_map "$output_gidmap" "${gidmaps[$i]}" "GidMapResult"
+
+ # Check that if we copy a file into the container, it gets the right permissions.
+ output_file_stat="$(grep -A1 'StatSomefileResult' <<< "$output" | tail -n1)"
+ # Check that if we copy a directory into the container, its contents get the right permissions.
+ output_dir_stat="$(grep -A1 'StatSomedirResult' <<< "$output" | tail -n1)"
+ output_otherfile_stat="$(grep -A1 'StatSomeotherfileResult' <<< "$output" | tail -n1)"
+ output_workdir_stat="$(grep -A1 'StatNewWorkdir' <<< "$output" | tail -n1)"
+ # bud strips suid.
+ idmapping_check_permission "$output_file_stat" "$output_dir_stat"
+ expect_output --from="${output_otherfile_stat}" "0:0 700" "stat(someotherfile), in bud test"
+ expect_output --from="${output_workdir_stat}" "guest:users" "stat(new-workdir), in bud test"
+ done
+}
+
+# general_namespace NSTYPE [NSFLAG] - shared body for the per-namespace tests
+# below.  NSTYPE is the name of the /proc/self/ns/ link to inspect; NSFLAG is
+# the buildah CLI flag name when it differs from NSTYPE (e.g. "network" for
+# "net", "userns" for "user").  For each supported setting it verifies that
+# "from"+"run", "run" with an override, and "bud" end up in the expected
+# namespace relative to the test process's own namespace.
+general_namespace() {
+ mkdir -p $TEST_SCRATCH_DIR/no-cni-configs
+ RUNOPTS="--cni-config-dir=${TEST_SCRATCH_DIR}/no-cni-configs ${RUNC_BINARY:+--runtime $RUNC_BINARY}"
+ mytmpdir=$TEST_SCRATCH_DIR/my-dir
+ mkdir -p ${mytmpdir}
+
+ # The name of the /proc/self/ns/$link.
+ nstype="$1"
+ # The flag to use, if it's not the same as the namespace name.
+ nsflag="${2:-$1}"
+
+ # Check if we're running in an environment that can even test this.
+ run readlink /proc/self/ns/"$nstype"
+ echo "readlink /proc/self/ns/$nstype -> $output"
+ [ $status -eq 0 ] || skip "$nstype namespaces not supported"
+ mynamespace="$output"
+
+ # Settings to test.  Empty/container/private request a fresh namespace;
+ # host and a /proc path (or ns:path) request joining an existing one.
+ types[0]=
+ types[1]=container
+ types[2]=host
+ types[3]=/proc/$$/ns/$nstype
+ types[4]=private
+ types[5]=ns:/proc/$$/ns/$nstype
+
+ _prefetch alpine
+ for namespace in "${types[@]}" ; do
+ # Specify the setting for this namespace for this container.
+ run_buildah from $WITH_POLICY_JSON --quiet --"$nsflag"=$namespace alpine
+ assert "$output" != "" "Internal error: buildah-from produced no output"
+ ctr="$output"
+
+ # Check that, unless we override it, we get that setting in "run".
+ run_buildah run $RUNOPTS "$ctr" readlink /proc/self/ns/"$nstype"
+ assert "$output" != "" "readlink /proc/self/ns/$nstype must not be empty"
+ case "$namespace" in
+ ""|container|private)
+ assert "$output" != "$mynamespace" \
+ "readlink /proc/self/ns/$nstype, with namespace=$namespace"
+ ;;
+ host)
+ expect_output "$mynamespace"
+ ;;
+ /*)
+ expect_output "$(readlink $namespace)"
+ ;;
+ esac
+
+ # "run" doesn't have --userns option.
+ if [ "$nsflag" != "userns" ]; then
+ for different in ${types[@]} ; do
+ # Check that, if we override it, we get what we specify for "run".
+ run_buildah run $RUNOPTS --"$nsflag"=$different "$ctr" readlink /proc/self/ns/"$nstype"
+ assert "$output" != "" "readlink /proc/self/ns/$nstype must not be empty"
+ case "$different" in
+ ""|container|private)
+ assert "$output" != "$mynamespace" \
+ "readlink /proc/self/ns/$nstype, with different=$different"
+ ;;
+ host)
+ expect_output "$mynamespace"
+ ;;
+ /*)
+ expect_output "$(readlink $different)"
+ ;;
+ esac
+ done
+ fi
+
+ # Also check the "bud" command with the same namespace flag.
+ cat > $mytmpdir/Containerfile << _EOF
+FROM alpine
+RUN echo "TargetOutput" && readlink /proc/self/ns/$nstype
+_EOF
+ run_buildah bud --"$nsflag"=$namespace $RUNOPTS $WITH_POLICY_JSON --file ${mytmpdir}/Containerfile .
+ result=$(grep -A1 "TargetOutput" <<< "$output" | tail -n1)
+ case "$namespace" in
+ ""|container|private)
+ assert "$result" != "$mynamespace" "readlink /proc/self/ns/$nstype"
+ ;;
+ host)
+ expect_output --from="$result" "$mynamespace"
+ ;;
+ /*)
+ expect_output --from="$result" "$(readlink $namespace)"
+ ;;
+ esac
+
+ done
+}
+
+# Exercise --ipc handling for from/run/bud via the shared general_namespace body.
+@test "ipc-namespace" {
+ skip_if_chroot
+ skip_if_rootless
+ skip_if_rootless_environment
+
+ general_namespace ipc
+}
+
+# Exercise --net handling for from/run/bud via the shared general_namespace body.
+@test "net-namespace" {
+ skip_if_chroot
+ skip_if_rootless
+ skip_if_rootless_environment
+
+ general_namespace net
+}
+
+# Same as net-namespace, but using the "--network" flag alias for the net namespace.
+@test "network-namespace" {
+ skip_if_chroot
+ skip_if_rootless
+ skip_if_rootless_environment
+
+ general_namespace net network
+}
+
+# Exercise --pid handling for from/run/bud via the shared general_namespace body.
+@test "pid-namespace" {
+ skip_if_chroot
+ skip_if_rootless
+ skip_if_rootless_environment
+
+ general_namespace pid
+}
+
+# Exercise the user namespace; the flag is "--userns" rather than "--user".
+@test "user-namespace" {
+ skip_if_chroot
+ skip_if_rootless
+ skip_if_rootless_environment
+
+ general_namespace user userns
+}
+
+# Exercise --uts handling for from/run/bud via the shared general_namespace body.
+@test "uts-namespace" {
+ skip_if_chroot
+ skip_if_rootless
+ skip_if_rootless_environment
+
+ general_namespace uts
+}
+
+# Smoke-test every host/private combination of the configurable namespaces:
+# "from" must succeed and "run" (plain, --tty=true, --terminal=false) must
+# produce output for each of the 64 combinations.
+@test "combination-namespaces" {
+ skip_if_chroot
+ skip_if_rootless
+
+ _prefetch alpine
+ # mnt is always per-container, cgroup isn't a thing OCI runtime lets us configure
+ for ipc in host private; do
+ for net in host private; do
+ for pid in host private; do
+ for userns in host private; do
+ for uts in host private; do
+ for cgroupns in host private; do
+
+ echo "buildah from $WITH_POLICY_JSON --ipc=$ipc --net=$net --pid=$pid --userns=$userns --uts=$uts --cgroupns=$cgroupns alpine"
+ run_buildah from $WITH_POLICY_JSON --quiet --ipc=$ipc --net=$net --pid=$pid --userns=$userns --uts=$uts --cgroupns=$cgroupns alpine
+ assert "$output" != "" "output from buildah-from"
+ ctr="$output"
+ run_buildah run $ctr pwd
+ assert "$output" != "" "output from pwd"
+ run_buildah run --tty=true $ctr pwd
+ assert "$output" != "" "output from pwd, with --tty=true"
+ run_buildah run --terminal=false $ctr pwd
+ assert "$output" != "" "output from pwd, with --terminal=false"
+ done
+ done
+ done
+ done
+ done
+ done
+}
+
+# Commit a squashed image built with userns uid/gid mappings, then verify that
+# the squashed layer's file ownership (as seen from the host mount) reflects
+# the in-container IDs (0:0 and 1:1), not the mapped host IDs.
+@test "idmapping-and-squash" {
+ skip_if_rootless_environment
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --userns-uid-map 0:32:16 --userns-gid-map 0:48:16 scratch
+ cid=$output
+ run_buildah copy "$cid" ${TEST_SCRATCH_DIR}/randomfile /
+ run_buildah copy --chown 1:1 "$cid" ${TEST_SCRATCH_DIR}/randomfile /randomfile2
+ run_buildah commit --squash $WITH_POLICY_JSON --rm "$cid" squashed
+ run_buildah from --quiet squashed
+ cid=$output
+ run_buildah mount $cid
+ mountpoint=$output
+ run stat -c %u:%g $mountpoint/randomfile
+ [ "$status" -eq 0 ]
+ expect_output "0:0"
+
+ run stat -c %u:%g $mountpoint/randomfile2
+ [ "$status" -eq 0 ]
+ expect_output "1:1"
+}
+
+# Malformed --userns-uid-map/--userns-gid-map values must be rejected (exit 125)
+# with a precise error message, for both "from" and "bud"; a valid uid map with
+# no gid map is accepted (last command).
+@test "invalid userns-uid-map userns-gid-map" {
+ run_buildah 125 from --userns-uid-map 16 --userns-gid-map 0:48:16 scratch
+ expect_output 'Error: initializing ID mappings: userns-uid-map setting is malformed expected ["uint32:uint32:uint32"]: ["16"]'
+
+ run_buildah 125 from --userns-uid-map 0:32:16 --userns-gid-map 16 scratch
+ expect_output 'Error: initializing ID mappings: userns-gid-map setting is malformed expected ["uint32:uint32:uint32"]: ["16"]'
+
+ run_buildah 125 bud --userns-uid-map a --userns-gid-map bogus bud/from-scratch
+ expect_output 'Error: initializing ID mappings: userns-uid-map setting is malformed expected ["uint32:uint32:uint32"]: ["a"]'
+
+ run_buildah 125 bud --userns-uid-map 0:32:16 --userns-gid-map bogus bud/from-scratch
+ expect_output 'Error: initializing ID mappings: userns-gid-map setting is malformed expected ["uint32:uint32:uint32"]: ["bogus"]'
+
+ run_buildah from --userns-uid-map 0:32:16 scratch
+}
+
+# --userns-uid-map alone is fine, but --userns-gid-map alone must fail.
+@test "idmapping-syntax" {
+ run_buildah from $WITH_POLICY_JSON --quiet --userns-uid-map=0:10000:65536 alpine
+
+ run_buildah 125 from $WITH_POLICY_JSON --quiet --userns-gid-map=0:10000:65536 alpine
+ expect_output --substring "userns-gid-map can not be used without --userns-uid-map"
+}
+
+# Namespace modes set in containers.conf (cgroupns/netns/pidns/ipcns/utsns)
+# must be honored by "buildah from"/"run": with mode=host the container shares
+# our namespaces, with mode=private it must not.
+@test "use containers.conf namespace settings" {
+ skip_if_chroot
+
+ _prefetch alpine
+ containers_conf_file="$TEST_SCRATCH_DIR/containers-namespaces.conf"
+
+ for mode in host private; do
+ cat > "$containers_conf_file" << EOF
+[containers]
+
+cgroupns = "$mode"
+netns = "$mode"
+pidns = "$mode"
+ipcns = "$mode"
+utsns = "$mode"
+EOF
+
+ CONTAINERS_CONF="$containers_conf_file" run_buildah from $WITH_POLICY_JSON --quiet alpine
+ assert "$output" != "" "output from buildah-from"
+ ctr="$output"
+
+ # host mode must match our namespace links; private must differ.
+ local op="=="
+ if [[ "$mode" == "private" ]]; then
+ op="!="
+ fi
+
+ for nstype in cgroup ipc net pid uts; do
+ run readlink /proc/self/ns/"$nstype"
+ ns="$output"
+ run_buildah run $ctr readlink /proc/self/ns/"$nstype"
+ assert "$output" $op "$ns" "namespace matches expected ($mode)"
+ done
+ done
+
+ rm "$containers_conf_file"
+}
diff --git a/tests/overlay.bats b/tests/overlay.bats
new file mode 100644
index 0000000..359a797
--- /dev/null
+++ b/tests/overlay.bats
@@ -0,0 +1,96 @@
+#!/usr/bin/env bats
+
+load helpers
+
+# An :O volume mount is an overlay: writes inside the container must be
+# discarded between runs and must never reach the host source directory.
+@test "overlay specific level" {
+ if test \! -e /usr/bin/fuse-overlayfs -a "$BUILDAH_ISOLATION" = "rootless"; then
+ # BUG FIX: the closing quote used to sit right after $BUILDAH_ISOLATION,
+ # passing the rest of the message as stray unquoted arguments; bats' skip
+ # only records its first argument, so the reason was truncated.
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION and no /usr/bin/fuse-overlayfs present"
+ elif test "$STORAGE_DRIVER" = "vfs"; then
+ skip "skipping overlay test because \$STORAGE_DRIVER = $STORAGE_DRIVER"
+ fi
+ image=alpine
+ mkdir ${TEST_SCRATCH_DIR}/lower
+ touch ${TEST_SCRATCH_DIR}/lower/foo
+
+ run_buildah from --quiet -v ${TEST_SCRATCH_DIR}/lower:/lower:O --quiet $WITH_POLICY_JSON $image
+ cid=$output
+
+ # This should succeed
+ run_buildah run $cid ls /lower/foo
+
+ # Create and remove content in the overlay directory, should succeed,
+ # resetting the contents between each run.
+ run_buildah run $cid touch /lower/bar
+ run_buildah run $cid rm /lower/foo
+
+ # This should fail, second runs of containers go back to original
+ run_buildah 1 run $cid ls /lower/bar
+
+ # This should fail
+ run ls ${TEST_SCRATCH_DIR}/lower/bar
+ assert "$status" -ne 0 "status of ls ${TEST_SCRATCH_DIR}/lower/bar"
+}
+
+@test "overlay source permissions and owners" {
+ if test \! -e /usr/bin/fuse-overlayfs -a "$BUILDAH_ISOLATION" = "rootless"; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION" and no /usr/bin/fuse-overlayfs present
+ elif test "$STORAGE_DRIVER" = "vfs"; then
+ skip "skipping overlay test because \$STORAGE_DRIVER = $STORAGE_DRIVER"
+ fi
+ image=alpine
+ mkdir -m 770 ${TEST_SCRATCH_DIR}/lower
+ chown 1:1 ${TEST_SCRATCH_DIR}/lower
+ permission=$(stat -c "%a %u %g" ${TEST_SCRATCH_DIR}/lower)
+ run_buildah from --quiet -v ${TEST_SCRATCH_DIR}/lower:/tmp/test:O --quiet $WITH_POLICY_JSON $image
+ cid=$output
+
+ # This should succeed
+ run_buildah run $cid sh -c 'stat -c "%a %u %g" /tmp/test'
+ expect_output "$permission"
+
+ # Create and remove content in the overlay directory, should succeed
+ touch ${TEST_SCRATCH_DIR}/lower/foo
+ run_buildah run $cid touch /tmp/test/bar
+ run_buildah run $cid rm /tmp/test/foo
+
+ # This should fail, second runs of containers go back to original
+ run_buildah 1 run $cid ls /tmp/test/bar
+
+ # This should fail since /tmp/test was an overlay, not a bind mount
+ run ls ${TEST_SCRATCH_DIR}/lower/bar
+ assert "$status" -ne 0 "status of ls ${TEST_SCRATCH_DIR}/lower/bar"
+}
+
+@test "overlay path contains colon" {
+ if test \! -e /usr/bin/fuse-overlayfs -a "$BUILDAH_ISOLATION" = "rootless"; then
+ skip "BUILDAH_ISOLATION = $BUILDAH_ISOLATION" and no /usr/bin/fuse-overlayfs present
+ elif test "$STORAGE_DRIVER" = "vfs"; then
+ skip "skipping overlay test because \$STORAGE_DRIVER = $STORAGE_DRIVER"
+ fi
+ image=alpine
+ mkdir ${TEST_SCRATCH_DIR}/a:lower
+ touch ${TEST_SCRATCH_DIR}/a:lower/foo
+
+ # This should succeed.
+ # Add double backslash, because shell will escape.
+ run_buildah from --quiet -v ${TEST_SCRATCH_DIR}/a\\:lower:/a\\:lower:O --quiet $WITH_POLICY_JSON $image
+ cid=$output
+
+ # This should succeed
+ run_buildah run $cid ls /a:lower/foo
+
+ # Mount volume when run
+ run_buildah run -v ${TEST_SCRATCH_DIR}/a\\:lower:/b\\:lower:O $cid ls /b:lower/foo
+
+ # Create and remove content in the overlay directory, should succeed,
+ # resetting the contents between each run.
+ run_buildah run $cid touch /a:lower/bar
+ run_buildah run $cid rm /a:lower/foo
+
+ # This should fail, second runs of containers go back to original
+ run_buildah 1 run $cid ls /a:lower/bar
+
+ # This should fail
+ run ls ${TEST_SCRATCH_DIR}/a:lower/bar
+ assert "$status" -ne 0 "status of ls ${TEST_SCRATCH_DIR}/a:lower/bar"
+}
diff --git a/tests/policy.json b/tests/policy.json
new file mode 100644
index 0000000..bb26e57
--- /dev/null
+++ b/tests/policy.json
@@ -0,0 +1,7 @@
+{
+ "default": [
+ {
+ "type": "insecureAcceptAnything"
+ }
+ ]
+}
diff --git a/tests/pull.bats b/tests/pull.bats
new file mode 100644
index 0000000..396976c
--- /dev/null
+++ b/tests/pull.bats
@@ -0,0 +1,405 @@
+#!/usr/bin/env bats
+
+load helpers
+
+# Regression test for #2904
+# A short name must resolve to the localhost/ image when one exists.
+@test "local-image resolution" {
+ run_buildah pull -q busybox
+ iid=$output
+ run_buildah tag ${iid} localhost/image
+
+ # We want to make sure that "image" will always resolve to "localhost/image"
+ # (given a local image with that name exists). The trick we're using is to
+ # force a failed pull and look at the error message which *must* include the
+ # the resolved image name (localhost/image:latest).
+ run_buildah 125 pull --policy=always image
+ assert "$output" =~ "initializing source docker://localhost/image:latest"
+ run_buildah rmi localhost/image ${iid}
+}
+
+# Flags placed after the positional image argument must be rejected (exit 125).
+@test "pull-flags-order-verification" {
+ run_buildah 125 pull --retry 4 --retry-delay 4s image1 --tls-verify
+ check_options_flag_err "--tls-verify"
+
+ run_buildah 125 pull image1 --authfile=/tmp/somefile
+ check_options_flag_err "--authfile=/tmp/somefile"
+
+ run_buildah 125 pull image1 -q --cred bla:bla --authfile=/tmp/somefile
+ check_options_flag_err "-q"
+}
+
+# A registry blocked in registries.conf must refuse pulls; an unblocked
+# configuration must allow the same pull.
+@test "pull-blocked" {
+ run_buildah 125 --registries-conf ${TEST_SOURCES}/registries.conf.block pull $WITH_POLICY_JSON docker.io/alpine
+ expect_output --substring "registry docker.io is blocked in"
+
+ run_buildah --retry --registries-conf ${TEST_SOURCES}/registries.conf pull $WITH_POLICY_JSON docker.io/alpine
+}
+
+# Pulls by tag, default tag, and digest from real registries; a bogus image
+# name must fail and must not appear in the local image list.
+@test "pull-from-registry" {
+ run_buildah --retry pull --registries-conf ${TEST_SOURCES}/registries.conf $WITH_POLICY_JSON busybox:glibc
+ run_buildah pull --registries-conf ${TEST_SOURCES}/registries.conf $WITH_POLICY_JSON busybox:latest
+ run_buildah images --format "{{.Name}}:{{.Tag}}"
+ expect_output --substring "busybox:glibc"
+ expect_output --substring "busybox:latest"
+ # We need to see if this file is created after first pull in at least one test
+ [ -f ${TEST_SCRATCH_DIR}/root/defaultNetworkBackend ]
+
+ run_buildah --retry pull --registries-conf ${TEST_SOURCES}/registries.conf $WITH_POLICY_JSON quay.io/libpod/alpine_nginx:latest
+ run_buildah images --format "{{.Name}}:{{.Tag}}"
+ expect_output --substring "alpine_nginx:latest"
+
+ run_buildah rmi quay.io/libpod/alpine_nginx:latest
+ run_buildah --retry pull --registries-conf ${TEST_SOURCES}/registries.conf $WITH_POLICY_JSON quay.io/libpod/alpine_nginx
+ run_buildah images --format "{{.Name}}:{{.Tag}}"
+ expect_output --substring "alpine_nginx:latest"
+
+ run_buildah --retry pull --registries-conf ${TEST_SOURCES}/registries.conf $WITH_POLICY_JSON alpine@sha256:e9a2035f9d0d7cee1cdd445f5bfa0c5c646455ee26f14565dce23cf2d2de7570
+ run_buildah 125 pull --registries-conf ${TEST_SOURCES}/registries.conf $WITH_POLICY_JSON fakeimage/fortest
+ run_buildah images --format "{{.Name}}:{{.Tag}}"
+ assert "$output" !~ "fakeimage/fortest" "fakeimage/fortest found in buildah images"
+}
+
+# Round-trip via the docker-archive transport; --all-tags is unsupported there.
+@test "pull-from-docker-archive" {
+ run_buildah --retry pull $WITH_POLICY_JSON alpine
+ run_buildah push $WITH_POLICY_JSON docker.io/library/alpine:latest docker-archive:${TEST_SCRATCH_DIR}/alp.tar:alpine:latest
+ run_buildah rmi alpine
+ run_buildah --retry pull $WITH_POLICY_JSON docker-archive:${TEST_SCRATCH_DIR}/alp.tar
+ run_buildah images --format "{{.Name}}:{{.Tag}}"
+ expect_output --substring "alpine"
+ run_buildah 125 pull --all-tags $WITH_POLICY_JSON docker-archive:${TEST_SCRATCH_DIR}/alp.tar
+ expect_output --substring "pulling all tags is not supported for docker-archive transport"
+}
+
+# Round-trip via the oci-archive transport; --all-tags is unsupported there.
+@test "pull-from-oci-archive" {
+ run_buildah --retry pull $WITH_POLICY_JSON alpine
+ run_buildah push $WITH_POLICY_JSON docker.io/library/alpine:latest oci-archive:${TEST_SCRATCH_DIR}/alp.tar:alpine
+ run_buildah rmi alpine
+ run_buildah pull $WITH_POLICY_JSON oci-archive:${TEST_SCRATCH_DIR}/alp.tar
+ run_buildah images --format "{{.Name}}:{{.Tag}}"
+ expect_output --substring "alpine"
+ run_buildah 125 pull --all-tags $WITH_POLICY_JSON oci-archive:${TEST_SCRATCH_DIR}/alp.tar
+ expect_output --substring "pulling all tags is not supported for oci-archive transport"
+}
+
+# Round-trip via the dir transport; such images come back untagged, and
+# --all-tags is unsupported.
+@test "pull-from-local-directory" {
+ mkdir ${TEST_SCRATCH_DIR}/buildahtest
+ run_buildah --retry pull $WITH_POLICY_JSON alpine
+ run_buildah push $WITH_POLICY_JSON docker.io/library/alpine:latest dir:${TEST_SCRATCH_DIR}/buildahtest
+ run_buildah rmi alpine
+ run_buildah pull --quiet $WITH_POLICY_JSON dir:${TEST_SCRATCH_DIR}/buildahtest
+ imageID="$output"
+ # Images pulled via the dir transport are untagged.
+ run_buildah images --format "{{.Name}}:{{.Tag}}"
+ expect_output --substring "<none>:<none>"
+ run_buildah 125 pull --all-tags $WITH_POLICY_JSON dir:$imageID
+ expect_output --substring "pulling all tags is not supported for dir transport"
+}
+
+# Pull from a running docker daemon; --all-tags is unsupported for that transport.
+@test "pull-from-docker-daemon" {
+ skip_if_no_docker
+
+ run docker pull alpine
+ echo "$output"
+ assert "$status" -eq 0 "status of docker (yes, docker) pull alpine"
+ run_buildah pull $WITH_POLICY_JSON docker-daemon:docker.io/library/alpine:latest
+ run_buildah images --format "{{.Name}}:{{.Tag}}"
+ expect_output --substring "alpine:latest"
+ run_buildah rmi alpine
+ run_buildah 125 pull --all-tags $WITH_POLICY_JSON docker-daemon:docker.io/library/alpine:latest
+ expect_output --substring "pulling all tags is not supported for docker-daemon transport"
+}
+
+# Push one image under several tags to a local registry, then verify that
+# "pull --all-tags" retrieves every tag yet results in exactly one image ID.
+@test "pull-all-tags" {
+ start_registry
+ declare -a tags=(0.9 0.9.1 1.1 alpha beta gamma2.0 latest)
+
+ # setup: pull alpine, and push it repeatedly to localhost using those tags
+ opts="--signature-policy ${TEST_SOURCES}/policy.json --tls-verify=false --creds testuser:testpassword"
+ run_buildah --retry pull --quiet $WITH_POLICY_JSON alpine
+ for tag in "${tags[@]}"; do
+ run_buildah push $opts alpine localhost:${REGISTRY_PORT}/myalpine:$tag
+ done
+
+ run_buildah images -q
+ expect_line_count 1 "There's only one actual image ID"
+ alpine_iid=$output
+
+ # Remove it, and confirm.
+ run_buildah rmi alpine
+ run_buildah images -q
+ expect_output "" "After buildah rmi, there are no locally stored images"
+
+ # Now pull with --all-tags, and confirm that we see all expected tag strings
+ run_buildah pull $opts --all-tags localhost:${REGISTRY_PORT}/myalpine
+ for tag in "${tags[@]}"; do
+ expect_output --substring "Trying to pull localhost:${REGISTRY_PORT}/myalpine:$tag"
+ done
+
+ # Confirm that 'images -a' lists all of them. <Brackets> help confirm
+ # that tag names are exact, e.g we don't confuse 0.9 and 0.9.1
+ run_buildah images -a --format '<{{.Tag}}>'
+ expect_line_count "${#tags[@]}" "number of tagged images"
+ for tag in "${tags[@]}"; do
+ expect_output --substring "<$tag>"
+ done
+
+ # Finally, make sure that there's actually one and exactly one image
+ run_buildah images -q
+ expect_output $alpine_iid "Pulled image has the same IID as original alpine"
+}
+
+# oci: transport: --all-tags is unsupported; an embedded reference in the
+# layout is preserved on pull, and a layout without one yields an unnamed image.
+@test "pull-from-oci-directory" {
+ run_buildah --retry pull $WITH_POLICY_JSON alpine
+
+ run_buildah 125 pull --all-tags $WITH_POLICY_JSON oci:${TEST_SCRATCH_DIR}/alpine
+ expect_output --substring "pulling all tags is not supported for oci transport"
+
+ # Create one OCI image with reference and one without. The first is expected
+ # to preserve the reference while the latter should be unnamed.
+ name="foo.com/name"
+ tag="tag"
+ withref="oci:${TEST_SCRATCH_DIR}/withref:$name:$tag"
+ noref="oci:${TEST_SCRATCH_DIR}/noref"
+
+ run_buildah push $WITH_POLICY_JSON docker.io/library/alpine:latest $withref
+ run_buildah push $WITH_POLICY_JSON docker.io/library/alpine:latest $noref
+ run_buildah rmi alpine
+
+ # Image without optional reference is unnamed.
+ run_buildah pull -q $WITH_POLICY_JSON $noref
+ run_buildah images --format "{{.Name}}:{{.Tag}}" $output
+ expect_output "<none>:<none>"
+
+ # Image with optional reference is named.
+ run_buildah pull -q $WITH_POLICY_JSON $withref
+ run_buildah images --format "{{.Name}}:{{.Tag}}" $output
+ expect_output "$name:$tag"
+}
+
+# BUILD_REGISTRY_SOURCES policies (blocked list and allowed list) must both
+# deny pulls from docker.io with a matching error message.
+@test "pull-denied-by-registry-sources" {
+ export BUILD_REGISTRY_SOURCES='{"blockedRegistries": ["docker.io"]}'
+
+ run_buildah 125 pull $WITH_POLICY_JSON --registries-conf ${TEST_SOURCES}/registries.conf.hub --quiet busybox
+ expect_output --substring 'registry "docker.io" denied by policy: it is in the blocked registries list'
+
+ run_buildah 125 pull $WITH_POLICY_JSON --registries-conf ${TEST_SOURCES}/registries.conf.hub --quiet busybox
+ expect_output --substring 'registry "docker.io" denied by policy: it is in the blocked registries list'
+
+ export BUILD_REGISTRY_SOURCES='{"allowedRegistries": ["some-other-registry.example.com"]}'
+
+ run_buildah 125 pull $WITH_POLICY_JSON --registries-conf ${TEST_SOURCES}/registries.conf.hub --quiet busybox
+ expect_output --substring 'registry "docker.io" denied by policy: not in allowed registries list'
+
+ run_buildah 125 pull $WITH_POLICY_JSON --registries-conf ${TEST_SOURCES}/registries.conf.hub --quiet busybox
+ expect_output --substring 'registry "docker.io" denied by policy: not in allowed registries list'
+}
+
+# A missing --authfile path must make pull fail (exit 125).
+@test "pull should fail with nonexistent authfile" {
+ run_buildah 125 pull --authfile /tmp/nonexistent $WITH_POLICY_JSON alpine
+}
+
+# JWE-encrypted image in a local oci: layout: pulling without a key or with
+# the wrong key must fail; pulling with the matching private key succeeds.
+@test "pull encrypted local image" {
+ _prefetch busybox
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 1024
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey2.pem 1024
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ run_buildah push $WITH_POLICY_JSON --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub busybox oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+
+ # Try to pull encrypted image without key should fail
+ run_buildah 1 pull $WITH_POLICY_JSON oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+ expect_output --substring "archive/tar: invalid tar header"
+
+ # Try to pull encrypted image with wrong key should fail
+ run_buildah 125 pull $WITH_POLICY_JSON --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey2.pem oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+ expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+
+ # Providing the right key should succeed
+ run_buildah pull $WITH_POLICY_JSON --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey.pem oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+# Same as the local-image encryption test, but the encrypted image is pushed
+# to and pulled from a local registry over docker://.
+@test "pull encrypted registry image" {
+ _prefetch busybox
+ start_registry
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 1024
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey2.pem 1024
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ run_buildah push $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+ # Try to pull encrypted image without key should fail
+ run_buildah 1 pull $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ expect_output --substring "archive/tar: invalid tar header"
+
+ # Try to pull encrypted image with wrong key should fail, with diff. msg
+ run_buildah 125 pull $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey2.pem docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+
+ # Providing the right key should succeed
+ run_buildah pull $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey.pem docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+ run_buildah rmi localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+# Like the registry encryption test, but the encrypted image is produced by
+# "buildah commit --encryption-key" straight to the registry.
+@test "pull encrypted registry image from commit" {
+ _prefetch busybox
+ start_registry
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 1024
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey2.pem 1024
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid=$output
+ run_buildah commit --iidfile /dev/null --tls-verify=false --creds testuser:testpassword $WITH_POLICY_JSON --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub -q $cid docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+ # Try to pull encrypted image without key should fail
+ run_buildah 1 pull $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ expect_output --substring "archive/tar: invalid tar header"
+
+ # Try to pull encrypted image with wrong key should fail
+ run_buildah 125 pull $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey2.pem docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ expect_output --substring "decrypting layer .* no suitable key unwrapper found or none of the private keys could be used for decryption"
+
+ # Providing the right key should succeed
+ run_buildah pull $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --decryption-key ${TEST_SCRATCH_DIR}/tmp/mykey.pem docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+ run_buildah rmi localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+# Pulling into a deliberately-full (5M tmpfs) storage root must fail with ENOSPC.
+@test "pull image into a full storage" {
+ skip_if_rootless_environment
+ mkdir /tmp/buildah-test
+ mount -t tmpfs -o size=5M tmpfs /tmp/buildah-test
+ run dd if=/dev/urandom of=/tmp/buildah-test/full
+ run_buildah 125 --root=/tmp/buildah-test pull $WITH_POLICY_JSON alpine
+ expect_output --substring "no space left on device"
+ umount /tmp/buildah-test
+ rm -rf /tmp/buildah-test
+}
+
+# Credentials written by "buildah login --authfile" must authenticate a
+# subsequent pull from the local registry.
+@test "pull with authfile" {
+ _prefetch busybox
+ start_registry
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ run_buildah push --creds testuser:testpassword --tls-verify=false busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ run_buildah login --authfile ${TEST_SCRATCH_DIR}/tmp/test.auth --username testuser --password testpassword --tls-verify=false localhost:${REGISTRY_PORT}
+ run_buildah pull --authfile ${TEST_SCRATCH_DIR}/tmp/test.auth --tls-verify=false docker://localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ run_buildah rmi localhost:${REGISTRY_PORT}/buildah/busybox:latest
+
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+# "pull -q" must emit only the image ID, which rmi can consume directly.
+@test "pull quietly" {
+ run_buildah pull -q busybox
+ iid=$output
+ run_buildah rmi ${iid}
+}
+
+# Exercise --policy bogus/never/always/missing: bogus errors out, never only
+# uses local images, always replaces a local image, missing keeps it.
+@test "pull-policy" {
+ mkdir ${TEST_SCRATCH_DIR}/buildahtest
+ run_buildah 125 pull $WITH_POLICY_JSON --policy bogus alpine
+ expect_output --substring "unsupported pull policy \"bogus\""
+
+ # If image does not exist the never will fail
+ run_buildah 125 pull -q $WITH_POLICY_JSON --policy never alpine
+ expect_output --substring "image not known"
+ run_buildah 125 inspect --type image alpine
+ expect_output --substring "image not known"
+
+ # create bogus alpine image
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah commit -q $cid docker.io/library/alpine
+ iid=$output
+
+ # If image exists, "--policy never" succeeds and iid should not change
+ run_buildah pull -q $WITH_POLICY_JSON --policy never alpine
+ expect_output $iid
+
+ # Pull image by default should change the image id
+ run_buildah pull -q --policy always $WITH_POLICY_JSON alpine
+ assert "$output" != "$iid" "pulled image should have a new IID"
+
+ # Recreate image
+ run_buildah commit -q $cid docker.io/library/alpine
+ iid=$output
+
+ # Make sure missing image works
+ run_buildah pull -q $WITH_POLICY_JSON --policy missing alpine
+ expect_output $iid
+
+ run_buildah rmi alpine
+ run_buildah pull -q $WITH_POLICY_JSON alpine
+ run_buildah inspect alpine
+
+ run_buildah rmi alpine
+ run_buildah pull -q $WITH_POLICY_JSON --policy missing alpine
+ run_buildah inspect alpine
+
+ run_buildah rmi alpine
+}
+
+# --arch selects a manifest-list entry: bogus arches fail, arm64 pulls an
+# image whose Docker and OCI configs both report arm64.
+@test "pull --arch" {
+ mkdir ${TEST_SCRATCH_DIR}/buildahtest
+ run_buildah 125 pull $WITH_POLICY_JSON --arch bogus alpine
+ expect_output --substring "no image found in manifest list"
+
+ # Make sure missing image works
+ run_buildah pull -q $WITH_POLICY_JSON --arch arm64 alpine
+
+ run_buildah inspect --format "{{ .Docker.Architecture }}" alpine
+ expect_output arm64
+
+ run_buildah inspect --format "{{ .OCIv1.Architecture }}" alpine
+ expect_output arm64
+
+ run_buildah rmi alpine
+}
+
+# --platform (os/arch) selects a manifest-list entry, mirroring the --arch test.
+@test "pull --platform" {
+ mkdir ${TEST_SCRATCH_DIR}/buildahtest
+ run_buildah 125 pull $WITH_POLICY_JSON --platform linux/bogus alpine
+ expect_output --substring "no image found in manifest list"
+
+ # Make sure missing image works
+ run_buildah pull -q $WITH_POLICY_JSON --platform linux/arm64 alpine
+
+ run_buildah inspect --format "{{ .Docker.Architecture }}" alpine
+ expect_output arm64
+
+ run_buildah inspect --format "{{ .OCIv1.Architecture }}" alpine
+ expect_output arm64
+
+ run_buildah rmi alpine
+}
+
+# TMPDIR must be honored for pull scratch space: a 1M tmpfs TMPDIR makes the
+# pull fail with ENOSPC, while the same pull without it succeeds.
+@test "pull image with TMPDIR set" {
+ skip_if_rootless_environment
+ testdir=${TEST_SCRATCH_DIR}/buildah-test
+ mkdir -p $testdir
+ mount -t tmpfs -o size=1M tmpfs $testdir
+
+ TMPDIR=$testdir run_buildah 125 pull --policy always $WITH_POLICY_JSON quay.io/libpod/alpine_nginx:latest
+ expect_output --substring "no space left on device"
+
+ run_buildah pull --policy always $WITH_POLICY_JSON quay.io/libpod/alpine_nginx:latest
+ umount $testdir
+ rm -rf $testdir
+}
+
+# With --policy missing, pulling a second architecture must not be treated as
+# "already present": amd64 and arm64 pulls must yield different image IDs.
+@test "pull-policy --missing --arch" {
+ # Make sure missing image works
+ run_buildah pull -q $WITH_POLICY_JSON --policy missing --arch amd64 alpine
+ amdiid=$output
+
+ run_buildah pull -q $WITH_POLICY_JSON --policy missing --arch arm64 alpine
+ armiid=$output
+
+ assert "$amdiid" != "$armiid" "AMD and ARM ids should differ"
+}
diff --git a/tests/push.bats b/tests/push.bats
new file mode 100644
index 0000000..aaa48db
--- /dev/null
+++ b/tests/push.bats
@@ -0,0 +1,226 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "push-flags-order-verification" {
+ run_buildah 125 push img1 dest1 -q
+ check_options_flag_err "-q"
+
+ run_buildah 125 push img1 --tls-verify dest1
+ check_options_flag_err "--tls-verify"
+
+ run_buildah 125 push img1 dest1 arg3 --creds user1:pass1
+ check_options_flag_err "--creds"
+
+ run_buildah 125 push img1 --creds=user1:pass1 dest1
+ check_options_flag_err "--creds=user1:pass1"
+}
+
+@test "push" {
+ skip_if_rootless_environment
+ touch ${TEST_SCRATCH_DIR}/reference-time-file
+ for source in scratch scratch-image; do
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON ${source}
+ cid=$output
+ for format in "" docker oci ; do
+ mkdir -p ${TEST_SCRATCH_DIR}/committed${format:+.${format}}
+ # Force no compression to generate what we push.
+ run_buildah commit -D ${format:+--format ${format}} --reference-time ${TEST_SCRATCH_DIR}/reference-time-file $WITH_POLICY_JSON "$cid" scratch-image${format:+-${format}}
+ run_buildah commit -D ${format:+--format ${format}} --reference-time ${TEST_SCRATCH_DIR}/reference-time-file $WITH_POLICY_JSON "$cid" dir:${TEST_SCRATCH_DIR}/committed${format:+.${format}}
+ mkdir -p ${TEST_SCRATCH_DIR}/pushed${format:+.${format}}
+ run_buildah push -D $WITH_POLICY_JSON scratch-image${format:+-${format}} dir:${TEST_SCRATCH_DIR}/pushed${format:+.${format}}
+ # Re-encode the manifest to lose variations due to different encoders or definitions of structures.
+ imgtype -expected-manifest-type "*" -rebuild-manifest -show-manifest dir:${TEST_SCRATCH_DIR}/committed${format:+.${format}} > ${TEST_SCRATCH_DIR}/manifest.committed${format:+.${format}}
+ imgtype -expected-manifest-type "*" -rebuild-manifest -show-manifest dir:${TEST_SCRATCH_DIR}/pushed${format:+.${format}} > ${TEST_SCRATCH_DIR}/manifest.pushed${format:+.${format}}
+ diff -u ${TEST_SCRATCH_DIR}/manifest.committed${format:+.${format}} ${TEST_SCRATCH_DIR}/manifest.pushed${format:+.${format}}
+ done
+ run_buildah rm "$cid"
+ done
+}
+
+@test "push with manifest type conversion" {
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah push --retry 4 --retry-delay 4s $WITH_POLICY_JSON --format oci alpine dir:$mytmpdir
+ run cat $mytmpdir/manifest.json
+ expect_output --substring "application/vnd.oci.image.config.v1\\+json"
+
+ run_buildah push $WITH_POLICY_JSON --format v2s2 alpine dir:$mytmpdir
+ run cat $mytmpdir/manifest.json
+ expect_output --substring "application/vnd.docker.distribution.manifest.v2\\+json"
+}
+
+@test "push with imageid" {
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah images -q
+ imageid=$output
+ run_buildah push $WITH_POLICY_JSON $imageid dir:$mytmpdir
+}
+
+@test "push with imageid and digest file" {
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah images -q
+ imageid=$output
+ run_buildah push --digestfile=${TEST_SCRATCH_DIR}/digest.txt $WITH_POLICY_JSON $imageid dir:$mytmpdir
+ cat ${TEST_SCRATCH_DIR}/digest.txt
+ test -s ${TEST_SCRATCH_DIR}/digest.txt
+}
+
+@test "push without destination" {
+ _prefetch busybox
+ run_buildah pull $WITH_POLICY_JSON busybox
+ run_buildah 125 push $WITH_POLICY_JSON busybox
+ expect_output --substring "busybox"
+}
+
+@test "push should fail with nonexistent authfile" {
+ _prefetch alpine
+ run_buildah from --quiet --pull $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah images -q
+ imageid=$output
+ run_buildah 125 push $WITH_POLICY_JSON --authfile /tmp/nonexistent $imageid dir:${TEST_SCRATCH_DIR}/my-tmp-dir
+}
+
+@test "push-denied-by-registry-sources" {
+ _prefetch busybox
+
+ export BUILD_REGISTRY_SOURCES='{"blockedRegistries": ["registry.example.com"]}'
+
+ run_buildah from --quiet $WITH_POLICY_JSON --quiet busybox
+ cid=$output
+ run_buildah 125 commit $WITH_POLICY_JSON ${cid} docker://registry.example.com/busierbox
+ expect_output --substring 'commit to registry at "registry.example.com" denied by policy: it is in the blocked registries list'
+
+ run_buildah pull $WITH_POLICY_JSON --quiet busybox
+ run_buildah 125 push $WITH_POLICY_JSON busybox docker://registry.example.com/evenbusierbox
+
+ export BUILD_REGISTRY_SOURCES='{"allowedRegistries": ["some-other-registry.example.com"]}'
+
+ run_buildah from --quiet $WITH_POLICY_JSON --quiet busybox
+ cid=$output
+ run_buildah 125 commit $WITH_POLICY_JSON ${cid} docker://registry.example.com/busierbox
+ expect_output --substring 'commit to registry at "registry.example.com" denied by policy: not in allowed registries list'
+
+ run_buildah pull $WITH_POLICY_JSON --quiet busybox
+ run_buildah 125 push $WITH_POLICY_JSON busybox docker://registry.example.com/evenbusierbox
+ expect_output --substring 'registry "registry.example.com" denied by policy: not in allowed registries list'
+}
+
+
+@test "buildah push image to containers-storage" {
+ _prefetch busybox
+ run_buildah push $WITH_POLICY_JSON busybox containers-storage:newimage:latest
+ run_buildah images
+ expect_output --substring "newimage"
+}
+
+@test "buildah push image to docker-archive and oci-archive" {
+ _prefetch busybox
+ for dest in docker-archive oci-archive; do
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ run_buildah push $WITH_POLICY_JSON busybox $dest:${TEST_SCRATCH_DIR}/tmp/busybox.tar:latest
+ ls ${TEST_SCRATCH_DIR}/tmp/busybox.tar
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+ done
+}
+
+@test "buildah push image to docker and docker registry" {
+ skip_if_no_docker
+
+ _prefetch busybox
+ run_buildah push $WITH_POLICY_JSON busybox docker-daemon:buildah/busybox:latest
+ run docker images
+ expect_output --substring "buildah/busybox"
+ docker rmi buildah/busybox
+
+ start_registry
+ run_buildah push $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword docker.io/busybox:latest docker://localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ docker login localhost:${REGISTRY_PORT} --username testuser --password testpassword
+ docker pull localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ output=$(docker images)
+ expect_output --substring "buildah/busybox"
+ docker rmi localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ docker logout localhost:${REGISTRY_PORT}
+}
+
+@test "buildah oci encrypt and push local oci" {
+ skip_if_rootless_environment
+ _prefetch busybox
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 1024
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ run_buildah push $WITH_POLICY_JSON --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub busybox oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc
+ imgtype -show-manifest oci:${TEST_SCRATCH_DIR}/tmp/busybox_enc | grep "+encrypted"
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "buildah oci encrypt and push registry" {
+ _prefetch busybox
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ start_registry
+ openssl genrsa -out ${TEST_SCRATCH_DIR}/tmp/mykey.pem 1024
+ openssl rsa -in ${TEST_SCRATCH_DIR}/tmp/mykey.pem -pubout > ${TEST_SCRATCH_DIR}/tmp/mykey.pub
+ run_buildah push $WITH_POLICY_JSON --tls-verify=false --creds testuser:testpassword --encryption-key jwe:${TEST_SCRATCH_DIR}/tmp/mykey.pub busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox_encrypted:latest
+ # This test just checks the ability to push an image;
+ # there is no good way to test the details of the image except with ./buildah pull, so that test will be in pull.bats
+ rm -rf ${TEST_SCRATCH_DIR}/tmp
+}
+
+@test "buildah push to registry allowed by BUILD_REGISTRY_SOURCES" {
+ _prefetch busybox
+ start_registry
+ export BUILD_REGISTRY_SOURCES='{"insecureRegistries": ["localhost:${REGISTRY_PORT}"]}'
+
+ run_buildah 125 push --creds testuser:testpassword $WITH_POLICY_JSON --tls-verify=true busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ expect_output --substring "certificate signed by unknown authority"
+
+ run_buildah push --creds testuser:testpassword $WITH_POLICY_JSON --cert-dir ${TEST_SCRATCH_DIR}/registry busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox:latest
+}
+
+@test "push with authfile" {
+ _prefetch busybox
+ mkdir ${TEST_SCRATCH_DIR}/tmp
+ start_registry
+ run_buildah login --authfile ${TEST_SCRATCH_DIR}/tmp/test.auth --username testuser --password testpassword --tls-verify=false localhost:${REGISTRY_PORT}
+ run_buildah push --authfile ${TEST_SCRATCH_DIR}/tmp/test.auth $WITH_POLICY_JSON --tls-verify=false busybox docker://localhost:${REGISTRY_PORT}/buildah/busybox:latest
+ expect_output --substring "Copying"
+
+ run_buildah manifest create localhost:${REGISTRY_PORT}/testmanifest
+ run_buildah manifest push --authfile ${TEST_SCRATCH_DIR}/tmp/test.auth $WITH_POLICY_JSON --tls-verify=false localhost:${REGISTRY_PORT}/testmanifest
+ expect_output --substring "Writing manifest list to image destination"
+}
+
+@test "push with --quiet" {
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir
+ mkdir -p $mytmpdir
+
+ _prefetch alpine
+ run_buildah push --quiet $WITH_POLICY_JSON alpine dir:$mytmpdir
+ expect_output ""
+}
+
+@test "push with --compression-format" {
+ _prefetch alpine
+ run_buildah from --quiet --pull alpine
+ cid=$output
+ run_buildah images -q
+ imageid=$output
+ run_buildah push --format oci --compression-format zstd:chunked $imageid dir:${TEST_SCRATCH_DIR}/zstd
+ # Verify there is some zstd compressed layer.
+ grep application/vnd.oci.image.layer.v1.tar+zstd ${TEST_SCRATCH_DIR}/zstd/manifest.json
+}
diff --git a/tests/registries.bats b/tests/registries.bats
new file mode 100644
index 0000000..5879e62
--- /dev/null
+++ b/tests/registries.bats
@@ -0,0 +1,32 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "registries" {
+ registrypair() {
+ image1=$1
+ image2=$2
+
+ # Create a container by specifying the image with one name.
+ run_buildah --retry from --quiet --pull=false $WITH_POLICY_JSON $image1
+ cid1=$output
+
+ # Create a container by specifying the image with another name.
+ run_buildah --retry from --quiet --pull=false $WITH_POLICY_JSON $image2
+ cid2=$output
+
+ # Get their image IDs. They should be the same one.
+ run_buildah inspect -f "{{.FromImageID}}" $cid1
+ iid1=$output
+ run_buildah inspect -f "{{.FromImageID}}" $cid2
+ expect_output $iid1 "$image2.FromImageID == $image1.FromImageID"
+
+ # Clean up.
+ run_buildah rm -a
+ run_buildah rmi -a
+ }
+ # Test with pairs of short and fully-qualified names that should be the same image.
+ registrypair busybox docker.io/busybox
+ registrypair busybox docker.io/library/busybox
+ registrypair fedora-minimal:32 registry.fedoraproject.org/fedora-minimal:32
+}
diff --git a/tests/registries.conf b/tests/registries.conf
new file mode 100644
index 0000000..010696a
--- /dev/null
+++ b/tests/registries.conf
@@ -0,0 +1,26 @@
+# Note that changing the order here may break tests.
+unqualified-search-registries = ['docker.io', 'quay.io', 'registry.fedoraproject.org']
+
+[[registry]]
+# As of Nov. 2020, Docker rate-limits image pulling. To avoid hitting these
+# limits while testing, always use the google mirror for qualified and
+# unqualified `docker.io` images.
+# Ref: https://cloud.google.com/container-registry/docs/pulling-cached-images
+prefix="docker.io"
+location="mirror.gcr.io"
+
+# 2020-10-27 a number of images are not present in gcr.io, and podman
+# barfs spectacularly when trying to fetch them. We've hand-copied
+# those to quay, using skopeo copy --all ...
+[[registry]]
+location="docker.io/library"
+mirror=[{location="quay.io/libpod"}]
+
+# 2021-03-23 these are used in buildah system tests, but not (yet?)
+# listed in the global shortnames.conf.
+[aliases]
+busybox="docker.io/library/busybox"
+ubuntu="docker.io/library/ubuntu"
+php="docker.io/library/php"
+alpine="docker.io/library/alpine"
+debian="docker.io/library/debian"
diff --git a/tests/registries.conf.block b/tests/registries.conf.block
new file mode 100644
index 0000000..70855fb
--- /dev/null
+++ b/tests/registries.conf.block
@@ -0,0 +1,25 @@
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to TOML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries.search', 'registries.insecure',
+# and 'registries.block'.
+
+[registries.search]
+registries = ['docker.io', 'registry.fedoraproject.org', 'registry.access.redhat.com']
+
+# If you need to access insecure registries, add the registry's fully-qualified name.
+# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
+[registries.insecure]
+registries = []
+
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registry's fully-qualified name.
+#
+# Docker only
+[registries.block]
+registries = ['docker.io', 'registry.fedoraproject.org', 'registry.access.redhat.com']
diff --git a/tests/registries.conf.hub b/tests/registries.conf.hub
new file mode 100644
index 0000000..46ee5b5
--- /dev/null
+++ b/tests/registries.conf.hub
@@ -0,0 +1,25 @@
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to TOML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries.search', 'registries.insecure',
+# and 'registries.block'.
+
+[registries.search]
+registries = ['docker.io']
+
+# If you need to access insecure registries, add the registry's fully-qualified name.
+# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
+[registries.insecure]
+registries = []
+
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registry's fully-qualified name.
+#
+# Docker only
+[registries.block]
+registries = []
diff --git a/tests/rename.bats b/tests/rename.bats
new file mode 100644
index 0000000..b495377
--- /dev/null
+++ b/tests/rename.bats
@@ -0,0 +1,37 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "rename" {
+ _prefetch alpine
+ new_name=test-container
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah containers --format "{{.ContainerName}}"
+ old_name=$output
+ run_buildah rename ${cid} ${new_name}
+
+ run_buildah containers --format "{{.ContainerName}}"
+ expect_output --substring "test-container"
+
+ run_buildah containers --quiet -f name=${old_name}
+ expect_output ""
+}
+
+@test "rename same name as current name" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 125 rename ${cid} ${cid}
+ expect_output 'Error: renaming a container with the same name as its current name'
+}
+
+@test "rename same name as other container name" {
+ _prefetch alpine busybox
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
+ cid2=$output
+ run_buildah 125 rename ${cid1} ${cid2}
+ expect_output --substring " already in use by "
+}
diff --git a/tests/rm.bats b/tests/rm.bats
new file mode 100644
index 0000000..121c4b0
--- /dev/null
+++ b/tests/rm.bats
@@ -0,0 +1,71 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "rm-flags-order-verification" {
+ run_buildah 125 rm cnt1 -a
+ check_options_flag_err "-a"
+
+ run_buildah 125 rm cnt1 --all cnt2
+ check_options_flag_err "--all"
+}
+
+@test "remove multiple containers errors" {
+ run_buildah 125 rm mycontainer1 mycontainer2 mycontainer3
+ expect_output --from="${lines[0]}" "removing container \"mycontainer1\": container not known" "output line 1"
+ expect_output --from="${lines[1]}" "removing container \"mycontainer2\": container not known" "output line 2"
+ expect_output --from="${lines[2]}" "Error: removing container \"mycontainer3\": container not known" "output line 3"
+ expect_line_count 3
+}
+
+@test "remove one container" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah rm "$cid"
+}
+
+@test "remove multiple containers" {
+ _prefetch alpine busybox
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid3=$output
+ run_buildah rm "$cid2" "$cid3"
+}
+
+@test "remove all containers" {
+ _prefetch alpine busybox
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid1=$output
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid3=$output
+ run_buildah rm -a
+}
+
+@test "use conflicting commands to remove containers" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 125 rm -a "$cid"
+ expect_output --substring "when using the --all switch, you may not pass any containers names or IDs"
+}
+
+@test "remove a single tagged manifest list" {
+ _prefetch busybox
+ run_buildah manifest create manifestsample
+ run_buildah manifest add manifestsample busybox
+ run_buildah tag manifestsample manifestsample2
+ run_buildah manifest rm manifestsample2
+ # Output should only untag the listed manifest, nothing else
+ expect_output "untagged: localhost/manifestsample2:latest"
+ run_buildah manifest rm manifestsample
+ # Since the actual list is being removed, the image ID of the list is also printed,
+ # so check for a substring instead of an exact match
+ expect_output --substring "untagged: localhost/manifestsample:latest"
+ # Check if busybox is still there
+ run_buildah images
+ expect_output --substring "busybox"
+}
diff --git a/tests/rmi.bats b/tests/rmi.bats
new file mode 100644
index 0000000..c53935c
--- /dev/null
+++ b/tests/rmi.bats
@@ -0,0 +1,248 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "rmi-flags-order-verification" {
+ run_buildah 125 rmi img1 -f
+ check_options_flag_err "-f"
+
+ run_buildah 125 rmi img1 --all img2
+ check_options_flag_err "--all"
+
+ run_buildah 125 rmi img1 img2 --force
+ check_options_flag_err "--force"
+}
+
+@test "remove one image" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah rm "$cid"
+ run_buildah rmi alpine
+ run_buildah images -q
+ expect_output ""
+}
+
+@test "remove multiple images" {
+ _prefetch alpine busybox
+ run_buildah from --pull=false --quiet $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah from --pull=false --quiet $WITH_POLICY_JSON busybox
+ cid3=$output
+ run_buildah 125 rmi alpine busybox
+ run_buildah images -q
+ assert "$output" != "" "images -q"
+
+ run_buildah rmi -f alpine busybox
+ run_buildah images -q
+ expect_output ""
+}
+
+@test "remove multiple non-existent images errors" {
+ run_buildah 125 rmi image1 image2 image3
+ expect_output --from="${lines[1]}" --substring " image1: image not known"
+ expect_output --from="${lines[2]}" --substring " image2: image not known"
+ expect_output --from="${lines[3]}" --substring " image3: image not known"
+}
+
+@test "remove all images" {
+ _prefetch alpine busybox
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid1=$output
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid3=$output
+ run_buildah rmi -a -f
+ run_buildah images -q
+ expect_output ""
+
+ _prefetch alpine busybox
+ run_buildah from $WITH_POLICY_JSON scratch
+ cid1=$output
+ run_buildah from --quiet $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah from --quiet $WITH_POLICY_JSON busybox
+ cid3=$output
+ run_buildah 125 rmi --all
+ run_buildah images -q
+ assert "$output" != "" "images -q"
+
+ run_buildah rmi --all --force
+ run_buildah images -q
+ expect_output ""
+}
+
+@test "use prune to remove dangling images" {
+ _prefetch busybox
+
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-randomfile
+
+ run_buildah from --pull=false --quiet $WITH_POLICY_JSON busybox
+ cid=$output
+
+ run_buildah images -q
+ expect_line_count 1
+
+ run_buildah mount $cid
+ root=$output
+ cp ${TEST_SCRATCH_DIR}/randomfile $root/randomfile
+ run_buildah unmount $cid
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+
+ run_buildah images -q
+ expect_line_count 2
+
+ run_buildah mount $cid
+ root=$output
+ cp ${TEST_SCRATCH_DIR}/other-randomfile $root/other-randomfile
+ run_buildah unmount $cid
+ run_buildah commit $WITH_POLICY_JSON $cid containers-storage:new-image
+
+ run_buildah images -q
+ expect_line_count 3
+
+ run_buildah rmi --prune
+
+ run_buildah images -q
+ expect_line_count 2
+
+ run_buildah rmi --all --force
+ run_buildah images -q
+ expect_output ""
+}
+
+@test "use prune to remove dangling images with parent" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ createrandom ${TEST_SCRATCH_DIR}/other-randomfile
+
+ run_buildah from --quiet $WITH_POLICY_JSON scratch
+ cid=$output
+
+ run_buildah images -q -a
+ expect_line_count 0
+
+ run_buildah mount $cid
+ root=$output
+ cp ${TEST_SCRATCH_DIR}/randomfile $root/randomfile
+ run_buildah unmount $cid
+ run_buildah commit --quiet $WITH_POLICY_JSON $cid
+ image=$output
+ run_buildah rm $cid
+
+ run_buildah images -q -a
+ expect_line_count 1
+
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ cp ${TEST_SCRATCH_DIR}/other-randomfile $root/other-randomfile
+ run_buildah unmount $cid
+ run_buildah commit $WITH_POLICY_JSON $cid
+ run_buildah rm $cid
+
+ run_buildah images -q -a
+ expect_line_count 2
+
+ run_buildah rmi --prune
+
+ run_buildah images -q -a
+ expect_line_count 0
+
+ run_buildah images -q -a
+ expect_output ""
+}
+
+@test "attempt to prune non-dangling empty images" {
+ # Regression test for containers/podman/issues/10832
+ ctxdir=${TEST_SCRATCH_DIR}/bud
+ mkdir -p $ctxdir
+ cat >$ctxdir/Dockerfile <<EOF
+FROM scratch
+ENV test1=test1
+ENV test2=test2
+EOF
+
+ run_buildah bud -t test $ctxdir
+ run_buildah rmi --prune
+ expect_output "" "no image gets pruned"
+}
+
+@test "use conflicting commands to remove images" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah rm "$cid"
+ run_buildah 125 rmi -a alpine
+ expect_output --substring "when using the --all switch, you may not pass any images names or IDs"
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah rm "$cid"
+ run_buildah 125 rmi -p alpine
+ expect_output --substring "when using the --prune switch, you may not pass any images names or IDs"
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah rm "$cid"
+ run_buildah 125 rmi -a -p
+ expect_output --substring "when using the --all switch, you may not use --prune switch"
+ run_buildah rmi --all
+}
+
+@test "remove image that is a parent of another image" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah config --entrypoint '[ "/ENTRYPOINT" ]' $cid
+ run_buildah commit $WITH_POLICY_JSON $cid new-image
+ run_buildah rm -a
+
+ # Since it has children, alpine will only be untagged (Podman compat) but not
+ # marked as removed. However, it won't show up in the image list anymore.
+ run_buildah rmi alpine
+ expect_output --substring "untagged: "
+ run_buildah images -q
+ expect_line_count 1
+ run_buildah images -q -a
+ expect_line_count 1
+}
+
+@test "rmi with cached images" {
+ _prefetch alpine
+ run_buildah bud $WITH_POLICY_JSON --layers -t test1 $BUDFILES/use-layers
+ run_buildah images -a -q
+ expect_line_count 7
+ run_buildah bud $WITH_POLICY_JSON --layers -t test2 -f Dockerfile.2 $BUDFILES/use-layers
+ run_buildah images -a -q
+ expect_line_count 9
+ run_buildah rmi test2
+ run_buildah images -a -q
+ expect_line_count 7
+ run_buildah rmi test1
+ run_buildah images -a -q
+ expect_line_count 1
+ run_buildah bud $WITH_POLICY_JSON --layers -t test3 -f Dockerfile.2 $BUDFILES/use-layers
+ run_buildah rmi alpine
+ run_buildah rmi test3
+ run_buildah images -a -q
+ expect_output ""
+}
+
+@test "rmi image that is created from another named image" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=true $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah config --entrypoint '[ "/ENTRYPOINT" ]' $cid
+ run_buildah commit $WITH_POLICY_JSON $cid new-image
+ run_buildah from --quiet --pull=true $WITH_POLICY_JSON new-image
+ cid=$output
+ run_buildah config --env 'foo=bar' $cid
+ run_buildah commit $WITH_POLICY_JSON $cid new-image-2
+ run_buildah rm -a
+ run_buildah rmi new-image-2
+ run_buildah images -q
+ expect_line_count 2
+}
diff --git a/tests/run.bats b/tests/run.bats
new file mode 100644
index 0000000..e20d881
--- /dev/null
+++ b/tests/run.bats
@@ -0,0 +1,954 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "run" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ ${OCI} --version
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+ run_buildah config --workingdir /tmp $cid
+ run_buildah run $cid pwd
+ expect_output "/tmp"
+ run_buildah config --workingdir /root $cid
+ run_buildah run $cid pwd
+ expect_output "/root"
+ cp ${TEST_SCRATCH_DIR}/randomfile $root/tmp/
+ run_buildah run $cid cp /tmp/randomfile /tmp/other-randomfile
+ test -s $root/tmp/other-randomfile
+ cmp ${TEST_SCRATCH_DIR}/randomfile $root/tmp/other-randomfile
+
+ seq 100000 | buildah run $cid -- sh -c 'while read i; do echo $i; done'
+}
+
+@test "run--args" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+
+ # This should fail, because buildah run doesn't have a -n flag.
+ run_buildah 125 run -n $cid echo test
+
+ # This should succeed, because buildah run stops caring at the --, which is preserved as part of the command.
+ run_buildah run $cid echo -- -n test
+ expect_output -- "-- -n test"
+
+ # This should succeed, because buildah run stops caring at the --, which is not part of the command.
+ run_buildah run $cid -- echo -n -- test
+ expect_output -- "-- test"
+
+ # This should succeed, because buildah run stops caring at the --.
+ run_buildah run $cid -- echo -- -n test --
+ expect_output -- "-- -n test --"
+
+ # This should succeed, because buildah run stops caring at the --.
+ run_buildah run $cid -- echo -n "test"
+ expect_output "test"
+}
+
+@test "run-cmd" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah config --workingdir /tmp $cid
+
+
+ # Configured entrypoint/cmd shouldn't modify behaviour of run with no arguments
+
+ # empty entrypoint, configured cmd, empty run arguments
+ run_buildah config --entrypoint "" $cid
+ run_buildah config --cmd pwd $cid
+ run_buildah 125 run $cid
+ expect_output --substring "command must be specified" "empty entrypoint, cmd, no args"
+
+ # empty entrypoint, configured cmd, empty run arguments, end parsing option
+ run_buildah config --entrypoint "" $cid
+ run_buildah config --cmd pwd $cid
+ run_buildah 125 run $cid --
+ expect_output --substring "command must be specified" "empty entrypoint, cmd, no args, --"
+
+ # configured entrypoint, empty cmd, empty run arguments
+ run_buildah config --entrypoint pwd $cid
+ run_buildah config --cmd "" $cid
+ run_buildah 125 run $cid
+ expect_output --substring "command must be specified" "entrypoint, empty cmd, no args"
+
+ # configured entrypoint, empty cmd, empty run arguments, end parsing option
+ run_buildah config --entrypoint pwd $cid
+ run_buildah config --cmd "" $cid
+ run_buildah 125 run $cid --
+ expect_output --substring "command must be specified" "entrypoint, empty cmd, no args, --"
+
+ # configured entrypoint only, empty run arguments
+ run_buildah config --entrypoint pwd $cid
+ run_buildah 125 run $cid
+ expect_output --substring "command must be specified" "entrypoint, no args"
+
+ # configured entrypoint only, empty run arguments, end parsing option
+ run_buildah config --entrypoint pwd $cid
+ run_buildah 125 run $cid --
+ expect_output --substring "command must be specified" "entrypoint, no args, --"
+
+ # configured cmd only, empty run arguments
+ run_buildah config --cmd pwd $cid
+ run_buildah 125 run $cid
+ expect_output --substring "command must be specified" "cmd, no args"
+
+ # configured cmd only, empty run arguments, end parsing option
+ run_buildah config --cmd pwd $cid
+ run_buildah 125 run $cid --
+ expect_output --substring "command must be specified" "cmd, no args, --"
+
+ # configured entrypoint, configured cmd, empty run arguments
+ run_buildah config --entrypoint "pwd" $cid
+ run_buildah config --cmd "whoami" $cid
+ run_buildah 125 run $cid
+ expect_output --substring "command must be specified" "entrypoint, cmd, no args"
+
+ # configured entrypoint, configured cmd, empty run arguments, end parsing option
+ run_buildah config --entrypoint "pwd" $cid
+ run_buildah config --cmd "whoami" $cid
+ run_buildah 125 run $cid --
+ expect_output --substring "command must be specified" "entrypoint, cmd, no args"
+
+
+ # Configured entrypoint/cmd shouldn't modify behaviour of run with argument
+ # Note: entrypoint and cmd can be invalid in below tests as they should never execute
+
+ # empty entrypoint, configured cmd, configured run arguments
+ run_buildah config --entrypoint "" $cid
+ run_buildah config --cmd "/invalid/cmd" $cid
+ run_buildah run $cid -- pwd
+ expect_output "/tmp" "empty entrypoint, invalid cmd, pwd"
+
+ # configured entrypoint, empty cmd, configured run arguments
+ run_buildah config --entrypoint "/invalid/entrypoint" $cid
+ run_buildah config --cmd "" $cid
+ run_buildah run $cid -- pwd
+ expect_output "/tmp" "invalid entrypoint, empty cmd, pwd"
+
+ # configured entrypoint only, configured run arguments
+ run_buildah config --entrypoint "/invalid/entrypoint" $cid
+ run_buildah run $cid -- pwd
+ expect_output "/tmp" "invalid entrypoint, no cmd(??), pwd"
+
+ # configured cmd only, configured run arguments
+ run_buildah config --cmd "/invalid/cmd" $cid
+ run_buildah run $cid -- pwd
+ expect_output "/tmp" "invalid cmd, no entrypoint(??), pwd"
+
+ # configured entrypoint, configured cmd, configured run arguments
+ run_buildah config --entrypoint "/invalid/entrypoint" $cid
+ run_buildah config --cmd "/invalid/cmd" $cid
+ run_buildah run $cid -- pwd
+ expect_output "/tmp" "invalid cmd & entrypoint, pwd"
+}
+
+# Helper for run-user test. Generates a UID or GID that is not present
+# in the given idfile (mounted /etc/passwd or /etc/group)
+function random_unused_id() {
+ local idfile=$1
+
+ while :;do
+ id=$RANDOM
+ if ! fgrep -q :$id: $idfile; then
+ echo $id
+ return
+ fi
+ done
+}
+
+function configure_and_check_user() {
+ local setting=$1
+ local expect_u=$2
+ local expect_g=$3
+
+ run_buildah config -u "$setting" $cid
+ run_buildah run -- $cid id -u
+ expect_output "$expect_u" "id -u ($setting)"
+
+ run_buildah run -- $cid id -g
+ expect_output "$expect_g" "id -g ($setting)"
+}
+
+@test "run-user" {
+ skip_if_no_runtime
+
+ # User-name lookups require a cgo-enabled buildah build — skip otherwise.
+ eval $(go env)
+ echo CGO_ENABLED=${CGO_ENABLED}
+ if test "$CGO_ENABLED" -ne 1; then
+ skip "CGO_ENABLED = '$CGO_ENABLED'"
+ fi
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah mount $cid
+ root=$output
+
+ # Add a user and group guaranteed not to collide with entries the image
+ # already carries (random_unused_id checks the mounted passwd/group).
+ testuser=jimbo
+ testbogususer=nosuchuser
+ testgroup=jimbogroup
+ testuid=$(random_unused_id $root/etc/passwd)
+ testotheruid=$(random_unused_id $root/etc/passwd)
+ testgid=$(random_unused_id $root/etc/group)
+ testgroupid=$(random_unused_id $root/etc/group)
+ echo "$testuser:x:$testuid:$testgid:Jimbo Jenkins:/home/$testuser:/bin/sh" >> $root/etc/passwd
+ echo "$testgroup:x:$testgroupid:" >> $root/etc/group
+
+ configure_and_check_user "" 0 0
+ configure_and_check_user "${testuser}" $testuid $testgid
+ configure_and_check_user "${testuid}" $testuid $testgid
+ configure_and_check_user "${testuser}:${testgroup}" $testuid $testgroupid
+ configure_and_check_user "${testuid}:${testgroup}" $testuid $testgroupid
+ configure_and_check_user "${testotheruid}:${testgroup}" $testotheruid $testgroupid
+ configure_and_check_user "${testotheruid}" $testotheruid 0
+ configure_and_check_user "${testuser}:${testgroupid}" $testuid $testgroupid
+ configure_and_check_user "${testuid}:${testgroupid}" $testuid $testgroupid
+
+ # A user that is not in /etc/passwd must make "run" fail (exit 125).
+ run_buildah config -u ${testbogususer} $cid
+ run_buildah 125 run -- $cid id -u
+ expect_output --substring "unknown user" "id -u (bogus user)"
+ run_buildah 125 run -- $cid id -g
+ expect_output --substring "unknown user" "id -g (bogus user)"
+
+ # Replace passwd with a symlink that cannot resolve inside the rootfs
+ # (it becomes self-referential there), so name lookup must fail.
+ ln -vsf /etc/passwd $root/etc/passwd
+ run_buildah config -u ${testuser}:${testgroup} $cid
+ run_buildah 125 run -- $cid id -u
+ echo "$output"
+ expect_output --substring "unknown user" "run as unknown user"
+}
+
+@test "run --env" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah config --env foo=foo $cid
+
+ # Ensure foo=foo from `buildah config`
+ run_buildah run $cid -- /bin/sh -c 'echo $foo'
+ expect_output "foo"
+
+ # Ensure foo=bar from --env override
+ run_buildah run --env foo=bar $cid -- /bin/sh -c 'echo $foo'
+ expect_output "bar"
+
+ # --env with no '=value' imports the variable from buildah's own
+ # environment (foo=baz here)
+ foo=baz run_buildah run --env foo $cid -- /bin/sh -c 'echo $foo'
+ expect_output "baz"
+
+ # Ensure that the --env override did not persist
+ run_buildah run $cid -- /bin/sh -c 'echo $foo'
+ expect_output "foo"
+}
+
+@test "run --group-add" {
+ skip_if_no_runtime
+ id=$RANDOM
+
+ _prefetch alpine
+ run_buildah from --group-add $id --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid id -G
+ expect_output --substring "$id"
+
+ # keep-groups asks the runtime to keep buildah's own supplemental groups;
+ # rootless, these show up as 65534 — presumably the overflow ID for groups
+ # unmapped in the user namespace (confirm).
+ if is_rootless && has_supplemental_groups; then
+ run_buildah from --group-add keep-groups --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid id -G
+ expect_output --substring "65534"
+ fi
+}
+
+@test "run --hostname" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ ${OCI} --version
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # The default hostname should be something other than the name we set below.
+ run_buildah run $cid hostname
+ [ "$output" != "foobar" ]
+ run_buildah run --hostname foobar $cid hostname
+ expect_output "foobar"
+}
+
+@test "run should also override /etc/hostname" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ ${OCI} --version
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run --hostname foobar $cid hostname
+ expect_output "foobar"
+ hostname=$output
+ # /etc/hostname must agree with what hostname(1) reports.
+ run_buildah run --hostname foobar $cid cat /etc/hostname
+ expect_output $hostname
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah inspect --format "{{ .ContainerID }}" $cid
+ id=$output
+ # Without --hostname, /etc/hostname gets the first 12 chars of the container ID.
+ run_buildah run $cid cat /etc/hostname
+ expect_output "${id:0:12}"
+ run_buildah run --no-hostname $cid cat /etc/hostname
+ expect_output 'localhost'
+}
+
+@test "run --volume" {
+ skip_if_no_runtime
+
+ # Append the SELinux relabel flag only when SELinux is active.
+ zflag=
+ if which selinuxenabled > /dev/null 2> /dev/null ; then
+ if selinuxenabled ; then
+ zflag=z
+ fi
+ fi
+ ${OCI} --version
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ mkdir -p ${TEST_SCRATCH_DIR}/was-empty
+ # As a baseline, this should succeed.
+ run_buildah run -v ${TEST_SCRATCH_DIR}/was-empty:/var/not-empty${zflag:+:${zflag}} $cid touch /var/not-empty/testfile
+ # Parsing comma-separated options should also succeed.
+ run_buildah run -v ${TEST_SCRATCH_DIR}/was-empty:/var/not-empty:rw,rshared${zflag:+,${zflag}} $cid touch /var/not-empty/testfile
+ # If we're parsing the options at all, this should be read-only, so it should fail.
+ run_buildah 1 run -v ${TEST_SCRATCH_DIR}/was-empty:/var/not-empty:ro${zflag:+,${zflag}} $cid touch /var/not-empty/testfile
+ # Even if the parent directory doesn't exist yet, this should succeed.
+ run_buildah run -v ${TEST_SCRATCH_DIR}/was-empty:/var/multi-level/subdirectory $cid touch /var/multi-level/subdirectory/testfile
+ # And check the same for file volumes.
+ run_buildah run -v ${TEST_SCRATCH_DIR}/was-empty/testfile:/var/different-multi-level/subdirectory/testfile $cid touch /var/different-multi-level/subdirectory/testfile
+ # Make sure directories show up inside of container on builtin mounts
+ run_buildah run -v ${TEST_SCRATCH_DIR}/was-empty:/run/secrets/testdir $cid ls -ld /run/secrets/testdir
+}
+
+@test "run overlay --volume with custom upper and workdir" {
+ skip_if_no_runtime
+
+ zflag=
+ if which selinuxenabled > /dev/null 2> /dev/null ; then
+ if selinuxenabled ; then
+ zflag=z
+ fi
+ fi
+ ${OCI} --version
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ mkdir -p ${TEST_SCRATCH_DIR}/upperdir
+ mkdir -p ${TEST_SCRATCH_DIR}/workdir
+ mkdir -p ${TEST_SCRATCH_DIR}/lower
+
+ echo 'hello' >> ${TEST_SCRATCH_DIR}/lower/hello
+
+ # As a baseline, this should succeed.
+ run_buildah run -v ${TEST_SCRATCH_DIR}/lower:/test:O,upperdir=${TEST_SCRATCH_DIR}/upperdir,workdir=${TEST_SCRATCH_DIR}/workdir${zflag:+:${zflag}} $cid cat /test/hello
+ expect_output "hello"
+ run_buildah run -v ${TEST_SCRATCH_DIR}/lower:/test:O,upperdir=${TEST_SCRATCH_DIR}/upperdir,workdir=${TEST_SCRATCH_DIR}/workdir${zflag:+:${zflag}} $cid sh -c 'echo "world" > /test/world'
+
+ # Writes through the O (overlay) mount must land in the supplied upperdir.
+ result="$(cat ${TEST_SCRATCH_DIR}/upperdir/world)"
+ test "$result" == "world"
+}
+
+@test "run --volume with U flag" {
+ skip_if_no_runtime
+
+ # Create source volume.
+ mkdir ${TEST_SCRATCH_DIR}/testdata
+
+ # Create the container.
+ _prefetch alpine
+ run_buildah from $WITH_POLICY_JSON alpine
+ ctr="$output"
+
+ # U option: adjusts the mount's ownership for the container user —
+ # TODO confirm exact semantics against buildah-run(1).
+ # Test user can create file in the mounted volume.
+ run_buildah run --user 888:888 --volume ${TEST_SCRATCH_DIR}/testdata:/mnt:z,U "$ctr" touch /mnt/testfile1.txt
+
+ # Test created file has correct UID and GID ownership.
+ run_buildah run --user 888:888 --volume ${TEST_SCRATCH_DIR}/testdata:/mnt:z,U "$ctr" stat -c "%u:%g" /mnt/testfile1.txt
+ expect_output "888:888"
+}
+
+@test "run --user and verify gid in supplemental groups" {
+ skip_if_no_runtime
+
+ # Create the container.
+ _prefetch alpine
+ run_buildah from $WITH_POLICY_JSON alpine
+ ctr="$output"
+
+ # Run with uid:gid 1000:1000 and verify if gid is present in additional groups
+ run_buildah run --user 1000:1000 "$ctr" cat /proc/self/status
+ # gid 1000 must be in additional/supplemental groups
+ # (the "Groups:" line of /proc/self/status lists supplemental group IDs)
+ expect_output --substring "Groups: 1000 "
+}
+
+@test "run --workingdir" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # The image's default working directory is /.
+ run_buildah run $cid pwd
+ expect_output "/"
+ run_buildah run --workingdir /bin $cid pwd
+ expect_output "/bin"
+ # Ensure the /bin workingdir override did not persist
+ run_buildah run $cid pwd
+ expect_output "/"
+}
+
+@test "run --mount" {
+ skip_if_no_runtime
+
+ zflag=
+ if which selinuxenabled > /dev/null 2> /dev/null ; then
+ if selinuxenabled ; then
+ zflag=z
+ fi
+ fi
+ ${OCI} --version
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # The source directory name deliberately contains a colon to exercise option parsing.
+ mkdir -p ${TEST_SCRATCH_DIR}/was:empty
+ # As a baseline, this should succeed.
+ run_buildah run --mount type=tmpfs,dst=/var/tmpfs-not-empty $cid touch /var/tmpfs-not-empty/testfile
+ run_buildah run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty,dst=/var/not-empty,rw${zflag:+,${zflag}} $cid touch /var/not-empty/testfile
+ # If we're parsing the options at all, this should be read-only, so it should fail.
+ run_buildah 1 run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty,dst=/var/not-empty,ro${zflag:+,${zflag}} $cid touch /var/not-empty/testfile
+ # Even if the parent directory doesn't exist yet, this should succeed.
+ run_buildah run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty,dst=/var/multi-level/subdirectory,rw $cid touch /var/multi-level/subdirectory/testfile
+ # And check the same for file volumes.
+ run_buildah run --mount type=bind,src=${TEST_SCRATCH_DIR}/was:empty/testfile,dst=/var/different-multi-level/subdirectory/testfile,rw $cid touch /var/different-multi-level/subdirectory/testfile
+}
+
+@test "run --mount=type=bind with from like buildkit" {
+ skip_if_no_runtime
+ zflag=
+ if which selinuxenabled > /dev/null 2> /dev/null ; then
+ if selinuxenabled ; then
+ skip "skip if selinux enabled, since stages have different selinux label"
+ fi
+ fi
+ # NOTE(review): zflag is computed but never used in this test.
+ run_buildah build -t buildkitbase $WITH_POLICY_JSON -f $BUDFILES/buildkit-mount-from/Dockerfilebuildkitbase $BUDFILES/buildkit-mount-from/
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # from=buildkitbase: the bind source is resolved inside that image.
+ run_buildah run --mount type=bind,source=.,from=buildkitbase,target=/test,z $cid cat /test/hello
+ expect_output --substring "hello"
+ run_buildah rmi -f buildkitbase
+}
+
+@test "run --mount=type=cache like buildkit" {
+ skip_if_no_runtime
+ zflag=
+ if which selinuxenabled > /dev/null 2> /dev/null ; then
+ if selinuxenabled ; then
+ skip "skip if selinux enabled, since stages have different selinux label"
+ fi
+ fi
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # Cache mounts persist between runs: the second run must see what the first wrote.
+ run_buildah run --mount type=cache,target=/test,z $cid sh -c 'echo "hello" > /test/hello && cat /test/hello'
+ run_buildah run --mount type=cache,target=/test,z $cid cat /test/hello
+ expect_output --substring "hello"
+}
+
+@test "run symlinks" {
+ skip_if_no_runtime
+
+ ${OCI} --version
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # A TMPDIR that is reached through a symlink must not break "run".
+ mkdir -p ${TEST_SCRATCH_DIR}/tmp
+ ln -s tmp ${TEST_SCRATCH_DIR}/tmp2
+ export TMPDIR=${TEST_SCRATCH_DIR}/tmp2
+ run_buildah run $cid id
+}
+
+@test "run --cap-add/--cap-drop" {
+ skip_if_no_runtime
+
+ ${OCI} --version
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # Compare effective capability masks (CapEff) under default, add, and drop.
+ # Try with default caps.
+ run_buildah run $cid grep ^CapEff /proc/self/status
+ defaultcaps="$output"
+ # Try adding DAC_OVERRIDE.
+ run_buildah run --cap-add CAP_DAC_OVERRIDE $cid grep ^CapEff /proc/self/status
+ addedcaps="$output"
+ # Try dropping DAC_OVERRIDE.
+ run_buildah run --cap-drop CAP_DAC_OVERRIDE $cid grep ^CapEff /proc/self/status
+ droppedcaps="$output"
+ # Okay, now the "dropped" and "added" should be different.
+ test "$addedcaps" != "$droppedcaps"
+ # And one or the other should be different from the default, with the other being the same.
+ if test "$defaultcaps" == "$addedcaps" ; then
+ test "$defaultcaps" != "$droppedcaps"
+ fi
+ if test "$defaultcaps" == "$droppedcaps" ; then
+ test "$defaultcaps" != "$addedcaps"
+ fi
+}
+
+@test "Check if containers run with correct open files/processes limits" {
+ skip_if_rootless_environment
+ skip_if_no_runtime
+
+ # we need to not use the list of limits that are set in our default
+ # ${TEST_SOURCES}/containers.conf for the sake of other tests, and override
+ # any that might be picked up from system-wide configuration
+ echo '[containers]' > ${TEST_SCRATCH_DIR}/containers.conf
+ echo 'default_ulimits = []' >> ${TEST_SCRATCH_DIR}/containers.conf
+ export CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf
+
+ _prefetch alpine
+ maxpids=$(cat /proc/sys/kernel/pid_max)
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # NOTE(review): assumes the default soft nofile limit is 1024 — confirm
+ # if this ever flakes on a new platform.
+ run_buildah run $cid awk '/open files/{print $4}' /proc/self/limits
+ expect_output 1024 "limits: open files (unlimited)"
+ run_buildah run $cid awk '/processes/{print $3}' /proc/self/limits
+ expect_output ${maxpids} "limits: processes (unlimited)"
+ run_buildah rm $cid
+
+ run_buildah from --quiet --ulimit nofile=300:400 --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid awk '/open files/{print $4}' /proc/self/limits
+ expect_output "300" "limits: open files (w/file limit)"
+ run_buildah run $cid awk '/processes/{print $3}' /proc/self/limits
+ expect_output ${maxpids} "limits: processes (w/file limit)"
+ run_buildah rm $cid
+
+ run_buildah from --quiet --ulimit nproc=100:200 --ulimit nofile=300:400 --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run $cid awk '/open files/{print $4}' /proc/self/limits
+ expect_output "300" "limits: open files (w/file & proc limits)"
+ run_buildah run $cid awk '/processes/{print $3}' /proc/self/limits
+ expect_output "100" "limits: processes (w/file & proc limits)"
+
+ unset CONTAINERS_CONF
+}
+
+@test "run-builtin-volume-omitted" {
+ # This image is known to include a volume, but not include the mountpoint
+ # in the image.
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON quay.io/libpod/registry:volume_omitted
+ cid=$output
+ run_buildah mount $cid
+ mnt=$output
+ # By default, the mountpoint should not be there.
+ run test -d "$mnt"/var/lib/registry
+ echo "$output"
+ [ "$status" -ne 0 ]
+ # "run" is expected to create the missing mountpoint on demand.
+ run_buildah run $cid ls -1 /var/lib
+ expect_output --substring "registry"
+
+ # Double-check that the mountpoint is there.
+ test -d "$mnt"/var/lib/registry
+}
+
+@test "run-exit-status" {
+ skip_if_no_runtime
+
+ # The command's exit status must be propagated as buildah's own status.
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ ctr=$output
+ run_buildah 42 run $ctr sh -c 'exit 42'
+}
+
+@test "run-exit-status on non executable" {
+ skip_if_no_runtime
+
+ # Trying to exec something that is not an executable must exit 1.
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ ctr=$output
+ run_buildah 1 run $ctr /etc
+}
+
+@test "Verify /run/.containerenv exist" {
+ skip_if_rootless_environment
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # test a standard mount to /run/.containerenv
+ run_buildah run $cid ls -1 /run/.containerenv
+ expect_output --substring "/run/.containerenv"
+
+ run_buildah run $cid sh -c '. /run/.containerenv; echo $engine'
+ expect_output --substring "buildah"
+
+ run_buildah run $cid sh -c '. /run/.containerenv; echo $name'
+ expect_output "alpine-working-container"
+
+ run_buildah run $cid sh -c '. /run/.containerenv; echo $image'
+ expect_output --substring "alpine:latest"
+
+ # Bug fix: the original read 'if ["$(id -u)" -ne 0 ]' — without the space
+ # after '[' the test command never ran and rootless always stayed 0.
+ rootless=0
+ if [ "$(id -u)" -ne 0 ]; then
+ rootless=1
+ fi
+
+ run_buildah run $cid sh -c '. /run/.containerenv; echo $rootless'
+ expect_output ${rootless}
+}
+
+@test "run-device" {
+ skip_if_no_runtime
+
+ # --device host[:container[:perms]] — "rm" = read+mknod,
+ # "rwm" = read+write+mknod cgroup device permissions.
+ _prefetch alpine
+ run_buildah from --quiet --pull=false --device /dev/fuse $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 0 run ${cid} ls /dev/fuse
+
+ run_buildah from --quiet --pull=false --device /dev/fuse:/dev/fuse:rm $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 0 run ${cid} ls /dev/fuse
+
+ run_buildah from --quiet --pull=false --device /dev/fuse:/dev/fuse:rwm $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 0 run ${cid} ls /dev/fuse
+
+}
+
+@test "run-device-Rename" {
+ skip_if_rootless_environment
+ skip_if_no_runtime
+ skip_if_chroot
+ skip_if_rootless
+
+ # A host device may be exposed under a different path inside the container.
+ _prefetch alpine
+ run_buildah from --quiet --pull=false --device /dev/fuse:/dev/fuse1 $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 0 run ${cid} ls /dev/fuse1
+}
+
+@test "run check /etc/hosts" {
+ skip_if_rootless_environment
+ skip_if_no_runtime
+ skip_if_in_container
+
+ # Exercises /etc/hosts handling: bogus networks, custom hostnames,
+ # base_hosts_file from containers.conf, slirp4netns, host/none networks.
+ ${OCI} --version
+ _prefetch debian
+
+ local hostname=h-$(random_string)
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON debian
+ cid=$output
+ run_buildah 125 run --network=bogus $cid cat /etc/hosts
+ expect_output --substring "unable to find network with name or ID bogus: network not found"
+ run_buildah run --hostname $hostname $cid cat /etc/hosts
+ expect_output --substring "(10.88.*|10.0.2.100)[[:blank:]]$hostname $cid"
+ ip=$(hostname -I | cut -f 1 -d " ")
+ expect_output --substring "$ip.*host.containers.internal"
+
+ hosts="127.0.0.5 host1
+127.0.0.6 host2"
+ base_hosts_file="$TEST_SCRATCH_DIR/base_hosts"
+ echo "$hosts" > "$base_hosts_file"
+ containers_conf_file="$TEST_SCRATCH_DIR/containers.conf"
+ echo -e "[containers]\nbase_hosts_file = \"$base_hosts_file\"" > "$containers_conf_file"
+ CONTAINERS_CONF="$containers_conf_file" run_buildah run --hostname $hostname $cid cat /etc/hosts
+ expect_output --substring "127.0.0.5[[:blank:]]host1"
+ expect_output --substring "127.0.0.6[[:blank:]]host2"
+ expect_output --substring "(10.88.*|10.0.2.100)[[:blank:]]$hostname $cid"
+
+ # now check that hostname from base file is not overwritten
+ CONTAINERS_CONF="$containers_conf_file" run_buildah run --hostname host1 $cid cat /etc/hosts
+ expect_output --substring "127.0.0.5[[:blank:]]host1"
+ expect_output --substring "127.0.0.6[[:blank:]]host2"
+ expect_output --substring "(10.88.*|10.0.2.100)[[:blank:]]$cid"
+ assert "$output" !~ "(10.88.*|10.0.2.100)[[:blank:]]host1 $cid" "Container IP should not contain host1"
+
+ # check slirp4netns sets correct hostname with another cidr
+ run_buildah run --network slirp4netns:cidr=192.168.2.0/24 --hostname $hostname $cid cat /etc/hosts
+ expect_output --substring "192.168.2.100[[:blank:]]$hostname $cid"
+
+ run_buildah run --network=container $cid cat /etc/hosts
+ m=$(buildah mount $cid)
+ # NOTE(review): --substring "" matches any output; presumably this was
+ # meant to assert that the on-disk copy of /etc/hosts is empty — confirm.
+ run cat $m/etc/hosts
+ [ "$status" -eq 0 ]
+ expect_output --substring ""
+ run_buildah rm -a
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON debian
+ cid=$output
+ run_buildah run --network=host --hostname $hostname $cid cat /etc/hosts
+ assert "$output" =~ "$ip[[:blank:]]$hostname"
+ hostOutput=$output
+ m=$(buildah mount $cid)
+ run cat $m/etc/hosts
+ [ "$status" -eq 0 ]
+ expect_output --substring ""
+ run_buildah run --network=host --no-hosts $cid cat /etc/hosts
+ [ "$output" != "$hostOutput" ]
+ # --isolation chroot implies host networking so check for the correct hosts entry
+ run_buildah run --isolation chroot --hostname $hostname $cid cat /etc/hosts
+ assert "$output" =~ "$ip[[:blank:]]$hostname"
+ run_buildah rm -a
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON debian
+ cid=$output
+ run_buildah run --network=none $cid sh -c 'echo "110.110.110.0 fake_host" >> /etc/hosts; cat /etc/hosts'
+ expect_output "110.110.110.0 fake_host"
+ m=$(buildah mount $cid)
+ run cat $m/etc/hosts
+ [ "$status" -eq 0 ]
+ expect_output "110.110.110.0 fake_host"
+ run_buildah rm -a
+}
+
+@test "run check /etc/hosts with --network pasta" {
+ skip_if_no_runtime
+ skip_if_chroot
+ skip_if_root_environment "pasta only works rootless"
+
+ # FIXME: unskip when we have a new pasta version with:
+ # https://archives.passt.top/passt-dev/20230623082531.25947-2-pholzing@redhat.com/
+ skip "pasta bug prevents this from working"
+
+ # (Body kept below so the test can be re-enabled once the fixed pasta ships.)
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON debian
+ cid=$output
+
+ local hostname=h-$(random_string)
+ ip=$(hostname -I | cut -f 1 -d " ")
+ run_buildah run --network pasta --hostname $hostname $cid cat /etc/hosts
+ assert "$output" =~ "$ip[[:blank:]]$hostname $cid" "--network pasta adds correct hostname"
+
+ # check with containers.conf setting
+ echo -e "[network]\ndefault_rootless_network_cmd = \"pasta\"" > ${TEST_SCRATCH_DIR}/containers.conf
+ CONTAINERS_CONF_OVERRIDE=${TEST_SCRATCH_DIR}/containers.conf run_buildah run --hostname $hostname $cid cat /etc/hosts
+ assert "$output" =~ "$ip[[:blank:]]$hostname $cid" "default_rootless_network_cmd = \"pasta\" works"
+}
+
+@test "run check /etc/resolv.conf" {
+ skip_if_rootless_environment
+ skip_if_no_runtime
+
+ ${OCI} --version
+ _prefetch alpine
+
+ # Make sure to read the correct /etc/resolv.conf file in case of systemd-resolved.
+ resolve_file=$(readlink -f /etc/resolv.conf)
+ if [[ "$resolve_file" == "/run/systemd/resolve/stub-resolv.conf" ]]; then
+ resolve_file="/run/systemd/resolve/resolv.conf"
+ fi
+
+ run grep nameserver $resolve_file
+ # filter out 127... nameservers
+ run grep -v "nameserver 127." <<< "$output"
+ nameservers="$output"
+ # in case of rootless add extra slirp4netns nameserver
+ if is_rootless; then
+ nameservers="nameserver 10.0.2.3
+$output"
+ fi
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run --network=private $cid grep nameserver /etc/resolv.conf
+ # check that no 127... nameserver is in resolv.conf
+ assert "$output" !~ "^nameserver 127." "Container contains local nameserver"
+ # NOTE(review): two-argument assert only checks that the value is
+ # non-empty; presumably a comparison against $output was intended — confirm.
+ assert "$nameservers" "Container nameservers match correct host nameservers"
+ if ! is_rootless; then
+ run_buildah mount $cid
+ assert "$output" != ""
+ assert "$(< $output/etc/resolv.conf)" = "" "resolv.conf is empty"
+ fi
+ run_buildah rm -a
+
+ run grep nameserver /etc/resolv.conf
+ nameservers="$output"
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run --isolation=chroot --network=host $cid grep nameserver /etc/resolv.conf
+ assert "$nameservers" "Container nameservers match the host nameservers"
+ if ! is_rootless; then
+ run_buildah mount $cid
+ assert "$output" != ""
+ assert "$(< $output/etc/resolv.conf)" = "" "resolv.conf is empty"
+ fi
+ run_buildah rm -a
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah 125 run --isolation=chroot --network=none $cid sh -c 'echo "nameserver 110.110.0.110" >> /etc/resolv.conf; cat /etc/resolv.conf'
+ expect_output --substring "cannot set --network other than host with --isolation chroot"
+ run_buildah rm -a
+}
+
+@test "run --network=none and --isolation chroot must conflict" {
+ skip_if_no_runtime
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # chroot isolation implies host networking, so any other --network must be rejected
+ run_buildah 125 run --isolation=chroot --network=none $cid wget google.com
+ expect_output --substring "cannot set --network other than host with --isolation chroot"
+}
+
+@test "run --network=private must mount a fresh /sys" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # verify there is no /sys/kernel/security in the container, that would mean /sys
+ # was bind mounted from the host.
+ # (grep exiting 1 = no match = no host /sys leaked in.)
+ run_buildah 1 run --network=private $cid grep /sys/kernel/security /proc/self/mountinfo
+}
+
+@test "run --network should override build --network" {
+ skip_if_no_runtime
+
+ run_buildah from --network=none --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # --network=none from "from" persists into "run", so the fetch fails by default
+ run_buildah 1 run $cid wget google.com
+ expect_output --substring "bad"
+ # --network=private overrides the stored setting; the fetch should now succeed
+ run_buildah run --network=private $cid wget google.com
+ expect_output --substring "index.html"
+ run_buildah rm -a
+}
+
+@test "run --user" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # "sync" is a user that ships in alpine's /etc/passwd.
+ run_buildah run --user sync $cid whoami
+ expect_output "sync"
+ run_buildah 125 run --user noexist $cid whoami
+ expect_output --substring "unknown user error"
+}
+
+@test "run --runtime --runtime-flag" {
+ skip_if_in_container
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ # Use seccomp to make crun output a warning message because crun writes few logs.
+ cat > ${TEST_SCRATCH_DIR}/seccomp.json << _EOF
+{
+ "defaultAction": "SCMP_ACT_ALLOW",
+ "syscalls": [
+ {
+ "name": "unknown",
+ "action": "SCMP_ACT_KILL"
+ }
+ ]
+}
+_EOF
+ run_buildah from --security-opt seccomp=${TEST_SCRATCH_DIR}/seccomp.json --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+
+ # Track whether at least one known runtime was exercised.
+ local found_runtime=
+
+ if [ -n "$(command -v runc)" ]; then
+ found_runtime=y
+ run_buildah '?' run --runtime=runc --runtime-flag=debug $cid true
+ if [ "$status" -eq 0 ]; then
+ assert "$output" != "" "Output from running 'true' with --runtime-flag=debug"
+ else
+ # runc fully supports cgroup v2 (unified mode) since v1.0.0-rc93.
+ # older runc doesn't work on cgroup v2.
+ expect_output --substring "this version of runc doesn't work on cgroups v2" "should fail by unsupportability for cgroupv2"
+ fi
+ fi
+
+ if [ -n "$(command -v crun)" ]; then
+ found_runtime=y
+ run_buildah run --runtime=crun --runtime-flag=log=${TEST_SCRATCH_DIR}/oci-log $cid true
+ if test \! -e ${TEST_SCRATCH_DIR}/oci-log; then
+ die "the expected file ${TEST_SCRATCH_DIR}/oci-log was not created"
+ fi
+ fi
+
+ if [ -z "${found_runtime}" ]; then
+ skip "Did not find 'runc' nor 'crun' in \$PATH - could not run this test!"
+ fi
+
+}
+
+@test "run --terminal" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # ls --color=auto only emits escape codes on a tty, so the outputs must differ.
+ run_buildah run --terminal=true $cid ls --color=auto
+ colored="$output"
+ run_buildah run --terminal=false $cid ls --color=auto
+ uncolored="$output"
+ [ "$colored" != "$uncolored" ]
+}
+
+@test "rootless on cgroupv2 and systemd runs under user.slice" {
+ skip_if_no_runtime
+ skip_if_cgroupsv1
+ skip_if_in_container
+ skip_if_root_environment
+ # NOTE(review): "$test" is likely never set here; $BATS_TEST_NAME was
+ # probably intended in the skip message — confirm.
+ if test "$DBUS_SESSION_BUS_ADDRESS" = ""; then
+ skip "$test does not work when DBUS_SESSION_BUS_ADDRESS is not defined"
+ fi
+ _prefetch alpine
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah run --cgroupns=host $cid cat /proc/self/cgroup
+ expect_output --substring "/user.slice/"
+}
+
+@test "run-inheritable-capabilities" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # The inheritable set must stay empty by default...
+ run_buildah run $cid grep ^CapInh: /proc/self/status
+ expect_output "CapInh: 0000000000000000"
+ # ...and must stay empty even when all capabilities are added.
+ run_buildah run --cap-add=ALL $cid grep ^CapInh: /proc/self/status
+ expect_output "CapInh: 0000000000000000"
+}
+
+@test "run masks" {
+ skip_if_no_runtime
+
+ _prefetch alpine
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ # Paths masked by the runtime should read as empty inside the container.
+ # (The host is checked to decide whether each mask path applies.)
+ for mask in /proc/acpi /proc/kcore /proc/keys /proc/latency_stats /proc/sched_debug /proc/scsi /proc/timer_list /proc/timer_stats /sys/dev/block /sys/devices/virtual/powercap /sys/firmware /sys/fs/selinux; do
+ if test -d $mask; then
+ run_buildah run $cid ls $mask
+ expect_output "" "Directories should be empty"
+ fi
+ if test -f $mask; then
+ run_buildah run $cid cat $mask
+ # Fixed copy-pasted description: this branch checks files, not directories.
+ expect_output "" "Files should be empty"
+ fi
+ done
+}
diff --git a/tests/selinux.bats b/tests/selinux.bats
new file mode 100644
index 0000000..fae9149
--- /dev/null
+++ b/tests/selinux.bats
@@ -0,0 +1,86 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "selinux test" {
+ if ! which selinuxenabled > /dev/null 2> /dev/null ; then
+ skip 'selinuxenabled command not found in $PATH'
+ elif ! selinuxenabled ; then
+ skip "selinux is disabled"
+ fi
+
+ image=alpine
+ _prefetch $image
+
+ # Create a container and read its context as a baseline.
+ # (The original passed --quiet twice; once is enough.)
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ cid=$output
+ run_buildah run $cid sh -c 'tr \\0 \\n < /proc/self/attr/current'
+ assert "$output" != "" "/proc/self/attr/current cannot be empty"
+ firstlabel="$output"
+
+ # Ensure that we label the same container consistently across multiple "run" instructions.
+ run_buildah run $cid sh -c 'tr \\0 \\n < /proc/self/attr/current'
+ expect_output "$firstlabel" "label of second container == first"
+
+ # Ensure that different containers get different labels.
+ run_buildah from --quiet $WITH_POLICY_JSON $image
+ cid1=$output
+ run_buildah run $cid1 sh -c 'tr \\0 \\n < /proc/self/attr/current'
+ assert "$output" != "$firstlabel" \
+ "Second container has the same label as first (both '$output')"
+}
+
+@test "selinux spc" {
+ if ! which selinuxenabled > /dev/null 2> /dev/null ; then
+ skip "No selinuxenabled"
+ elif ! selinuxenabled ; then
+ skip "selinux is disabled"
+ fi
+
+ image=alpine
+ _prefetch $image
+
+ # Create a container and read its context as a baseline.
+ # (The original passed --quiet twice; once is enough.)
+ run_buildah from --quiet --security-opt label=disable $WITH_POLICY_JSON $image
+ cid=$output
+ run_buildah run $cid sh -c 'tr \\0 \\n < /proc/self/attr/current'
+ context=$output
+ run id -Z
+ crole=$(secon -r $output)
+ # Role and Type should always be constant. (We don't check user)
+ role=$(awk -F: '{print $2}' <<<$context)
+ expect_output --from="$role" "${crole}" "SELinux role"
+
+ type=$(awk -F: '{print $3}' <<<$context)
+ expect_output --from="$type" "spc_t" "SELinux type"
+
+ # Range should match that of the invoking process
+ my_range=$(id -Z |awk -F: '{print $4 ":" $5}')
+ container_range=$(awk -F: '{print $4 ":" $5}' <<<$context)
+ expect_output --from="$container_range" "$my_range" "SELinux range: container matches process"
+}
+
+@test "selinux specific level" {
+ if ! which selinuxenabled > /dev/null 2> /dev/null ; then
+ skip "No selinuxenabled"
+ elif ! selinuxenabled ; then
+ skip "selinux is disabled"
+ fi
+
+ image=alpine
+ _prefetch $image
+
+ firstlabel="system_u:system_r:container_t:s0:c1,c2"
+ # Create a container and read its context as a baseline.
+ # (The original passed --quiet twice; once is enough.)
+ run_buildah from --quiet --security-opt label="level:s0:c1,c2" $WITH_POLICY_JSON $image
+ cid=$output
+
+ # Inspect image
+ run_buildah inspect --format '{{.ProcessLabel}}' $cid
+ expect_output "$firstlabel"
+
+ # Check actual running context
+ run_buildah run $cid sh -c 'tr \\0 \\n < /proc/self/attr/current'
+ expect_output "$firstlabel" "running container context"
+}
diff --git a/tests/serve/serve.go b/tests/serve/serve.go
new file mode 100644
index 0000000..059e248
--- /dev/null
+++ b/tests/serve/serve.go
@@ -0,0 +1,73 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+// sendThatFile returns an HTTP handler that serves files from under
+// basepath.  The request path is cleaned and re-rooted so that it cannot
+// escape the base directory.
+func sendThatFile(basepath string) func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ filename := filepath.Join(basepath, filepath.Clean(string([]rune{filepath.Separator})+r.URL.Path))
+ f, err := os.Open(filename)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ http.NotFound(w, r)
+ return
+ }
+ http.Error(w, "whoops", http.StatusInternalServerError)
+ return
+ }
+ // Bug fix: the file was never closed, leaking one file
+ // descriptor per request.
+ defer f.Close()
+ finfo, err := f.Stat()
+ if err != nil {
+ http.Error(w, "whoops", http.StatusInternalServerError)
+ return
+ }
+ http.ServeContent(w, r, filename, finfo.ModTime(), f)
+ }
+}
+
+// main serves the directory named by args[1] over HTTP.  args[2]
+// (optional) is the port to bind ("0" = pick any free port); args[3]
+// (optional) names a file the chosen port number is written to.
+func main() {
+ args := os.Args
+ if len(args) < 2 {
+ log.Fatal("requires subdirectory path [and optional port [and optional port file name]]")
+ }
+ basedir := args[1]
+ port := "0"
+ if len(args) > 2 {
+ port = args[2]
+ }
+ http.HandleFunc("/", sendThatFile(basedir))
+ server := http.Server{
+ Addr: ":" + port,
+ // BaseContext doubles as a hook that runs once the listener
+ // exists — the earliest point the actual bound port is known.
+ BaseContext: func(l net.Listener) context.Context {
+ if tcp, ok := l.Addr().(*net.TCPAddr); ok {
+ if len(args) > 3 {
+ // Write the port via temp file + rename so readers
+ // never observe a partially-written file.
+ f, err := os.CreateTemp(filepath.Dir(args[3]), filepath.Base(args[3]))
+ if err != nil {
+ log.Fatalf("%v", err)
+ }
+ tempName := f.Name()
+ bytes := []byte(strconv.Itoa(tcp.Port))
+ if n, err := f.Write(bytes); err != nil || n != len(bytes) {
+ if err != nil {
+ log.Fatalf("%v", err)
+ }
+ log.Fatalf("short write: %d != %d", n, len(bytes))
+ }
+ f.Close()
+ if err := os.Rename(tempName, args[3]); err != nil {
+ log.Fatalf("rename: %v", err)
+ }
+ }
+ }
+ return context.Background()
+ },
+ }
+ // ListenAndServe only returns on error, so the process always exits non-zero.
+ log.Fatal(server.ListenAndServe())
+}
diff --git a/tests/sign.bats b/tests/sign.bats
new file mode 100644
index 0000000..a9207d6
--- /dev/null
+++ b/tests/sign.bats
@@ -0,0 +1,97 @@
+#!/usr/bin/env bats
+
+load helpers
+
+function _gpg_setup() {
+ if ! which gpg > /dev/null 2> /dev/null ; then
+ skip 'gpg command not found in $PATH'
+ fi
+
+ export GNUPGHOME=${TEST_SCRATCH_DIR}/.gnupg
+ mkdir -p --mode=0700 $GNUPGHOME
+
+ # gpg on f30 and above needs this, otherwise:
+ # gpg: agent_genkey failed: Inappropriate ioctl for device
+ # ...but gpg on f29 (and, probably, Ubuntu) doesn't grok this
+ GPGOPTS='--pinentry-mode loopback'
+ if gpg --pinentry-mode asdf 2>&1 | grep -qi 'Invalid option'; then
+ GPGOPTS=
+ fi
+
+ cat > ${TEST_SCRATCH_DIR}/genkey-answers <<- EOF
+ %echo Generating a basic OpenPGP key
+ Key-Type: RSA
+ Key-Length: 2048
+ Name-Real: Amanda Lorian
+ Name-Comment: Mandy to her friends
+ Name-Email: amanda@localhost
+ %commit
+ %echo done
+ EOF
+ gpg --batch $GPGOPTS --gen-key --passphrase '' < ${TEST_SCRATCH_DIR}/genkey-answers
+}
+
+
+@test "commit-pull-push-signatures" {
+ _gpg_setup
+ _prefetch alpine
+
+ mkdir -p ${TEST_SCRATCH_DIR}/signed-image ${TEST_SCRATCH_DIR}/unsigned-image
+
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON --sign-by amanda@localhost $cid signed-alpine-image
+
+ # Pushing should preserve the signature.
+ run_buildah push $WITH_POLICY_JSON signed-alpine-image dir:${TEST_SCRATCH_DIR}/signed-image
+ ls -l ${TEST_SCRATCH_DIR}/signed-image/
+ test -s ${TEST_SCRATCH_DIR}/signed-image/signature-1
+
+ # Pushing with --remove-signatures should remove the signature.
+ run_buildah push $WITH_POLICY_JSON --remove-signatures signed-alpine-image dir:${TEST_SCRATCH_DIR}/unsigned-image
+ ls -l ${TEST_SCRATCH_DIR}/unsigned-image/
+ ! test -s ${TEST_SCRATCH_DIR}/unsigned-image/signature-1
+
+ run_buildah commit $WITH_POLICY_JSON $cid unsigned-alpine-image
+ # Pushing with --sign-by to a dir: location should fail to add the signature, if it tries to add one.
+ run_buildah 125 push $WITH_POLICY_JSON --sign-by amanda@localhost unsigned-alpine-image dir:${TEST_SCRATCH_DIR}/signed-image
+ expect_output --substring "Cannot determine canonical Docker reference"
+
+ # Clear out images, so that we don't have leftover signatures when we pull in an image that will end up
+ # causing us to merge its contents with the image with the same ID.
+ run_buildah rmi -a -f
+
+ # Pulling with --remove-signatures should remove signatures, and pushing should have none to keep.
+ run_buildah pull $WITH_POLICY_JSON --quiet dir:${TEST_SCRATCH_DIR}/signed-image
+ imageID="$output"
+ run_buildah push $WITH_POLICY_JSON "$imageID" dir:${TEST_SCRATCH_DIR}/unsigned-image
+ ls -l ${TEST_SCRATCH_DIR}/unsigned-image/
+ ! test -s ${TEST_SCRATCH_DIR}/unsigned-image/signature-1
+
+ # Build a manifest list and try to push the list with signatures.
+ run_buildah manifest create list
+ run_buildah manifest add list $imageID
+ run_buildah 125 manifest push $WITH_POLICY_JSON --sign-by amanda@localhost --all list dir:${TEST_SCRATCH_DIR}/signed-image
+ expect_output --substring "Cannot determine canonical Docker reference"
+ run_buildah manifest push $WITH_POLICY_JSON --all list dir:${TEST_SCRATCH_DIR}/unsigned-image
+}
+
+@test "build-with-dockerfile-signatures" {
+ _gpg_setup
+
+ builddir=${TEST_SCRATCH_DIR}/builddir
+ mkdir -p $builddir
+ cat > ${builddir}/Dockerfile <<- EOF
+ FROM scratch
+ ADD Dockerfile /
+ EOF
+
+ # We should be able to sign at build-time.
+ run_buildah bud $WITH_POLICY_JSON --sign-by amanda@localhost -t signed-scratch-image ${builddir}
+
+ mkdir -p ${TEST_SCRATCH_DIR}/signed-image
+ # Pushing should preserve the signature.
+ run_buildah push $WITH_POLICY_JSON signed-scratch-image dir:${TEST_SCRATCH_DIR}/signed-image
+ ls -l ${TEST_SCRATCH_DIR}/signed-image/
+ test -s ${TEST_SCRATCH_DIR}/signed-image/signature-1
+}
diff --git a/tests/source.bats b/tests/source.bats
new file mode 100644
index 0000000..71054cc
--- /dev/null
+++ b/tests/source.bats
@@ -0,0 +1,156 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "source create" {
+ # Create an empty source image and make sure it's properly initialized
+ srcdir=${TEST_SCRATCH_DIR}/newsource
+ run_buildah source create --author="Buildah authors" $srcdir
+
+ # Inspect the index.json
+ run jq -r .manifests[0].mediaType $srcdir/index.json
+ expect_output "application/vnd.oci.image.manifest.v1+json"
+ run jq -r .mediaType $srcdir/index.json
+ expect_output null # TODO: common#839 will change this to "application/vnd.oci.image.index.v1+json"
+ # Digest of manifest
+ run jq -r .manifests[0].digest $srcdir/index.json
+ manifestDigest=${output//sha256:/} # strip off the sha256 prefix
+ run stat $srcdir/blobs/sha256/$manifestDigest
+ assert "$status" -eq 0 "status of stat(manifestDigest)"
+
+ # Inspect the manifest
+ run jq -r .schemaVersion $srcdir/blobs/sha256/$manifestDigest
+ expect_output "2"
+ run jq -r .layers $srcdir/blobs/sha256/$manifestDigest
+ expect_output "null"
+ run jq -r .config.mediaType $srcdir/blobs/sha256/$manifestDigest
+ expect_output "application/vnd.oci.source.image.config.v1+json"
+ run jq -r .mediaType $srcdir/blobs/sha256/$manifestDigest
+ expect_output "application/vnd.oci.image.manifest.v1+json"
+ run jq -r .config.size $srcdir/blobs/sha256/$manifestDigest
+ # let's not check the size (afraid of time-stamp impacts)
+ assert "$status" -eq 0 "status of jq .config.size"
+ # Digest of config
+ run jq -r .config.digest $srcdir/blobs/sha256/$manifestDigest
+ configDigest=${output//sha256:/} # strip off the sha256 prefix
+ run stat $srcdir/blobs/sha256/$configDigest
+ assert "$status" -eq 0 "status of stat(configDigest)"
+
+ # Inspect the config
+ run jq -r .created $srcdir/blobs/sha256/$configDigest
+ assert "$status" -eq 0 "status of jq .created on configDigest"
+ created=$output
+ run date --date="$output"
+ assert "$status" -eq 0 "status of date (this should never ever fail)"
+ run jq -r .author $srcdir/blobs/sha256/$configDigest
+ expect_output "Buildah authors"
+
+ # Directory mustn't exist
+ run_buildah 125 source create $srcdir
+ expect_output --substring "creating source image: "
+ expect_output --substring " already exists"
+}
+
+@test "source add" {
+ # Create an empty source image and make sure it's properly initialized.
+ srcdir=${TEST_SCRATCH_DIR}/newsource
+ run_buildah source create $srcdir
+
+ # Digest of initial manifest
+ run jq -r .manifests[0].digest $srcdir/index.json
+ manifestDigestEmpty=${output//sha256:/} # strip off the sha256 prefix
+ run stat $srcdir/blobs/sha256/$manifestDigestEmpty
+ assert "$status" -eq 0 "status of stat(manifestDigestEmpty)"
+
+ # Add layer 1
+ echo 111 > ${TEST_SCRATCH_DIR}/file1
+ run_buildah source add $srcdir ${TEST_SCRATCH_DIR}/file1
+ # Make sure the digest of the manifest changed
+ run jq -r .manifests[0].digest $srcdir/index.json
+ manifestDigestFile1=${output//sha256:/} # strip off the sha256 prefix
+ assert "$manifestDigestEmpty" != "$manifestDigestFile1" \
+ "manifestDigestEmpty should differ from manifestDigestFile1"
+
+ # Inspect layer 1
+ run jq -r .layers[0].mediaType $srcdir/blobs/sha256/$manifestDigestFile1
+ expect_output "application/vnd.oci.image.layer.v1.tar+gzip"
+ run jq -r .layers[0].digest $srcdir/blobs/sha256/$manifestDigestFile1
+ layer1Digest=${output//sha256:/} # strip off the sha256 prefix
+ # Now make sure the reported size matches the actual one
+ run jq -r .layers[0].size $srcdir/blobs/sha256/$manifestDigestFile1
+ assert "$status" -eq 0 "status of jq .layers[0].size on manifestDigestFile1"
+ layer1Size=$output
+ run du -b $srcdir/blobs/sha256/$layer1Digest
+ expect_output --substring "$layer1Size"
+
+ # Add layer 2
+ echo 222222aBitLongerForAdifferentSize > ${TEST_SCRATCH_DIR}/file2
+ run_buildah source add $srcdir ${TEST_SCRATCH_DIR}/file2
+ # Make sure the digest of the manifest changed
+ run jq -r .manifests[0].digest $srcdir/index.json
+ manifestDigestFile2=${output//sha256:/} # strip off the sha256 prefix
+ assert "$manifestDigestEmpty" != "$manifestDigestFile2" \
+ "manifestDigestEmpty should differ from manifestDigestFile2"
+ assert "$manifestDigestFile1" != "$manifestDigestFile2" \
+ "manifestDigestFile1 should differ from manifestDigestFile2"
+
+ # Make sure layer 1 is still in the manifest and remains unchanged
+ run jq -r .layers[0].digest $srcdir/blobs/sha256/$manifestDigestFile2
+ expect_output "sha256:$layer1Digest"
+ run jq -r .layers[0].size $srcdir/blobs/sha256/$manifestDigestFile2
+ expect_output "$layer1Size"
+
+ # Inspect layer 2
+ run jq -r .layers[1].mediaType $srcdir/blobs/sha256/$manifestDigestFile2
+ expect_output "application/vnd.oci.image.layer.v1.tar+gzip"
+ run jq -r .layers[1].digest $srcdir/blobs/sha256/$manifestDigestFile2
+ layer2Digest=${output//sha256:/} # strip off the sha256 prefix
+ # Now make sure the reported size matches the actual one
+ run jq -r .layers[1].size $srcdir/blobs/sha256/$manifestDigestFile2
+ assert "$status" -eq 0 "status of jq .layers[1].size on manifestDigestFile2"
+ layer2Size=$output
+ run du -b $srcdir/blobs/sha256/$layer2Digest
+ expect_output --substring "$layer2Size"
+
+ # Last but not least, make sure the two layers differ
+ assert "$layer1Digest" != "$layer2Digest" "layer1Digest vs layer2Digest"
+ assert "$layer1Size" != "$layer2Size" "layer1Size vs layer2Size"
+}
+
+@test "source push/pull" {
+ # Create an empty source image and make sure it's properly initialized.
+ srcdir=${TEST_SCRATCH_DIR}/newsource
+ run_buildah source create $srcdir
+
+ # Add two layers
+ echo 111 > ${TEST_SCRATCH_DIR}/file1
+ run_buildah source add $srcdir ${TEST_SCRATCH_DIR}/file1
+ echo 222... > ${TEST_SCRATCH_DIR}/file2
+ run_buildah source add $srcdir ${TEST_SCRATCH_DIR}/file2
+
+ start_registry
+
+ # --quiet=true
+ run_buildah source push --quiet --tls-verify=false --creds testuser:testpassword $srcdir localhost:${REGISTRY_PORT}/source:test
+ expect_output ""
+ # --quiet=false (implicit)
+ run_buildah source push --tls-verify=false --creds testuser:testpassword $srcdir localhost:${REGISTRY_PORT}/source:test
+ expect_output --substring "Copying blob"
+ expect_output --substring "Copying config"
+
+ pulldir=${TEST_SCRATCH_DIR}/pulledsource
+ # --quiet=true
+ run_buildah source pull --quiet --tls-verify=false --creds testuser:testpassword localhost:${REGISTRY_PORT}/source:test $pulldir
+ expect_output ""
+ # --quiet=false (implicit)
+ rm -rf $pulldir
+ run_buildah source pull --tls-verify=false --creds testuser:testpassword localhost:${REGISTRY_PORT}/source:test $pulldir
+ expect_output --substring "Copying blob"
+ expect_output --substring "Copying config"
+
+ run diff -r $srcdir $pulldir
+ # FIXME: if there's a nonzero chance of this failing, include actual diffs
+ assert "$status" -eq 0 "status from diff of srcdir vs pulldir"
+
+ stop_registry
+}
diff --git a/tests/squash.bats b/tests/squash.bats
new file mode 100644
index 0000000..3560866
--- /dev/null
+++ b/tests/squash.bats
@@ -0,0 +1,170 @@
+#!/usr/bin/env bats
+
+load helpers
+
+
+function check_lengths() {
+ local image=$1
+ local expect=$2
+
+ # matrix test: check given .Docker.* and .OCIv1.* fields in image
+ for which in Docker OCIv1; do
+ for field in RootFS.DiffIDs History; do
+ run_buildah inspect -t image -f "{{len .$which.$field}}" $image
+ expect_output "$expect"
+ done
+ done
+}
+
+@test "squash" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ run_buildah from scratch
+ cid=$output
+ image=stage0
+ remove=(8 5)
+ for stage in $(seq 10) ; do
+ run_buildah copy "$cid" ${TEST_SCRATCH_DIR}/randomfile /layer${stage}
+ image=stage${stage}
+ if test $stage -eq ${remove[0]} ; then
+ run_buildah mount "$cid"
+ mountpoint=$output
+ rm -f ${mountpoint}/layer${remove[1]}
+ fi
+ run_buildah commit $WITH_POLICY_JSON --rm "$cid" ${image}
+ check_lengths $image $stage
+ run_buildah from --quiet ${image}
+ cid=$output
+ done
+ run_buildah commit $WITH_POLICY_JSON --rm --squash "$cid" squashed
+
+ check_lengths squashed 1
+
+ run_buildah from --quiet squashed
+ cid=$output
+ run_buildah mount $cid
+ mountpoint=$output
+ for stage in $(seq 10) ; do
+ if test $stage -eq ${remove[1]} ; then
+ if test -e $mountpoint/layer${remove[1]} ; then
+ echo file /layer${remove[1]} should not be there
+ exit 1
+ fi
+ continue
+ fi
+ cmp $mountpoint/layer${stage} ${TEST_SCRATCH_DIR}/randomfile
+ done
+}
+
+@test "squash-using-dockerfile" {
+ createrandom ${TEST_SCRATCH_DIR}/randomfile
+ image=stage0
+ from=scratch
+ for stage in $(seq 10) ; do
+ mkdir -p ${TEST_SCRATCH_DIR}/stage${stage}
+ echo FROM ${from} > ${TEST_SCRATCH_DIR}/stage${stage}/Dockerfile
+ cp ${TEST_SCRATCH_DIR}/randomfile ${TEST_SCRATCH_DIR}/stage${stage}/
+ echo COPY randomfile /layer${stage} >> ${TEST_SCRATCH_DIR}/stage${stage}/Dockerfile
+ image=stage${stage}
+ from=${image}
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON -t ${image} ${TEST_SCRATCH_DIR}/stage${stage}
+ check_lengths $image $stage
+ done
+
+ mkdir -p ${TEST_SCRATCH_DIR}/squashed
+ echo FROM ${from} > ${TEST_SCRATCH_DIR}/squashed/Dockerfile
+ cp ${TEST_SCRATCH_DIR}/randomfile ${TEST_SCRATCH_DIR}/squashed/
+ echo COPY randomfile /layer-squashed >> ${TEST_SCRATCH_DIR}/squashed/Dockerfile # bugfix: was appending to stage10's Dockerfile, so the COPY never took effect in the squashed build
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash -t squashed ${TEST_SCRATCH_DIR}/squashed
+
+ check_lengths squashed 1
+
+ run_buildah from --quiet squashed
+ cid=$output
+ run_buildah mount $cid
+ mountpoint=$output
+ for stage in $(seq 10) ; do
+ cmp $mountpoint/layer${stage} ${TEST_SCRATCH_DIR}/randomfile
+ done
+
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash --layers -t squashed ${TEST_SCRATCH_DIR}/squashed
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+ expect_output "1" "len(DiffIDs) - simple image"
+
+ echo FROM ${from} > ${TEST_SCRATCH_DIR}/squashed/Dockerfile
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash -t squashed ${TEST_SCRATCH_DIR}/squashed
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+ expect_output "1" "len(DiffIDs) - image with FROM"
+
+ echo USER root >> ${TEST_SCRATCH_DIR}/squashed/Dockerfile
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash -t squashed ${TEST_SCRATCH_DIR}/squashed
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+ expect_output "1" "len(DiffIDs) - image with FROM and USER"
+
+ echo COPY file / >> ${TEST_SCRATCH_DIR}/squashed/Dockerfile
+ echo COPY file / > ${TEST_SCRATCH_DIR}/squashed/file
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash -t squashed ${TEST_SCRATCH_DIR}/squashed
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+ expect_output "1" "len(DiffIDs) - image with FROM, USER, and 2xCOPY"
+
+ echo FROM ${from} > ${TEST_SCRATCH_DIR}/squashed/Dockerfile
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash --layers -t squashed ${TEST_SCRATCH_DIR}/squashed
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+ expect_output "1" "len(DiffIDs) - image with FROM (--layers)"
+
+ echo USER root >> ${TEST_SCRATCH_DIR}/squashed/Dockerfile
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash -t squashed ${TEST_SCRATCH_DIR}/squashed
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+ expect_output "1" "len(DiffIDs) - image with FROM and USER (--layers)"
+
+ echo COPY file / >> ${TEST_SCRATCH_DIR}/squashed/Dockerfile
+ echo COPY file / > ${TEST_SCRATCH_DIR}/squashed/file
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash -t squashed ${TEST_SCRATCH_DIR}/squashed
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+ expect_output "1" "len(DiffIDs) - image with FROM, USER, and 2xCOPY (--layers)"
+
+ run_buildah build-using-dockerfile $WITH_POLICY_JSON --squash --format docker -t squashed ${TEST_SCRATCH_DIR}/squashed
+ run_buildah inspect -t image -f '{{.Docker.Parent}}' squashed
+ expect_output "" "should have no parent image set"
+}
+
+
+@test "bud-squash-should-use-cache" {
+ _prefetch alpine
+ # populate cache from simple build
+ run_buildah build --layers -t test $WITH_POLICY_JSON -f $BUDFILES/layers-squash/Dockerfile.multi-stage
+ # create another squashed build and check if we are using cache for everything.
+ # instead of last instruction in last stage
+ run_buildah build --layers --squash -t testsquash $WITH_POLICY_JSON -f $BUDFILES/layers-squash/Dockerfile.multi-stage
+ expect_output --substring "Using cache"
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' testsquash
+ expect_output "1" "image built with --squash should only include 1 layer"
+ run_buildah rmi -f testsquash
+ run_buildah rmi -f test
+}
+
+# Test build with --squash and --layers and verify number of layers and content inside image
+@test "bud-squash-should-use-cache and verify content inside image" {
+ mkdir -p ${TEST_SCRATCH_DIR}/bud/platform
+
+ cat > ${TEST_SCRATCH_DIR}/bud/platform/Dockerfile << _EOF
+FROM busybox
+RUN touch hello
+ADD . /data
+RUN echo hey && mkdir water
+_EOF
+
+ # Build a first image with --layers and --squash and populate build cache
+ run_buildah build $WITH_POLICY_JSON --squash --layers -t one -f ${TEST_SCRATCH_DIR}/bud/platform/Dockerfile ${TEST_SCRATCH_DIR}/bud/platform
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' one
+ expect_output "1" "image built with --squash should only include 1 layer"
+ # Build again and verify if cache is being used
+ run_buildah build $WITH_POLICY_JSON --squash --layers -t two -f ${TEST_SCRATCH_DIR}/bud/platform/Dockerfile ${TEST_SCRATCH_DIR}/bud/platform
+ expect_output --substring "Using cache"
+ run_buildah inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' two
+ expect_output "1" "image built with --squash should only include 1 layer"
+ run_buildah from two
+ run_buildah run two-working-container ls
+ expect_output --substring "water"
+ expect_output --substring "data"
+ expect_output --substring "hello"
+}
diff --git a/tests/ssh.bats b/tests/ssh.bats
new file mode 100644
index 0000000..e696a37
--- /dev/null
+++ b/tests/ssh.bats
@@ -0,0 +1,77 @@
+#!/usr/bin/env bats
+
+load helpers
+
+
+function setup() {
+ setup_tests
+ unset SSH_AUTH_SOCK
+}
+
+function teardown(){
+ if [[ -n "$SSH_AUTH_SOCK" ]]; then ssh-agent -k;fi
+ teardown_tests
+}
+
+@test "bud with ssh key" {
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ ssh-keygen -b 2048 -t rsa -f $mytmpdir/sshkey -q -N ""
+ fingerprint=$(ssh-keygen -l -f $mytmpdir/sshkey -E md5 | awk '{ print $2; }')
+
+ run_buildah bud --ssh default=$mytmpdir/sshkey $WITH_POLICY_JSON -t sshimg -f $BUDFILES/run-mounts/Dockerfile.ssh $BUDFILES/run-mounts
+ expect_output --substring $fingerprint
+
+ run_buildah from sshimg
+ run_buildah 1 run sshimg-working-container cat /run/buildkit/ssh_agent.0
+ expect_output --substring "cat: can't open '/run/buildkit/ssh_agent.0': No such file or directory"
+ run_buildah rm -a
+}
+
+@test "bud with ssh key secret accessed on second RUN" {
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ ssh-keygen -b 2048 -t rsa -f $mytmpdir/sshkey -q -N ""
+ fingerprint=$(ssh-keygen -l -f $mytmpdir/sshkey -E md5 | awk '{ print $2; }')
+
+ run_buildah 2 bud --ssh default=$mytmpdir/sshkey $WITH_POLICY_JSON -t sshimg -f $BUDFILES/run-mounts/Dockerfile.ssh_access $BUDFILES/run-mounts
+ expect_output --substring "Could not open a connection to your authentication agent."
+}
+
+@test "bud with containerfile ssh options" {
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ ssh-keygen -b 2048 -t rsa -f $mytmpdir/sshkey -q -N ""
+ fingerprint=$(ssh-keygen -l -f $mytmpdir/sshkey -E md5 | awk '{ print $2; }')
+
+ run_buildah bud --ssh default=$mytmpdir/sshkey $WITH_POLICY_JSON -t secretopts -f $BUDFILES/run-mounts/Dockerfile.ssh_options $BUDFILES/run-mounts
+ expect_output --substring "444"
+ expect_output --substring "1000"
+ expect_output --substring "1001"
+}
+
+@test "bud with ssh sock" {
+ _prefetch alpine
+
+ mytmpdir=${TEST_SCRATCH_DIR}/my-dir1
+ mkdir -p ${mytmpdir}
+ ssh-keygen -b 2048 -t rsa -f $mytmpdir/sshkey -q -N ""
+ fingerprint=$(ssh-keygen -l -f $mytmpdir/sshkey -E md5 | awk '{ print $2; }')
+ eval "$(ssh-agent -s)"
+ ssh-add $mytmpdir/sshkey
+
+ run_buildah bud --ssh default=$mytmpdir/sshkey $WITH_POLICY_JSON -t sshimg -f $BUDFILES/run-mounts/Dockerfile.ssh $BUDFILES/run-mounts
+ expect_output --substring $fingerprint
+
+ run_buildah from sshimg
+ run_buildah 1 run sshimg-working-container cat /run/buildkit/ssh_agent.0
+ expect_output --substring "cat: can't open '/run/buildkit/ssh_agent.0': No such file or directory"
+ run_buildah rm -a
+}
+
diff --git a/tests/subscriptions.bats b/tests/subscriptions.bats
new file mode 100644
index 0000000..c639eb9
--- /dev/null
+++ b/tests/subscriptions.bats
@@ -0,0 +1,86 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "bind secrets mounts to container" {
+ skip_if_no_runtime
+
+ # Setup
+ SECRETS_DIR=$TEST_SCRATCH_DIR/rhel/secrets
+ mkdir -p $SECRETS_DIR
+
+ TESTFILE1=$SECRETS_DIR/test.txt
+ TESTFILE_CONTENT="Testing secrets mounts. I am mounted!"
+ echo $TESTFILE_CONTENT > $TESTFILE1
+
+ TESTFILE2=$SECRETS_DIR/file.txt
+ touch $TESTFILE2
+ chmod 604 $TESTFILE2
+
+ TEST_SCRATCH_DIR1=$SECRETS_DIR/test-dir
+ mkdir -m704 $TEST_SCRATCH_DIR1
+
+ TESTFILE3=$TEST_SCRATCH_DIR1/file.txt
+ touch $TESTFILE3
+ chmod 777 $TESTFILE3
+
+ mkdir -p $TEST_SCRATCH_DIR/symlink/target
+ touch $TEST_SCRATCH_DIR/symlink/target/key.pem
+ ln -s $TEST_SCRATCH_DIR/symlink/target $SECRETS_DIR/mysymlink
+
+ # prepare the test mounts file
+ mkdir $TEST_SCRATCH_DIR/containers
+ MOUNTS_PATH=$TEST_SCRATCH_DIR/containers/mounts.conf
+
+ # add the mounts entries
+ echo "$SECRETS_DIR:/run/secrets" > $MOUNTS_PATH
+ echo "$SECRETS_DIR" >> $MOUNTS_PATH
+ echo "$TESTFILE1:/test.txt" >> $MOUNTS_PATH
+
+
+ # setup the test container
+ _prefetch alpine
+ run_buildah --default-mounts-file "$MOUNTS_PATH" \
+ from --quiet --pull $WITH_POLICY_JSON alpine
+ cid=$output
+
+ # test a standard mount to /run/secrets
+ run_buildah run $cid ls /run/secrets
+ expect_output --substring "test.txt"
+
+ # test a mount without destination
+ run_buildah run $cid ls "$TEST_SCRATCH_DIR"/rhel/secrets
+ expect_output --substring "test.txt"
+
+ # test a file-based mount
+ run_buildah run $cid cat /test.txt
+ expect_output "$TESTFILE_CONTENT"
+
+ # test permissions for a file-based mount
+ run_buildah run $cid stat -c %a /run/secrets/file.txt
+ expect_output 604
+
+ # test permissions for a directory-based mount
+ run_buildah run $cid stat -c %a /run/secrets/test-dir
+ expect_output 704
+
+ # test permissions for a file-based mount within a sub-directory
+ run_buildah run $cid stat -c %a /run/secrets/test-dir/file.txt
+ expect_output 777
+
+ cat > $TEST_SCRATCH_DIR/Containerfile << _EOF
+from alpine
+run stat -c %a /run/secrets/file.txt
+run stat -c %a /run/secrets/test-dir
+run stat -c %a /run/secrets/test-dir/file.txt
+_EOF
+
+ run_buildah --default-mounts-file "$MOUNTS_PATH" bud $TEST_SCRATCH_DIR
+ expect_output --substring "604"
+ expect_output --substring "704"
+ expect_output --substring "777"
+
+ # test a symlink
+ run_buildah run $cid ls /run/secrets/mysymlink
+ expect_output --substring "key.pem"
+}
diff --git a/tests/tag.bats b/tests/tag.bats
new file mode 100644
index 0000000..78f3ce9
--- /dev/null
+++ b/tests/tag.bats
@@ -0,0 +1,42 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "tag by name" {
+ run_buildah from --pull=false $WITH_POLICY_JSON scratch
+ cid=$output
+ run_buildah commit $WITH_POLICY_JSON "$cid" scratch-image
+ run_buildah 125 inspect --type image tagged-image
+ run_buildah tag scratch-image tagged-image tagged-also-image named-image
+ run_buildah inspect --type image tagged-image
+ run_buildah inspect --type image tagged-also-image
+ run_buildah inspect --type image named-image
+}
+
+@test "tag by id" {
+ _prefetch busybox
+ run_buildah pull --quiet $WITH_POLICY_JSON busybox
+ id=$output
+
+ # Tag by ID, then make a container from that tag
+ run_buildah tag $id busybox1
+ run_buildah from busybox1 # gives us busybox1-working-container
+
+ # The from-name should be busybox1, but ID should be same as pulled image
+ run_buildah inspect --format '{{ .FromImage }}' busybox1-working-container
+ expect_output "localhost/busybox1:latest"
+ run_buildah inspect --format '{{ .FromImageID }}' busybox1-working-container
+ expect_output $id
+}
+
+# Tagging a manifest list should tag manifest list instead of resolved image
+@test "tag a manifest list" {
+ run_buildah manifest create foobar
+ run_buildah manifest add foobar busybox
+ run_buildah tag foobar foobar2
+ run_buildah manifest inspect foobar
+ foobar_inspect=$output
+ run_buildah manifest inspect foobar2
+ # Output of tagged manifest list should be same
+ expect_output "$foobar_inspect"
+}
diff --git a/tests/test_buildah_authentication.sh b/tests/test_buildah_authentication.sh
new file mode 100755
index 0000000..9cd04f2
--- /dev/null
+++ b/tests/test_buildah_authentication.sh
@@ -0,0 +1,238 @@
+#!/usr/bin/env bash
+# test_buildah_authentication
+# A script to be run at the command line with Buildah installed.
+# This currently needs to be run as root and Docker must be
+# installed on the system.
+# This will test the code and should be run with this command:
+#
+# /bin/bash -v test_buildah_authentication.sh
+
+########
+# System setup - Create dir for creds and start Docker
+########
+mkdir -p /root/auth
+systemctl restart docker
+
+########
+# Create creds and store in /root/auth/htpasswd
+########
+registry=$(buildah from registry:2)
+buildah run $registry -- htpasswd -Bbn testuser testpassword > /root/auth/htpasswd
+
+########
+# Create certificate via openssl
+########
+openssl req -newkey rsa:4096 -nodes -sha256 -keyout /root/auth/domain.key -x509 -days 2 -out /root/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
+
+########
+# Skopeo and buildah both require *.cert file
+########
+cp /root/auth/domain.crt /root/auth/domain.cert
+
+########
+# Create a private registry that uses certificate and creds file
+########
+docker run -d -p 5000:5000 --name registry -v /root/auth:/root/auth:Z -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/root/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/root/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=/root/auth/domain.key registry:2
+
+########
+# Pull alpine
+########
+buildah from alpine
+
+buildah containers
+
+buildah images
+
+########
+# Log into docker on local repo
+########
+docker login localhost:5000 --username testuser --password testpassword
+
+########
+# Push to the local repo using cached Docker creds.
+########
+buildah push --cert-dir /root/auth alpine docker://localhost:5000/my-alpine
+
+########
+# Show stuff
+########
+docker ps --all
+
+docker images
+
+buildah containers
+
+buildah images
+
+########
+# Buildah from (pull) using certs and cached Docker creds.
+# Should show two alpine images and containers when done.
+########
+ctrid=$(buildah from --cert-dir /root/auth localhost:5000/my-alpine)
+
+buildah containers
+
+buildah images
+
+########
+# Clean up Buildah
+########
+buildah rm $ctrid
+buildah rmi -f localhost:5000/my-alpine:latest
+
+########
+# Show stuff
+########
+docker ps --all
+
+docker images
+
+buildah containers
+
+buildah images
+
+########
+# Log out of local repo
+########
+docker logout localhost:5000
+
+########
+# Push using only certs, this should FAIL.
+########
+buildah push --cert-dir /root/auth --tls-verify=true alpine docker://localhost:5000/my-alpine
+
+########
+# Push using creds, certs and no transport (docker://), this should work.
+########
+buildah push --cert-dir ~/auth --tls-verify=true --creds=testuser:testpassword alpine localhost:5000/my-alpine
+
+########
+# Push using a bad password , this should FAIL.
+########
+buildah push --cert-dir ~/auth --tls-verify=true --creds=testuser:badpassword alpine localhost:5000/my-alpine
+
+########
+# No creds anywhere, only the certificate, this should FAIL.
+########
+buildah from --cert-dir /root/auth --tls-verify=true localhost:5000/my-alpine
+
+########
+# From with creds and certs, this should work
+########
+ctrid=$(buildah from --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword localhost:5000/my-alpine)
+
+########
+# Show stuff
+########
+docker ps --all
+
+docker images
+
+buildah containers
+
+buildah images
+
+########
+# Clean up Buildah
+########
+buildah rm $ctrid
+buildah rmi -f $(buildah images -q)
+
+########
+# Pull alpine
+########
+buildah from alpine
+
+########
+# Show stuff
+########
+docker ps --all
+
+docker images
+
+buildah containers
+
+buildah images
+
+########
+# Let's test commit
+########
+
+########
+# No credentials, this should FAIL.
+########
+buildah commit --cert-dir /root/auth --tls-verify=true alpine-working-container docker://localhost:5000/my-commit-alpine
+
+########
+# This should work, writing image in registry. Will not create an image locally.
+########
+buildah commit --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword alpine-working-container docker://localhost:5000/my-commit-alpine
+
+########
+# Use bad password on from/pull, this should FAIL
+########
+buildah from --pull-always --cert-dir /root/auth --tls-verify=true --creds=testuser:badpassword localhost:5000/my-commit-alpine
+
+########
+# Pull the new image that we just committed
+########
+buildah from --pull-always --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword localhost:5000/my-commit-alpine
+
+########
+# Show stuff
+########
+docker ps --all
+
+docker images
+
+buildah containers
+
+buildah images
+
+########
+# Create Dockerfile
+########
+FILE=./Dockerfile
+/bin/cat <<EOM >$FILE
+FROM localhost:5000/my-commit-alpine
+EOM
+chmod +x $FILE
+
+########
+# Clean up Buildah
+########
+buildah rm --all
+buildah rmi -f $(buildah images -q)
+
+########
+# Try Buildah bud with creds but no auth, this should FAIL
+########
+buildah bud -f ./Dockerfile --tls-verify=true --creds=testuser:testpassword
+
+########
+# Try Buildah bud with creds and auth, this should work
+########
+buildah bud -f ./Dockerfile --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword
+
+########
+# Show stuff
+########
+docker ps --all
+
+docker images
+
+buildah containers
+
+buildah images
+
+########
+# Clean up
+########
+read -p "Press enter to continue and clean up all"
+
+rm -f ./Dockerfile
+rm -rf /root/auth
+docker rm -f $(docker ps --all -q)
+docker rmi -f $(docker images -q)
+buildah rm $(buildah containers -q)
+buildah rmi -f $(buildah images -q)
diff --git a/tests/test_buildah_baseline.sh b/tests/test_buildah_baseline.sh
new file mode 100755
index 0000000..cdec939
--- /dev/null
+++ b/tests/test_buildah_baseline.sh
@@ -0,0 +1,244 @@
+#!/usr/bin/env bash
+# test_buildah_baseline.sh
+# A script to be run at the command line with Buildah installed.
+# This should be run against a new kit to provide base level testing
+# on a freshly installed machine with no images or containers in
+# play. This currently needs to be run as root.
+#
+# Commands based on the tutorial provided by William Henry.
+#
+# To run this command:
+#
+# /bin/bash -v test_buildah_baseline.sh
+
+########
+# Next two commands should return blanks
+########
+buildah images
+buildah containers
+
+########
+# Run ls in redis container, this should work
+########
+ctrid=$(buildah from registry.access.redhat.com/rhscl/redis-32-rhel7)
+buildah run $ctrid ls /
+
+
+########
+# Validate touch works after installing httpd, solved selinux
+# issue that should now work.
+########
+ctr=$(buildah from scratch)
+mnt=$(buildah mount $ctr)
+dnf -y install --installroot=$mnt --releasever=30 httpd
+buildah run $ctr touch /test
+
+########
+# Create Fedora based container
+########
+container=$(buildah from fedora)
+echo $container
+
+########
+# Run container and display contents in /etc
+########
+buildah run $container -- ls -alF /etc
+
+########
+# Run Java in the container - should FAIL
+########
+buildah run $container java
+
+########
+# Install java onto the container
+########
+buildah run $container -- dnf -y install java
+
+########
+# Run Java in the container - should show java usage
+########
+buildah run $container java
+
+########
+# Create a scratch container
+########
+newcontainer=$(buildah from scratch)
+
+########
+# Check and find two containers
+########
+buildah containers
+
+########
+# Check images, no "scratch" image
+########
+buildah images
+
+########
+# Run the container - should FAIL
+########
+buildah run $newcontainer bash
+
+########
+# Mount the container's root file system
+########
+scratchmnt=$(buildah mount $newcontainer)
+
+########
+# Show the location, should be /var/lib/containers/storage/overlay/{id}/dif
+########
+echo $scratchmnt
+
+########
+# Install Fedora 30 bash and coreutils
+########
+dnf install --installroot $scratchmnt --release 30 bash coreutils --setopt install_weak_deps=false -y
+
+########
+# Check /usr/bin on the new container
+########
+buildah run $newcontainer -- ls -alF /usr/bin
+
+########
+# Create shell script to test on
+########
+FILE=./runecho.sh
+/bin/cat <<EOM >$FILE
+#!/usr/bin/env bash
+for i in {1..9};
+do
+ echo "This is a new container from ipbabble [" \$i "]"
+done
+EOM
+chmod +x $FILE
+
+########
+# Copy and run file on scratch container
+########
+buildah copy $newcontainer $FILE /usr/bin
+buildah config --cmd /usr/bin/runecho.sh $newcontainer
+buildah run $newcontainer /usr/bin/runecho.sh
+
+########
+# Add configuration information
+########
+buildah config --created-by "ipbabble" $newcontainer
+buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora30-bashecho $newcontainer
+
+########
+# Inspect the container, verifying above was put into it
+########
+buildah inspect $newcontainer
+
+########
+# Unmount the container
+########
+buildah unmount $newcontainer
+
+########
+# Commit the image
+########
+buildah commit $newcontainer fedora-bashecho
+
+########
+# Check the images there should be a fedora-bashecho:latest image
+########
+buildah images
+
+########
+# Inspect the fedora-bashecho image
+########
+buildah inspect --type=image fedora-bashecho
+
+########
+# Remove the container
+########
+buildah rm $newcontainer
+
+########
+# Install Docker, but not for long!
+########
+dnf -y install docker
+systemctl start docker
+
+########
+# Push fedora-bashecho to the Docker daemon
+########
+buildah push fedora-bashecho docker-daemon:fedora-bashecho:latest
+
+########
+# Run fedora-bashecho from Docker
+########
+docker run fedora-bashecho
+
+########
+# Time to remove Docker
+########
+dnf -y remove docker
+
+########
+# Build Dockerfiles for OnBuild Test
+# (Thanks @clcollins!)
+########
+FILE=./Dockerfile
+/bin/cat <<EOM >$FILE
+FROM alpine
+RUN touch /foo
+ONBUILD RUN touch /bar
+EOM
+chmod +x $FILE
+
+FILE=./Dockerfile-2
+/bin/cat <<EOM >$FILE
+FROM onbuild-image
+RUN touch /baz
+EOM
+chmod +x $FILE
+
+########
+# Build with Dockerfiles
+########
+buildah bud -f ./Dockerfile --format=docker -t onbuild-image .
+buildah bud -f ./Dockerfile-2 --format=docker -t result-image .
+
+########
+# Build a container to see if the /bar file has been created.
+########
+ctr=$(buildah from result-image)
+
+########
+# Validate that the /bar file has been created in the container.
+########
+buildah run $ctr ls -alF /bar /foo /baz
+
+########
+# Build Dockerfile for WhaleSays
+########
+FILE=./Dockerfile
+/bin/cat <<EOM >$FILE
+FROM docker/whalesay:latest
+RUN apt-get -y update && apt-get install -y fortunes
+CMD /usr/games/fortune -a | cowsay
+EOM
+chmod +x $FILE
+
+########
+# Build with the Dockerfile
+########
+buildah bud -f Dockerfile -t whale-says .
+
+########
+# Create a whalesays container
+########
+whalesays=$(buildah from whale-says)
+
+########
+# Run the container to see what the whale says
+########
+buildah run $whalesays bash -c '/usr/games/fortune -a | cowsay'
+
+########
+# Clean up Buildah
+########
+buildah rm --all
+buildah rmi --all
diff --git a/tests/test_buildah_build_rpm.sh b/tests/test_buildah_build_rpm.sh
new file mode 100755
index 0000000..6b5834b
--- /dev/null
+++ b/tests/test_buildah_build_rpm.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+#
+# test_buildah_build_rpm.sh
+#
+# Meant to run on a freshly installed VM.
+# Installs the latest Git and Buildah and then
+# Builds and installs Buildah's RPM in a Buildah Container.
+# The baseline test is then run on this vm and then the
+# newly created BUILDAH rpm is installed and the baseline
+# test is rerun.
+#
+
+########
+# Setup
+########
+IMAGE=registry.fedoraproject.org/fedora
+SBOX=/tmp/sandbox
+PACKAGES=/tmp/packages
+mkdir -p ${SBOX}/buildah
+GITROOT=${SBOX}/buildah
+TEST_SOURCES=${GITROOT}/tests
+
+# Change packager as appropriate for the platform
+PACKAGER=dnf
+
+${PACKAGER} install -y git
+${PACKAGER} install -y buildah
+
+########
+# Clone buildah from GitHub.com
+########
+cd $SBOX
+git clone https://github.com/containers/buildah.git
+cd $GITROOT
+
+########
+# Build a container to use for building the binaries.
+########
+CTRID=$(buildah from --pull --signature-policy ${TEST_SOURCES}/policy.json $IMAGE)
+ROOTMNT=$(buildah mount $CTRID)
+COMMIT=$(git log --format=%H -n 1)
+SHORTCOMMIT=$(echo ${COMMIT} | cut -c-7)
+mkdir -p ${ROOTMNT}/rpmbuild/{SOURCES,SPECS}
+
+########
+# Build the tarball.
+########
+(git archive --format tar.gz --prefix=buildah-${COMMIT}/ ${COMMIT}) > ${ROOTMNT}/rpmbuild/SOURCES/buildah-${SHORTCOMMIT}.tar.gz
+
+########
+# Update the .spec file with the commit ID.
+# (the spec ships with a REPLACEWITHCOMMITID placeholder; see
+# tests/test_buildah_rpm.sh which guards that placeholder)
+########
+sed s:REPLACEWITHCOMMITID:${COMMIT}:g ${GITROOT}/contrib/rpm/buildah.spec > ${ROOTMNT}/rpmbuild/SPECS/buildah.spec
+
+########
+# Install build dependencies and build binary packages.
+########
+buildah run $CTRID -- dnf -y install 'dnf-command(builddep)' rpm-build
+buildah run $CTRID -- dnf -y builddep --spec /rpmbuild/SPECS/buildah.spec
+buildah run $CTRID -- rpmbuild --define "_topdir /rpmbuild" -ba /rpmbuild/SPECS/buildah.spec
+
+########
+# Build a second new container.
+########
+CTRID2=$(buildah from --pull --signature-policy ${TEST_SOURCES}/policy.json $IMAGE)
+ROOTMNT2=$(buildah mount $CTRID2)
+
+########
+# Copy the binary packages from the first container to the second one and to
+# /tmp. Also build a list of their filenames.
+########
+rpms=
+mkdir -p ${ROOTMNT2}/${PACKAGES}
+mkdir -p ${PACKAGES}
+for rpm in ${ROOTMNT}/rpmbuild/RPMS/*/*.rpm ; do
+    cp $rpm ${ROOTMNT2}/${PACKAGES}
+    cp $rpm ${PACKAGES}
+    rpms="$rpms "${PACKAGES}/$(basename $rpm)
+done
+
+########
+# Install the binary packages into the second container.
+# ($PACKAGES is the same path inside and outside the container, so the
+# $rpms paths built above are valid in both contexts.)
+########
+buildah run $CTRID2 -- dnf -y install $rpms
+
+########
+# Run the binary package and compare its self-identified version to the one we tried to build.
+########
+id=$(buildah run $CTRID2 -- buildah version | awk '/^Git Commit:/ { print $NF }')
+bv=$(buildah run $CTRID2 -- buildah version | awk '/^Version:/ { print $NF }')
+rv=$(buildah run $CTRID2 -- rpm -q --queryformat '%{version}' buildah)
+echo "short commit: $SHORTCOMMIT"
+echo "id: $id"
+echo "buildah version: $bv"
+echo "buildah rpm version: $rv"
+test $SHORTCOMMIT = $id
+test $bv = $rv
+
+########
+# Clean up Buildah
+########
+buildah rm $(buildah containers -q)
+buildah rmi -f $(buildah images -q)
+
+########
+# Kick off baseline testing against the installed Buildah
+########
+/bin/bash -v ${TEST_SOURCES}/test_buildah_baseline.sh
+
+########
+# Install the Buildah we just built locally and run
+# the baseline tests again.
+########
+${PACKAGER} -y install ${PACKAGES}/*.rpm
+/bin/bash -v ${TEST_SOURCES}/test_buildah_baseline.sh
+
+########
+# Clean up
+########
+rm -rf ${SBOX}
+rm -rf ${PACKAGES}
+buildah rm $(buildah containers -q)
+buildah rmi -f $(buildah images -q)
+${PACKAGER} remove -y buildah
diff --git a/tests/test_buildah_rpm.sh b/tests/test_buildah_rpm.sh
new file mode 100644
index 0000000..71c616a
--- /dev/null
+++ b/tests/test_buildah_rpm.sh
@@ -0,0 +1,115 @@
+#!/usr/bin/env bats
+
+load helpers
+
+# Ensure that any updated/pushed rpm .spec files don't clobber the commit placeholder.
+# The rpm-build tests below (and the packaging scripts) sed the real commit ID
+# into this placeholder, so it must survive spec-file updates verbatim.
+@test "rpm REPLACEWITHCOMMITID placeholder exists in .spec file" {
+  run grep -q "^%global[ ]\+commit[ ]\+REPLACEWITHCOMMITID$" ${TEST_SOURCES}/../contrib/rpm/buildah.spec
+  [ "$status" -eq 0 ]
+}
+
+# End-to-end packaging test: build buildah's RPM from the current git commit
+# inside a CentOS 7 container, install it into a second container, and verify
+# the installed binary reports the expected commit and version.
+# Requires a runtime, network access, and root (for buildah mount).
+@test "rpm-build CentOS 7" {
+  skip_if_no_runtime
+
+  # Build a container to use for building the binaries.
+  image=quay.io/libpod/centos:7
+  cid=$(buildah from --pull --signature-policy ${TEST_SOURCES}/policy.json $image)
+  root=$(buildah mount $cid)
+  commit=$(git log --format=%H -n 1)
+  shortcommit=$(echo ${commit} | cut -c-7)
+  mkdir -p ${root}/rpmbuild/{SOURCES,SPECS}
+
+  # Build the tarball.
+  (cd ..; git archive --format tar.gz --prefix=buildah-${commit}/ ${commit}) > ${root}/rpmbuild/SOURCES/buildah-${shortcommit}.tar.gz
+
+  # Update the .spec file with the commit ID.
+  sed s:REPLACEWITHCOMMITID:${commit}:g ${TEST_SOURCES}/../contrib/rpm/buildah.spec > ${root}/rpmbuild/SPECS/buildah.spec
+
+  # Install build dependencies and build binary packages.
+  buildah run $cid -- yum -y install rpm-build yum-utils
+  buildah run $cid -- yum-builddep -y rpmbuild/SPECS/buildah.spec
+  buildah run $cid -- rpmbuild --define "_topdir /rpmbuild" -ba /rpmbuild/SPECS/buildah.spec
+
+  # Build a second new container.
+  cid2=$(buildah from --pull --signature-policy ${TEST_SOURCES}/policy.json $image)
+  root2=$(buildah mount $cid2)
+
+  # Copy the binary packages from the first container to the second one, and build a list of
+  # their filenames relative to the root of the second container.
+  rpms=
+  mkdir -p ${root2}/packages
+  for rpm in ${root}/rpmbuild/RPMS/*/*.rpm ; do
+    cp $rpm ${root2}/packages/
+    rpms="$rpms "/packages/$(basename $rpm)
+  done
+
+  # Install the binary packages into the second container.
+  buildah run $cid2 -- yum -y install $rpms
+
+  # Run the binary package and compare its self-identified version to the one we tried to build.
+  id=$(buildah run $cid2 -- buildah version | awk '/^Git Commit:/ { print $NF }')
+  bv=$(buildah run $cid2 -- buildah version | awk '/^Version:/ { print $NF }')
+  rv=$(buildah run $cid2 -- rpm -q --queryformat '%{version}' buildah)
+  echo "short commit: $shortcommit"
+  echo "id: $id"
+  echo "buildah version: $bv"
+  echo "buildah rpm version: $rv"
+  test $shortcommit = $id
+  # Accept either an exact version match or a "-dev" suffixed build.
+  test $bv = ${rv} -o $bv = ${rv}-dev
+
+  # Clean up.
+  buildah rm $cid $cid2
+}
+
+# Same end-to-end packaging test as "rpm-build CentOS 7" above, but against
+# the latest Fedora image, using dnf builddep instead of yum-builddep.
+@test "rpm-build Fedora latest" {
+  skip_if_no_runtime
+
+  # Build a container to use for building the binaries.
+  image=registry.fedoraproject.org/fedora:latest
+  cid=$(buildah from --pull --signature-policy ${TEST_SOURCES}/policy.json $image)
+  root=$(buildah mount $cid)
+  commit=$(git log --format=%H -n 1)
+  shortcommit=$(echo ${commit} | cut -c-7)
+  mkdir -p ${root}/rpmbuild/{SOURCES,SPECS}
+
+  # Build the tarball.
+  (cd ..; git archive --format tar.gz --prefix=buildah-${commit}/ ${commit}) > ${root}/rpmbuild/SOURCES/buildah-${shortcommit}.tar.gz
+
+  # Update the .spec file with the commit ID.
+  sed s:REPLACEWITHCOMMITID:${commit}:g ${TEST_SOURCES}/../contrib/rpm/buildah.spec > ${root}/rpmbuild/SPECS/buildah.spec
+
+  # Install build dependencies and build binary packages.
+  buildah run $cid -- dnf -y install 'dnf-command(builddep)' rpm-build
+  buildah run $cid -- dnf -y builddep --spec rpmbuild/SPECS/buildah.spec
+  buildah run $cid -- rpmbuild --define "_topdir /rpmbuild" -ba /rpmbuild/SPECS/buildah.spec
+
+  # Build a second new container.
+  cid2=$(buildah from --pull --signature-policy ${TEST_SOURCES}/policy.json $image)
+  root2=$(buildah mount $cid2)
+
+  # Copy the binary packages from the first container to the second one, and build a list of
+  # their filenames relative to the root of the second container.
+  rpms=
+  mkdir -p ${root2}/packages
+  for rpm in ${root}/rpmbuild/RPMS/*/*.rpm ; do
+    cp $rpm ${root2}/packages/
+    rpms="$rpms "/packages/$(basename $rpm)
+  done
+
+  # Install the binary packages into the second container.
+  buildah run $cid2 -- dnf -y install $rpms
+
+  # Run the binary package and compare its self-identified version to the one we tried to build.
+  id=$(buildah run $cid2 -- buildah version | awk '/^Git Commit:/ { print $NF }')
+  bv=$(buildah run $cid2 -- buildah version | awk '/^Version:/ { print $NF }')
+  rv=$(buildah run $cid2 -- rpm -q --queryformat '%{version}' buildah)
+  echo "short commit: $shortcommit"
+  echo "id: $id"
+  echo "buildah version: $bv"
+  echo "buildah rpm version: $rv"
+  test $shortcommit = $id
+  # Accept either an exact version match or a "-dev" suffixed build.
+  test $bv = ${rv} -o $bv = ${rv}-dev
+
+  # Clean up.
+  buildah rm $cid $cid2
+}
diff --git a/tests/test_runner.sh b/tests/test_runner.sh
new file mode 100755
index 0000000..a93654b
--- /dev/null
+++ b/tests/test_runner.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+set -e
+
+# Run from the directory containing this script, regardless of where the
+# caller invoked it from, so relative paths to tests and helpers resolve.
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+# Default to using /var/tmp for test space, since it's more likely to support
+# labels than /tmp, which is often on tmpfs.
+export TMPDIR=${TMPDIR:-/var/tmp}
+
+# Load the helpers.
+. helpers.bash
+
+# Echo a command to stderr, then run it.
+function execute() {
+	>&2 echo "++ $@"
+	eval "$@"
+}
+
+# Tests to run. Defaults to all.
+TESTS=${@:-.}
+
+# Run the tests.
+execute time bats --tap $TESTS
diff --git a/tests/testreport/testreport.go b/tests/testreport/testreport.go
new file mode 100644
index 0000000..64a55cf
--- /dev/null
+++ b/tests/testreport/testreport.go
@@ -0,0 +1,449 @@
+package main
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/containers/buildah/tests/testreport/types"
+ "github.com/containers/storage/pkg/mount"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "github.com/syndtr/gocapability/capability"
+ "golang.org/x/sys/unix"
+ "golang.org/x/term"
+)
+
+// getVersion records, in the report, the OCI runtime-spec version that this
+// binary was compiled against.
+func getVersion(r *types.TestReport) {
+	r.Spec.Version = fmt.Sprintf("%d.%d.%d%s", specs.VersionMajor, specs.VersionMinor, specs.VersionPatch, specs.VersionDev)
+}
+
+// getHostname records the hostname as seen by this process.
+func getHostname(r *types.TestReport) error {
+	hostname, err := os.Hostname()
+	if err != nil {
+		return fmt.Errorf("reading hostname: %w", err)
+	}
+	r.Spec.Hostname = hostname
+	return nil
+}
+
+// getProcessTerminal records whether stdin is attached to a terminal.
+func getProcessTerminal(r *types.TestReport) error {
+	r.Spec.Process.Terminal = term.IsTerminal(unix.Stdin)
+	return nil
+}
+
+// getProcessConsoleSize records the dimensions of the terminal on stdin, if
+// stdin is a terminal; otherwise ConsoleSize is left unset.
+func getProcessConsoleSize(r *types.TestReport) error {
+	if term.IsTerminal(unix.Stdin) {
+		winsize, err := unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ)
+		if err != nil {
+			return fmt.Errorf("reading size of terminal on stdin: %w", err)
+		}
+		if r.Spec.Process.ConsoleSize == nil {
+			r.Spec.Process.ConsoleSize = new(specs.Box)
+		}
+		r.Spec.Process.ConsoleSize.Height = uint(winsize.Row)
+		r.Spec.Process.ConsoleSize.Width = uint(winsize.Col)
+	}
+	return nil
+}
+
+// getProcessUser records the real UID, real GID, and supplemental group IDs
+// of the current process.
+func getProcessUser(r *types.TestReport) error {
+	r.Spec.Process.User.UID = uint32(unix.Getuid())
+	r.Spec.Process.User.GID = uint32(unix.Getgid())
+	groups, err := unix.Getgroups()
+	if err != nil {
+		return fmt.Errorf("reading supplemental groups list: %w", err)
+	}
+	for _, gid := range groups {
+		r.Spec.Process.User.AdditionalGids = append(r.Spec.Process.User.AdditionalGids, uint32(gid))
+	}
+	return nil
+}
+
+// getProcessArgs records a copy of this process's command-line arguments.
+func getProcessArgs(r *types.TestReport) error {
+	r.Spec.Process.Args = append([]string{}, os.Args...)
+	return nil
+}
+
+// getProcessEnv records a copy of this process's environment variables.
+func getProcessEnv(r *types.TestReport) error {
+	r.Spec.Process.Env = append([]string{}, os.Environ()...)
+	return nil
+}
+
+// getProcessCwd records the current working directory of this process.
+func getProcessCwd(r *types.TestReport) error {
+	cwd := make([]byte, 8192)
+	n, err := unix.Getcwd(cwd)
+	if err != nil {
+		return fmt.Errorf("determining current working directory: %w", err)
+	}
+	// Trim any trailing NUL bytes the syscall left in the buffer.
+	for n > 0 && cwd[n-1] == 0 {
+		n--
+	}
+	r.Spec.Process.Cwd = string(cwd[:n])
+	return nil
+}
+
+// getProcessCapabilities records this process's capability sets (effective,
+// permitted, inheritable, bounding, and ambient) using the runtime-spec's
+// upper-case "CAP_XXX" naming convention.
+func getProcessCapabilities(r *types.TestReport) error {
+	capabilities, err := capability.NewPid2(0)
+	if err != nil {
+		return fmt.Errorf("reading current capabilities: %w", err)
+	}
+	if err := capabilities.Load(); err != nil {
+		return fmt.Errorf("loading capabilities: %w", err)
+	}
+	if r.Spec.Process.Capabilities == nil {
+		r.Spec.Process.Capabilities = new(specs.LinuxCapabilities)
+	}
+	// Map each capability set to the report field that should receive it.
+	caplistMap := map[capability.CapType]*[]string{
+		capability.EFFECTIVE:   &r.Spec.Process.Capabilities.Effective,
+		capability.PERMITTED:   &r.Spec.Process.Capabilities.Permitted,
+		capability.INHERITABLE: &r.Spec.Process.Capabilities.Inheritable,
+		capability.BOUNDING:    &r.Spec.Process.Capabilities.Bounding,
+		capability.AMBIENT:     &r.Spec.Process.Capabilities.Ambient,
+	}
+	for capType, capList := range caplistMap {
+		// Name the loop variable "c", not "cap", to avoid shadowing the
+		// builtin cap() function.
+		for _, c := range capability.List() {
+			if capabilities.Get(capType, c) {
+				*capList = append(*capList, strings.ToUpper("cap_"+c.String()))
+			}
+		}
+	}
+	return nil
+}
+
+// getProcessRLimits records the resource limits of the current process,
+// omitting any limit whose soft and hard values are both unlimited.
+func getProcessRLimits(r *types.TestReport) error {
+	// Resource limits the runtime spec can describe, keyed by spec name.
+	limitsMap := map[string]int{
+		"RLIMIT_AS":         unix.RLIMIT_AS,
+		"RLIMIT_CORE":       unix.RLIMIT_CORE,
+		"RLIMIT_CPU":        unix.RLIMIT_CPU,
+		"RLIMIT_DATA":       unix.RLIMIT_DATA,
+		"RLIMIT_FSIZE":      unix.RLIMIT_FSIZE,
+		"RLIMIT_LOCKS":      unix.RLIMIT_LOCKS,
+		"RLIMIT_MEMLOCK":    unix.RLIMIT_MEMLOCK,
+		"RLIMIT_MSGQUEUE":   unix.RLIMIT_MSGQUEUE,
+		"RLIMIT_NICE":       unix.RLIMIT_NICE,
+		"RLIMIT_NOFILE":     unix.RLIMIT_NOFILE,
+		"RLIMIT_NPROC":      unix.RLIMIT_NPROC,
+		"RLIMIT_RSS":        unix.RLIMIT_RSS,
+		"RLIMIT_RTPRIO":     unix.RLIMIT_RTPRIO,
+		"RLIMIT_RTTIME":     unix.RLIMIT_RTTIME,
+		"RLIMIT_SIGPENDING": unix.RLIMIT_SIGPENDING,
+		"RLIMIT_STACK":      unix.RLIMIT_STACK,
+	}
+	for resourceName, resource := range limitsMap {
+		var rlim unix.Rlimit
+		if err := unix.Getrlimit(resource, &rlim); err != nil {
+			return fmt.Errorf("reading %s limit: %w", resourceName, err)
+		}
+		// Both values unlimited: nothing interesting to report.
+		if rlim.Cur == unix.RLIM_INFINITY && rlim.Max == unix.RLIM_INFINITY {
+			continue
+		}
+		rlimit := specs.POSIXRlimit{
+			Type: resourceName,
+			Soft: rlim.Cur,
+			Hard: rlim.Max,
+		}
+		// Replace an existing entry for this resource if present;
+		// otherwise append a new one.
+		found := false
+		for i := range r.Spec.Process.Rlimits {
+			if r.Spec.Process.Rlimits[i].Type == resourceName {
+				r.Spec.Process.Rlimits[i] = rlimit
+				found = true
+			}
+		}
+		if !found {
+			r.Spec.Process.Rlimits = append(r.Spec.Process.Rlimits, rlimit)
+		}
+	}
+	return nil
+}
+
+// getProcessNoNewPrivileges records whether the no-new-privileges bit is set
+// for this process, as reported by prctl(PR_GET_NO_NEW_PRIVS).
+func getProcessNoNewPrivileges(r *types.TestReport) error {
+	// We'd scan /proc/self/status here, but the "NoNewPrivs" line wasn't added until 4.10,
+	// and we want to succeed on older kernels.
+	r1, err := unix.PrctlRetInt(unix.PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0)
+	if err != nil {
+		return fmt.Errorf("reading no-new-privs bit: %w", err)
+	}
+	r.Spec.Process.NoNewPrivileges = (r1 != 0)
+	return nil
+}
+
+// getProcessAppArmorProfile would record the AppArmor profile applied to
+// this process; not implemented yet.
+func getProcessAppArmorProfile(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getProcessOOMScoreAdjust records the OOM score adjustment of this process,
+// read from /proc/self/oom_score_adj.  A zero adjustment (the default) is
+// left unset in the report.
+func getProcessOOMScoreAdjust(r *types.TestReport) error {
+	node := "/proc/self/oom_score_adj"
+	score, err := os.ReadFile(node)
+	if err != nil {
+		return fmt.Errorf("reading %q: %w", node, err)
+	}
+	fields := strings.Fields(string(score))
+	if len(fields) != 1 {
+		return fmt.Errorf("badly formatted line %q in %q: expected to find only one field", string(score), node)
+	}
+	oom, err := strconv.Atoi(fields[0])
+	if err != nil {
+		return fmt.Errorf("parsing %q in line %q in %q: %w", fields[0], string(score), node, err)
+	}
+	if oom != 0 {
+		r.Spec.Process.OOMScoreAdj = &oom
+	}
+	return nil
+}
+
+// getProcessSeLinuxLabel would record the SELinux label of this process;
+// not implemented yet.
+func getProcessSeLinuxLabel(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getProcess fills in the process-related fields of the report: terminal
+// status, console size, user/group IDs, arguments, environment, working
+// directory, capabilities, resource limits, the no-new-privileges bit, the
+// AppArmor profile, the OOM score adjustment, and the SELinux label.
+func getProcess(r *types.TestReport) error {
+	if r.Spec.Process == nil {
+		r.Spec.Process = new(specs.Process)
+	}
+	// Run each collector in order, stopping at the first failure.
+	collectors := []func(*types.TestReport) error{
+		getProcessTerminal,
+		getProcessConsoleSize,
+		getProcessUser,
+		getProcessArgs,
+		getProcessEnv,
+		getProcessCwd,
+		getProcessCapabilities,
+		getProcessRLimits,
+		getProcessNoNewPrivileges,
+		getProcessAppArmorProfile,
+		getProcessOOMScoreAdjust,
+		getProcessSeLinuxLabel,
+	}
+	for _, collect := range collectors {
+		if err := collect(r); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getMounts records the mounts visible to the current process.  Each mount's
+// option string is split on commas into the spec's option list.
+func getMounts(r *types.TestReport) error {
+	infos, err := mount.GetMounts()
+	if err != nil {
+		return fmt.Errorf("reading current list of mounts: %w", err)
+	}
+	for _, info := range infos {
+		// Use a name other than "mount" for the local variable so it
+		// doesn't shadow the imported mount package.
+		m := specs.Mount{
+			Destination: info.Mountpoint,
+			Type:        info.FSType,
+			Source:      info.Source,
+			Options:     strings.Split(info.Options, ","),
+		}
+		r.Spec.Mounts = append(r.Spec.Mounts, m)
+	}
+	return nil
+}
+
+// getLinuxIDMappings records the user and group ID mappings of the current
+// user namespace, read from /proc/self/uid_map and /proc/self/gid_map.
+func getLinuxIDMappings(r *types.TestReport) error {
+	// getIDMapping parses one map file; each line holds three unsigned
+	// integers: container-side base ID, host-side base ID, and range size.
+	getIDMapping := func(node string) ([]specs.LinuxIDMapping, error) {
+		var mappings []specs.LinuxIDMapping
+		mapfile, err := os.Open(node)
+		if err != nil {
+			return nil, fmt.Errorf("opening %q: %w", node, err)
+		}
+		defer mapfile.Close()
+		scanner := bufio.NewScanner(mapfile)
+		for scanner.Scan() {
+			line := scanner.Text()
+			fields := strings.Fields(line)
+			if len(fields) != 3 {
+				return nil, fmt.Errorf("badly formatted line %q in %q: expected to find exactly three fields", line, node)
+			}
+			cid, err := strconv.ParseUint(fields[0], 10, 32)
+			if err != nil {
+				return nil, fmt.Errorf("parsing %q in line %q in %q: %w", fields[0], line, node, err)
+			}
+			hid, err := strconv.ParseUint(fields[1], 10, 32)
+			if err != nil {
+				return nil, fmt.Errorf("parsing %q in line %q in %q: %w", fields[1], line, node, err)
+			}
+			size, err := strconv.ParseUint(fields[2], 10, 32)
+			if err != nil {
+				return nil, fmt.Errorf("parsing %q in line %q in %q: %w", fields[2], line, node, err)
+			}
+			mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
+		}
+		// A scanner stops on read errors as well as EOF; without this
+		// check a read error would silently truncate the mappings.
+		if err := scanner.Err(); err != nil {
+			return nil, fmt.Errorf("reading %q: %w", node, err)
+		}
+		return mappings, nil
+	}
+	uidmap, err := getIDMapping("/proc/self/uid_map")
+	if err != nil {
+		return err
+	}
+	gidmap, err := getIDMapping("/proc/self/gid_map")
+	if err != nil {
+		return err
+	}
+	r.Spec.Linux.UIDMappings = uidmap
+	r.Spec.Linux.GIDMappings = gidmap
+	return nil
+}
+
+// getLinuxSysctl walks /proc/sys and records every readable sysctl and its
+// value in the report's Linux.Sysctl map, using dotted sysctl notation for
+// the keys.
+func getLinuxSysctl(r *types.TestReport) error {
+	if r.Spec.Linux.Sysctl == nil {
+		r.Spec.Linux.Sysctl = make(map[string]string)
+	}
+	walk := func(path string, info os.FileInfo, _ error) error {
+		if info.IsDir() {
+			return nil
+		}
+		value, err := os.ReadFile(path)
+		if err != nil {
+			// Some nodes can't be read without privileges, or at all;
+			// skip those rather than failing the whole walk.
+			if pe, ok := err.(*os.PathError); ok {
+				if errno, ok := pe.Err.(syscall.Errno); ok &&
+					(errno == syscall.EACCES || errno == syscall.EINVAL || errno == syscall.EIO || errno == syscall.EPERM) {
+					return nil
+				}
+			}
+			return fmt.Errorf("reading sysctl %q: %w", path, err)
+		}
+		// Convert the node's path under /proc/sys into dotted notation.
+		name := strings.ReplaceAll(strings.TrimPrefix(path, "/proc/sys/"), "/", ".")
+		trimmed := strings.TrimRight(string(value), "\r\n")
+		if strings.ContainsAny(trimmed, "\r\n") {
+			// Multi-line values are kept verbatim.
+			trimmed = string(value)
+		}
+		r.Spec.Linux.Sysctl[name] = trimmed
+		return nil
+	}
+	return filepath.Walk("/proc/sys", walk)
+}
+
+// getLinuxResources would record cgroup resource settings; not implemented yet.
+func getLinuxResources(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinuxCgroupsPath would record the cgroups path; not implemented yet.
+func getLinuxCgroupsPath(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinuxNamespaces would record namespace membership; not implemented yet.
+func getLinuxNamespaces(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinuxDevices would record available device nodes; not implemented yet.
+func getLinuxDevices(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinuxRootfsPropagation would record the root mount's propagation mode;
+// not implemented yet.
+func getLinuxRootfsPropagation(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinuxMaskedPaths would record masked paths; not implemented yet.
+func getLinuxMaskedPaths(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinuxReadOnlyPaths would record read-only paths; not implemented yet.
+func getLinuxReadOnlyPaths(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinuxMountLabel would record the mount label; not implemented yet.
+func getLinuxMountLabel(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinuxIntelRdt would record Intel RDT settings; not implemented yet.
+func getLinuxIntelRdt(r *types.TestReport) error {
+	// TODO
+	return nil
+}
+
+// getLinux fills in the Linux-specific fields of the report: ID mappings,
+// sysctls, and (once implemented) resources, cgroups path, namespaces,
+// devices, rootfs propagation, masked/read-only paths, mount label, and
+// Intel RDT settings.
+func getLinux(r *types.TestReport) error {
+	if r.Spec.Linux == nil {
+		r.Spec.Linux = new(specs.Linux)
+	}
+	// Run each collector in order, stopping at the first failure.
+	collectors := []func(*types.TestReport) error{
+		getLinuxIDMappings,
+		getLinuxSysctl,
+		getLinuxResources,
+		getLinuxCgroupsPath,
+		getLinuxNamespaces,
+		getLinuxDevices,
+		getLinuxRootfsPropagation,
+		getLinuxMaskedPaths,
+		getLinuxReadOnlyPaths,
+		getLinuxMountLabel,
+		getLinuxIntelRdt,
+	}
+	for _, collect := range collectors {
+		if err := collect(r); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func main() {
+ var r types.TestReport
+
+ if r.Spec == nil {
+ r.Spec = new(specs.Spec)
+ }
+ getVersion(&r)
+ if err := getProcess(&r); err != nil {
+ logrus.Errorf("%v", err)
+ os.Exit(1)
+ }
+ if err := getHostname(&r); err != nil {
+ logrus.Errorf("%v", err)
+ os.Exit(1)
+ }
+ if err := getMounts(&r); err != nil {
+ logrus.Errorf("%v", err)
+ os.Exit(1)
+ }
+ if err := getLinux(&r); err != nil {
+ logrus.Errorf("%v", err)
+ os.Exit(1)
+ }
+
+ if err := json.NewEncoder(os.Stdout).Encode(r); err != nil {
+ logrus.Errorf("%v", err)
+ os.Exit(1)
+ }
+}
diff --git a/tests/testreport/types/types.go b/tests/testreport/types/types.go
new file mode 100644
index 0000000..80c47da
--- /dev/null
+++ b/tests/testreport/types/types.go
@@ -0,0 +1,10 @@
+package types
+
+import (
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// TestReport is an internal type used for testing.  It carries, as an OCI
+// runtime-spec structure, the settings that the testreport helper observed
+// from inside a container.
+type TestReport struct {
+	// Spec holds the observed configuration; it is populated by the
+	// collector functions in the testreport main package.
+	Spec *specs.Spec
+}
diff --git a/tests/tools/Makefile b/tests/tools/Makefile
new file mode 100644
index 0000000..1365f62
--- /dev/null
+++ b/tests/tools/Makefile
@@ -0,0 +1,31 @@
+# Go toolchain used to build the helper tools; override with "make GO=...".
+GO := go
+GO_BUILD=$(GO) build
+
+# Directory that receives the built tool binaries.
+BUILDDIR := build
+
+all: $(BUILDDIR)
+
+# Re-vendor dependencies after changing go.mod.
+.PHONY: vendor
+vendor:
+	$(GO) mod tidy
+	$(GO) mod vendor
+	$(GO) mod verify
+
+.PHONY: clean
+clean:
+	rm -rf $(BUILDDIR)
+
+# Build all of the vendored tools.
+.PHONY: $(BUILDDIR)
+$(BUILDDIR): \
+	$(BUILDDIR)/ginkgo \
+	$(BUILDDIR)/go-md2man \
+	$(BUILDDIR)/golangci-lint
+
+$(BUILDDIR)/ginkgo:
+	$(GO_BUILD) -o $@ ./vendor/github.com/onsi/ginkgo/ginkgo
+
+$(BUILDDIR)/go-md2man:
+	$(GO_BUILD) -o $@ ./vendor/github.com/cpuguy83/go-md2man/v2
+
+$(BUILDDIR)/golangci-lint:
+	$(GO_BUILD) -o $@ ./vendor/github.com/golangci/golangci-lint/cmd/golangci-lint
diff --git a/tests/tools/go.mod b/tests/tools/go.mod
new file mode 100644
index 0000000..b28cde7
--- /dev/null
+++ b/tests/tools/go.mod
@@ -0,0 +1,187 @@
+module github.com/containers/buildah/tests/tools
+
+go 1.17
+
+require (
+ github.com/cpuguy83/go-md2man/v2 v2.0.2
+ github.com/golangci/golangci-lint v1.51.2
+ github.com/onsi/ginkgo v1.16.5
+)
+
+require (
+ 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
+ 4d63.com/gochecknoglobals v0.2.1 // indirect
+ github.com/Abirdcfly/dupword v0.0.9 // indirect
+ github.com/Antonboom/errname v0.1.7 // indirect
+ github.com/Antonboom/nilnil v0.1.1 // indirect
+ github.com/BurntSushi/toml v1.2.1 // indirect
+ github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
+ github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect
+ github.com/Masterminds/semver v1.5.0 // indirect
+ github.com/OpenPeeDeeP/depguard v1.1.1 // indirect
+ github.com/alexkohler/prealloc v1.0.0 // indirect
+ github.com/alingse/asasalint v0.0.11 // indirect
+ github.com/ashanbrown/forbidigo v1.4.0 // indirect
+ github.com/ashanbrown/makezero v1.1.1 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/bkielbasa/cyclop v1.2.0 // indirect
+ github.com/blizzy78/varnamelen v0.8.0 // indirect
+ github.com/bombsimon/wsl/v3 v3.4.0 // indirect
+ github.com/breml/bidichk v0.2.3 // indirect
+ github.com/breml/errchkjson v0.3.0 // indirect
+ github.com/butuzov/ireturn v0.1.1 // indirect
+ github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/charithe/durationcheck v0.0.9 // indirect
+ github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348 // indirect
+ github.com/curioswitch/go-reassign v0.2.0 // indirect
+ github.com/daixiang0/gci v0.9.1 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/denis-tingaikin/go-header v0.4.3 // indirect
+ github.com/esimonov/ifshort v1.0.4 // indirect
+ github.com/ettle/strcase v0.1.1 // indirect
+ github.com/fatih/color v1.14.1 // indirect
+ github.com/fatih/structtag v1.2.0 // indirect
+ github.com/firefart/nonamedreturns v1.0.4 // indirect
+ github.com/fsnotify/fsnotify v1.5.4 // indirect
+ github.com/fzipp/gocyclo v0.6.0 // indirect
+ github.com/go-critic/go-critic v0.6.7 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
+ github.com/go-toolsmith/astcast v1.1.0 // indirect
+ github.com/go-toolsmith/astcopy v1.0.3 // indirect
+ github.com/go-toolsmith/astequal v1.1.0 // indirect
+ github.com/go-toolsmith/astfmt v1.1.0 // indirect
+ github.com/go-toolsmith/astp v1.1.0 // indirect
+ github.com/go-toolsmith/strparse v1.1.0 // indirect
+ github.com/go-toolsmith/typep v1.1.0 // indirect
+ github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
+ github.com/gobwas/glob v0.2.3 // indirect
+ github.com/gofrs/flock v0.8.1 // indirect
+ github.com/golang/protobuf v1.5.2 // indirect
+ github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
+ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
+ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
+ github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
+ github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
+ github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
+ github.com/golangci/misspell v0.4.0 // indirect
+ github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
+ github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
+ github.com/google/go-cmp v0.5.9 // indirect
+ github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 // indirect
+ github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
+ github.com/gostaticanalysis/comment v1.4.2 // indirect
+ github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
+ github.com/gostaticanalysis/nilerr v0.1.1 // indirect
+ github.com/hashicorp/errwrap v1.0.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/hexops/gotextdiff v1.0.3 // indirect
+ github.com/inconshreveable/mousetrap v1.0.1 // indirect
+ github.com/jgautheron/goconst v1.5.1 // indirect
+ github.com/jingyugao/rowserrcheck v1.1.1 // indirect
+ github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
+ github.com/julz/importas v0.1.0 // indirect
+ github.com/junk1tm/musttag v0.4.5 // indirect
+ github.com/kisielk/errcheck v1.6.3 // indirect
+ github.com/kisielk/gotool v1.0.0 // indirect
+ github.com/kkHAIKE/contextcheck v1.1.3 // indirect
+ github.com/kulti/thelper v0.6.3 // indirect
+ github.com/kunwardeep/paralleltest v1.0.6 // indirect
+ github.com/kyoh86/exportloopref v0.1.11 // indirect
+ github.com/ldez/gomoddirectives v0.2.3 // indirect
+ github.com/ldez/tagliatelle v0.4.0 // indirect
+ github.com/leonklingele/grouper v1.1.1 // indirect
+ github.com/lufeee/execinquery v1.2.1 // indirect
+ github.com/magiconair/properties v1.8.6 // indirect
+ github.com/maratori/testableexamples v1.0.0 // indirect
+ github.com/maratori/testpackage v1.1.0 // indirect
+ github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/mattn/go-runewidth v0.0.9 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+ github.com/mbilski/exhaustivestruct v1.2.0 // indirect
+ github.com/mgechev/revive v1.2.5 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/moricho/tparallel v0.2.1 // indirect
+ github.com/nakabonne/nestif v0.3.1 // indirect
+ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
+ github.com/nishanths/exhaustive v0.9.5 // indirect
+ github.com/nishanths/predeclared v0.2.2 // indirect
+ github.com/nunnatsa/ginkgolinter v0.8.1 // indirect
+ github.com/nxadm/tail v1.4.8 // indirect
+ github.com/olekukonko/tablewriter v0.0.5 // indirect
+ github.com/pelletier/go-toml v1.9.5 // indirect
+ github.com/pelletier/go-toml/v2 v2.0.5 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/polyfloyd/go-errorlint v1.1.0 // indirect
+ github.com/prometheus/client_golang v1.12.1 // indirect
+ github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/common v0.32.1 // indirect
+ github.com/prometheus/procfs v0.7.3 // indirect
+ github.com/quasilyte/go-ruleguard v0.3.19 // indirect
+ github.com/quasilyte/gogrep v0.5.0 // indirect
+ github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
+ github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/ryancurrah/gomodguard v1.3.0 // indirect
+ github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect
+ github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
+ github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
+ github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect
+ github.com/securego/gosec/v2 v2.15.0 // indirect
+ github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
+ github.com/sirupsen/logrus v1.9.0 // indirect
+ github.com/sivchari/containedctx v1.0.2 // indirect
+ github.com/sivchari/nosnakecase v1.7.0 // indirect
+ github.com/sivchari/tenv v1.7.1 // indirect
+ github.com/sonatard/noctx v0.0.1 // indirect
+ github.com/sourcegraph/go-diff v0.7.0 // indirect
+ github.com/spf13/afero v1.8.2 // indirect
+ github.com/spf13/cast v1.5.0 // indirect
+ github.com/spf13/cobra v1.6.1 // indirect
+ github.com/spf13/jwalterweatherman v1.1.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/viper v1.12.0 // indirect
+ github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
+ github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
+ github.com/stretchr/objx v0.5.0 // indirect
+ github.com/stretchr/testify v1.8.1 // indirect
+ github.com/subosito/gotenv v1.4.1 // indirect
+ github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect
+ github.com/tdakkota/asciicheck v0.1.1 // indirect
+ github.com/tetafro/godot v1.4.11 // indirect
+ github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e // indirect
+ github.com/timonwong/loggercheck v0.9.3 // indirect
+ github.com/tomarrell/wrapcheck/v2 v2.8.0 // indirect
+ github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
+ github.com/ultraware/funlen v0.0.3 // indirect
+ github.com/ultraware/whitespace v0.0.5 // indirect
+ github.com/uudashr/gocognit v1.0.6 // indirect
+ github.com/yagipy/maintidx v1.0.0 // indirect
+ github.com/yeya24/promlinter v0.2.0 // indirect
+ gitlab.com/bosi/decorder v0.2.3 // indirect
+ go.uber.org/atomic v1.7.0 // indirect
+ go.uber.org/multierr v1.6.0 // indirect
+ go.uber.org/zap v1.17.0 // indirect
+ golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
+ golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9 // indirect
+ golang.org/x/mod v0.8.0 // indirect
+ golang.org/x/sync v0.1.0 // indirect
+ golang.org/x/sys v0.5.0 // indirect
+ golang.org/x/text v0.7.0 // indirect
+ golang.org/x/tools v0.6.0 // indirect
+ google.golang.org/protobuf v1.28.0 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
+ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ honnef.co/go/tools v0.4.2 // indirect
+ mvdan.cc/gofumpt v0.4.0 // indirect
+ mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
+ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
+ mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect
+)
diff --git a/tests/tools/go.sum b/tests/tools/go.sum
new file mode 100644
index 0000000..be35849
--- /dev/null
+++ b/tests/tools/go.sum
@@ -0,0 +1,1629 @@
+4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA=
+4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
+4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc=
+4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
+bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
+cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Abirdcfly/dupword v0.0.9 h1:MxprGjKq3yDBICXDgEEsyGirIXfMYXkLNT/agPsE1tk=
+github.com/Abirdcfly/dupword v0.0.9/go.mod h1:PzmHVLLZ27MvHSzV7eFmMXSFArWXZPZmfuuziuUrf2g=
+github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako=
+github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU=
+github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q=
+github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts=
+github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0=
+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA=
+github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=
+github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
+github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw=
+github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
+github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/ashanbrown/forbidigo v1.4.0 h1:spdPbupaSqtWORq1Q4eHBoPBmHtwVyLKwaedbSLc5Sw=
+github.com/ashanbrown/forbidigo v1.4.0/go.mod h1:IvgwB5Y4fzqSAj/WVXKWigoTkB0dzI2FBbpKWuh7ph8=
+github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
+github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
+github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=
+github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
+github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
+github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
+github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU=
+github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
+github.com/breml/bidichk v0.2.3 h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI=
+github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc70T2A=
+github.com/breml/errchkjson v0.3.0 h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw=
+github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU=
+github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=
+github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=
+github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
+github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348 h1:cy5GCEZLUCshCGCRRUjxHrDUqkB4l5cuUt3ShEckQEo=
+github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348/go.mod h1:f/miWtG3SSuTxKsNK3o58H1xl+XV6ZIfbC6p7lPPB8U=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cristalhq/acmd v0.8.1/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ=
+github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
+github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
+github.com/daixiang0/gci v0.9.1 h1:jBrwBmBZTDsGsXiaCTLIe9diotp1X4X64zodFrh7l+c=
+github.com/daixiang0/gci v0.9.1/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=
+github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
+github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA=
+github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
+github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
+github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
+github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
+github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
+github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
+github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
+github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-critic/go-critic v0.6.7 h1:1evPrElnLQ2LZtJfmNDzlieDhjnq36SLgNzisx06oPM=
+github.com/go-critic/go-critic v0.6.7/go.mod h1:fYZUijFdcnxgx6wPjQA2QEjIRaNCT0gO8bhexy6/QmE=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8=
+github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
+github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y=
+github.com/go-toolsmith/astcopy v1.0.3 h1:r0bgSRlMOAgO+BdQnVAcpMSMkrQCnV6ZJmIkrJgcJj0=
+github.com/go-toolsmith/astcopy v1.0.3/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y=
+github.com/go-toolsmith/astequal v1.0.2/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
+github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4=
+github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw=
+github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ=
+github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco=
+github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4=
+github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA=
+github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA=
+github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 h1:eD9POs68PHkwrx7hAB78z1cb6PfGq/jyWn3wJywsH1o=
+github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5/go.mod h1:3NAwwmD4uY/yggRxoEjk/S00MIV3A+H7rrE3i87eYxM=
+github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw=
+github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
+github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus=
+github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
+github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
+github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
+github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
+github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=
+github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
+github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
+github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
+github.com/golangci/golangci-lint v1.51.2 h1:yIcsT1X9ZYHdSpeWXRT1ORC/FPGSqDHbHsu9uk4FK7M=
+github.com/golangci/golangci-lint v1.51.2/go.mod h1:KH9Q7/3glwpYSknxUgUyLlAv46A8fsSKo1hH2wDvkr8=
+github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
+github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
+github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
+github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
+github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0=
+github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
+github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ=
+github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
+github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
+github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
+github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gookit/color v1.5.2/go.mod h1:w8h4bGiHeeBpvQVePTutdbERIUf3oJE5lZ8HM0UgXyg=
+github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 h1:9alfqbrhuD+9fLZ4iaAVwhlp5PEhmnBt7yvK2Oy5C1U=
+github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
+github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw=
+github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0=
+github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
+github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
+github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI=
+github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
+github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
+github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
+github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70=
+github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
+github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
+github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
+github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
+github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
+github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
+github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
+github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
+github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
+github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
+github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
+github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
+github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
+github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
+github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/junk1tm/musttag v0.4.5 h1:d+mpJ1vn6WFEVKHwkgJiIedis1u/EawKOuUTygAUtCo=
+github.com/junk1tm/musttag v0.4.5/go.mod h1:XkcL/9O6RmD88JBXb+I15nYRl9W4ExhgQeCBEhfMC8U=
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8=
+github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kkHAIKE/contextcheck v1.1.3 h1:l4pNvrb8JSwRd51ojtcOxOeHJzHek+MtOyXbaR0uvmw=
+github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
+github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
+github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g=
+github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
+github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
+github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=
+github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
+github.com/ldez/tagliatelle v0.4.0 h1:sylp7d9kh6AdXN2DpVGHBRb5guTVAgOxqNGhbqc4b1c=
+github.com/ldez/tagliatelle v0.4.0/go.mod h1:mNtTfrHy2haaBAw+VT7IBV6VXBThS7TCreYWbBcJ87I=
+github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU=
+github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
+github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
+github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
+github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI=
+github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
+github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q=
+github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc=
+github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=
+github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
+github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
+github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
+github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
+github.com/mgechev/revive v1.2.5 h1:UF9AR8pOAuwNmhXj2odp4mxv9Nx2qUIwVz8ZsU+Mbec=
+github.com/mgechev/revive v1.2.5/go.mod h1:nFOXent79jMTISAfOAasKfy0Z2Ejq0WX7Qn/KAdYopI=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=
+github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
+github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8=
+github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
+github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc=
+github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
+github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
+github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
+github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nishanths/exhaustive v0.9.5 h1:TzssWan6orBiLYVqewCG8faud9qlFntJE30ACpzmGME=
+github.com/nishanths/exhaustive v0.9.5/go.mod h1:IbwrGdVMizvDcIxPYGVdQn5BqWJaOwpCvg4RGb8r/TA=
+github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
+github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
+github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
+github.com/nunnatsa/ginkgolinter v0.8.1 h1:/y4o/0hV+ruUHj4xXh89xlFjoaitnI4LnkpuYs02q1c=
+github.com/nunnatsa/ginkgolinter v0.8.1/go.mod h1:FYYLtszIdmzCH8XMaMPyxPVXZ7VCaIm55bA+gugx+14=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
+github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
+github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
+github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.8.0 h1:pAM+oBNPrpXRs+E/8spkeGx9QgekbRVyr74EUvRVOUI=
+github.com/onsi/ginkgo/v2 v2.8.0/go.mod h1:6JsQiECmxCa3V5st74AL/AmsV482EDdVrGaVW6z3oYU=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
+github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
+github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
+github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
+github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
+github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
+github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
+github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
+github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
+github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI=
+github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
+github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/polyfloyd/go-errorlint v1.1.0 h1:VKoEFg5yxSgJ2yFPVhxW7oGz+f8/OVcuMeNvcPIi6Eg=
+github.com/polyfloyd/go-errorlint v1.1.0/go.mod h1:Uss7Bc/izYG0leCMRx3WVlrpqWedSZk7V/FUQW6VJ6U=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
+github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
+github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30=
+github.com/quasilyte/go-ruleguard v0.3.19 h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc=
+github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw=
+github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
+github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
+github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
+github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
+github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
+github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
+github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=
+github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
+github.com/remyoudompheng/go-dbus v0.0.0-20121104212943-b7232d34b1d5/go.mod h1:+u151txRmLpwxBmpYn9z3d1sdJdjRPQpsXuYeY9jNls=
+github.com/remyoudompheng/go-liblzma v0.0.0-20190506200333-81bf2d431b96/go.mod h1:90HvCY7+oHHUKkbeMCiHt1WuFR2/hPJ9QrljDG+v6ls=
+github.com/remyoudompheng/go-misc v0.0.0-20190427085024-2d6ac652a50e/go.mod h1:80FQABjoFzZ2M5uEa6FUaJYEmqU2UOKojlFVak1UAwI=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw=
+github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
+github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI=
+github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
+github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
+github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
+github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
+github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
+github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
+github.com/sashamelentyev/usestdlibvars v1.23.0 h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0=
+github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/securego/gosec/v2 v2.15.0 h1:v4Ym7FF58/jlykYmmhZ7mTm7FQvN/setNm++0fgIAtw=
+github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
+github.com/shirou/gopsutil/v3 v3.23.1/go.mod h1:NN6mnm5/0k8jw4cBfCnJtr5L7ErOTg18tMNpgFkn0hA=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI=
+github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw=
+github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8=
+github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
+github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak=
+github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY=
+github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI=
+github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
+github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
+github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
+github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
+github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
+github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
+github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
+github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
+github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
+github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
+github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo=
+github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
+github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8=
+github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
+github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A=
+github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
+github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
+github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
+github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
+github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
+github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=
+github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
+github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e h1:MV6KaVu/hzByHP0UvJ4HcMGE/8a6A4Rggc/0wx2AvJo=
+github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
+github.com/timonwong/loggercheck v0.9.3 h1:ecACo9fNiHxX4/Bc02rW2+kaJIAMAes7qJ7JKxt0EZI=
+github.com/timonwong/loggercheck v0.9.3/go.mod h1:wUqnk9yAOIKtGA39l1KLE9Iz0QiTocu/YZoOf+OzFdw=
+github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
+github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tomarrell/wrapcheck/v2 v2.8.0 h1:qDzbir0xmoE+aNxGCPrn+rUSxAX+nG6vREgbbXAR81I=
+github.com/tomarrell/wrapcheck/v2 v2.8.0/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg=
+github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
+github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
+github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
+github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
+github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
+github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
+github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
+github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
+github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
+github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
+github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
+github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
+github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
+github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
+github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0=
+gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
+go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
+go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
+go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
+go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
+go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
+golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20221002003631-540bb7301a08/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9 h1:6WHiuFL9FNjg8RljAaT7FNUuKDbvMqS1i5cr2OE2sLQ=
+golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190228203856-589c23e65e65/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
+golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.4.1-0.20221208213631-3f74d914ae6d/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
+golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.4.2 h1:6qXr+R5w+ktL5UkwEbPp+fEvfyoMPche6GkOpGHZcLc=
+honnef.co/go/tools v0.4.2/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA=
+mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM=
+mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ=
+mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
+mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
+mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
+mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
+mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w=
+mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/tests/tools/tools.go b/tests/tools/tools.go
new file mode 100644
index 0000000..c7549cd
--- /dev/null
+++ b/tests/tools/tools.go
@@ -0,0 +1,13 @@
+//go:build tools
+// +build tools
+
+package tools
+
+// Importing the packages here will allow to vendor those via
+// `go mod vendor`.
+
+import (
+ _ "github.com/cpuguy83/go-md2man/v2"
+ _ "github.com/golangci/golangci-lint/cmd/golangci-lint"
+ _ "github.com/onsi/ginkgo/ginkgo"
+)
diff --git a/tests/tutorial.bats b/tests/tutorial.bats
new file mode 100644
index 0000000..352ca4e
--- /dev/null
+++ b/tests/tutorial.bats
@@ -0,0 +1,25 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "tutorial-cgroups" {
+ # confidence check for the sake of packages that consume our library
+ skip_if_no_runtime
+ skip_if_cgroupsv1
+ skip_if_rootless_environment
+ skip_if_chroot
+
+ _prefetch quay.io/libpod/alpine
+ run ${TUTORIAL_BINARY}
+ buildoutput="$output"
+ # shouldn't have the "root" scope in our cgroups
+ echo "build output:"
+ echo "${output}"
+ ! grep -q init.scope <<< "$buildoutput"
+ run sed -e '0,/^CUT START/d' -e '/^CUT END/,//d' <<< "$buildoutput"
+ # should've found a /sys/fs/cgroup with stuff in it
+ echo "contents of /sys/fs/cgroup:"
+ echo "${output}"
+ echo "number of lines: ${#lines[@]}"
+ test "${#lines[@]}" -gt 2
+}
diff --git a/tests/tutorial/tutorial.go b/tests/tutorial/tutorial.go
new file mode 100644
index 0000000..f1841fa
--- /dev/null
+++ b/tests/tutorial/tutorial.go
@@ -0,0 +1,65 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/buildah"
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/imagebuildah"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/unshare"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func main() {
+ if buildah.InitReexec() {
+ return
+ }
+ unshare.MaybeReexecUsingUserNamespace(false)
+
+ buildStoreOptions, err := storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
+ if err != nil {
+ panic(err)
+ }
+
+ buildStore, err := storage.GetStore(buildStoreOptions)
+ if err != nil {
+ panic(err)
+ }
+ defer func() {
+ if _, err := buildStore.Shutdown(false); err != nil {
+ if !errors.Is(err, storage.ErrLayerUsedByContainer) {
+ fmt.Printf("failed to shutdown storage: %q", err)
+ }
+ }
+ }()
+
+ d, err := os.MkdirTemp("", "")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(d)
+ dockerfile := filepath.Join(d, "Dockerfile")
+ f, err := os.Create(dockerfile)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Fprintf(f, "FROM quay.io/libpod/alpine\nRUN echo CUT START; find /sys/fs/cgroup -print | sort ; echo CUT END")
+ f.Close()
+
+ buildOptions := define.BuildOptions{
+ ContextDirectory: d,
+ NamespaceOptions: []define.NamespaceOption{
+ {Name: string(specs.NetworkNamespace), Host: true},
+ },
+ }
+
+ _, _, err = imagebuildah.BuildDockerfiles(context.TODO(), buildStore, buildOptions, dockerfile)
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/tests/umount.bats b/tests/umount.bats
new file mode 100644
index 0000000..d59928e
--- /dev/null
+++ b/tests/umount.bats
@@ -0,0 +1,68 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "umount-flags-order-verification" {
+ run_buildah 125 umount cnt1 -a
+ check_options_flag_err "-a"
+
+ run_buildah 125 umount cnt1 --all cnt2
+ check_options_flag_err "--all"
+
+ run_buildah 125 umount cnt1 cnt2 --all
+ check_options_flag_err "--all"
+}
+
+@test "umount one image" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid=$output
+ run_buildah mount "$cid"
+ run_buildah umount "$cid"
+}
+
+@test "umount bad image" {
+ run_buildah 125 umount badcontainer
+}
+
+@test "umount multi images" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah mount "$cid1"
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah mount "$cid2"
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid3=$output
+ run_buildah mount "$cid3"
+ run_buildah umount "$cid1" "$cid2" "$cid3"
+}
+
+@test "umount all images" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah mount "$cid1"
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah mount "$cid2"
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid3=$output
+ run_buildah mount "$cid3"
+ run_buildah umount --all
+}
+
+@test "umount multi images one bad" {
+ _prefetch alpine
+ run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine
+ cid1=$output
+ run_buildah mount "$cid1"
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid2=$output
+ run_buildah mount "$cid2"
+ run_buildah from --quiet --pull-never $WITH_POLICY_JSON alpine
+ cid3=$output
+ run_buildah mount "$cid3"
+ run_buildah 125 umount "$cid1" badcontainer "$cid2" "$cid3"
+}
diff --git a/tests/validate/buildahimages-are-sane b/tests/validate/buildahimages-are-sane
new file mode 100755
index 0000000..de58fe0
--- /dev/null
+++ b/tests/validate/buildahimages-are-sane
@@ -0,0 +1,74 @@
+#!/bin/bash
+#
+# buildahimages-are-sane - validate changes against buildah-images Dockerfiles
+#
+ME=$(basename $0)
+
+# HEAD should be good enough, but the CIRRUS envariable allows us to test
+head=${CIRRUS_CHANGE_IN_REPO:-HEAD}
+# Base of this PR. Here we absolutely rely on cirrus.
+base=$(git merge-base ${GITVALIDATE_EPOCH:-main} $head)
+
+# Sanity check:
+if [[ -z "$base" ]]; then
+ echo "$(basename $0): internal error: could not determine merge-base"
+ echo " head = $head"
+ echo " CIRRUS_CHANGE_IN_REPO = $CIRRUS_CHANGE_IN_REPO"
+ echo " GITVALIDATE_EPOCH = $GITVALIDATE_EPOCH"
+ exit 1
+fi
+
+# Helper function: confirms that shadow-utils is sane in the built image
+function build_and_check() {
+ local dir=$1
+
+ echo "$ME: Checking $dir"
+
+ # Clean up preexisting image
+ bin/buildah rmi -f buildah &>/dev/null || true
+
+ # Quiet by default, but show logs if anything fails.
+ logfile=$(mktemp --tmpdir $ME.build.XXXXXXX)
+ bin/buildah bud -t buildah $dir > $logfile 2>&1
+ if [[ $? -ne 0 ]]; then
+ echo "$ME: buildah-bud failed:"
+ sed -e 's/^/ /' <$logfile
+ exit 1
+ fi
+
+ ctr=$(/usr/bin/buildah from buildah)
+ rpmqv=$(/usr/bin/buildah run $ctr rpm -qV shadow-utils)
+ if [[ -n "$rpmqv" ]]; then
+ echo "$ME: rpm-qv failed on $dir:"
+ echo " $rpmqv"
+ exit 1
+ fi
+
+ owner=$(buildah run $ctr stat -c "%U:%G" /home/build/.local/share/containers)
+ if [[ "${owner}" != "build:build" ]]; then
+ echo "$ME: ownership of /home/build/.local/share/containers failed on $dir:"
+ echo " ${owner}"
+ exit 1
+ fi
+
+ bin/buildah rm $ctr &>/dev/null
+ bin/buildah rmi buildah &>/dev/null
+}
+
+# This gives us a list of files touched in all commits, e.g.
+# A file1
+# M subdir/file2
+# We look for Added or Modified files under contrib/buildahimage; if there
+# aren't any, we have nothing to do.
+#
+# Notes:
+# --no-renames ensures that renamed files show up as 'A'dded.
+# we omit 'stablebyhand' because it does not have a Containerfile
+touched=$(git diff --name-status --no-renames $base $head |\
+ grep -v /stablebyhand |\
+ sed -n -E -e 's;^[AM][[:space:]]+(contrib/buildahimage/[^/]+)/.*;\1;p' |\
+ uniq)
+
+for dir in $touched; do
+ build_and_check $dir
+done
diff --git a/tests/validate/pr-should-include-tests b/tests/validate/pr-should-include-tests
new file mode 100755
index 0000000..5b8a99a
--- /dev/null
+++ b/tests/validate/pr-should-include-tests
@@ -0,0 +1,88 @@
+#!/bin/bash
+#
+# Intended for use in CI: check git commits, barf if no tests added.
+#
+
+# Docs-only changes are excused
+if [[ "${CIRRUS_CHANGE_TITLE}" =~ CI:DOCS ]]; then
+ exit 0
+fi
+
+# So are PRs where 'NO NEW TESTS NEEDED' appears in the Github message
+if [[ "${CIRRUS_CHANGE_MESSAGE}" =~ NO.NEW.TESTS.NEEDED ]]; then
+ exit 0
+fi
+
+# HEAD should be good enough, but the CIRRUS envariable allows us to test
+head=${CIRRUS_CHANGE_IN_REPO:-HEAD}
+# Base of this PR. Here we absolutely rely on cirrus.
+base=$(git merge-base ${GITVALIDATE_EPOCH:-main} $head)
+
+# Sanity check:
+if [[ -z "$base" ]]; then
+ echo "$(basename $0): internal error: could not determine merge-base"
+ echo " head = $head"
+ echo " CIRRUS_CHANGE_IN_REPO = $CIRRUS_CHANGE_IN_REPO"
+ echo " GITVALIDATE_EPOCH = $GITVALIDATE_EPOCH"
+ exit 1
+fi
+
+# This gives us a list of files touched in all commits, e.g.
+# A foo.c
+# M bar.c
+# We look for Added or Modified (not Deleted!) files under 'tests'.
+# --no-renames ensures that renamed tests show up as 'A'dded.
+if git diff --name-status --no-renames $base $head | egrep -q '^[AM]\s+(tests/|.*_test\.go)'; then
+ exit 0
+fi
+
+# Nothing changed under test subdirectory.
+#
+# This is OK if the only files being touched are "safe" ones.
+filtered_changes=$(git diff --name-status $base $head |
+ awk '{print $2}' |
+ fgrep -vx .cirrus.yml |
+ fgrep -vx .gitignore |
+ fgrep -vx changelog.txt |
+ fgrep -vx go.mod |
+ fgrep -vx go.sum |
+ fgrep -vx buildah.spec.rpkg |
+ fgrep -vx .golangci.yml |
+ fgrep -vx Makefile |
+ egrep -v '^[^/]+\.md$' |
+ egrep -v '^\.github/' |
+ egrep -v '^contrib/' |
+ egrep -v '^docs/' |
+ egrep -v '^hack/' |
+ egrep -v '^nix/' |
+ egrep -v '^vendor/')
+if [[ -z "$filtered_changes" ]]; then
+ exit 0
+fi
+
+# One last chance: perhaps the developer included the magic '[NO TESTS NEEDED]'
+# string in an amended commit.
+if git log --format=%B ${base}..${head} | fgrep '[NO NEW TESTS NEEDED]'; then
+ exit 0
+fi
+if git log --format=%B ${base}..${head} | fgrep '[NO TESTS NEEDED]'; then
+ exit 0
+fi
+
+cat <<EOF
+$(basename $0): PR does not include changes in the 'tests' directory
+
+Please write a regression test for what you're fixing. Even if it
+seems trivial or obvious, try to add a test that will prevent
+regressions.
+
+If your change is minor, feel free to piggyback on already-written
+tests, possibly just adding a small step to a similar existing test.
+Every second counts in CI.
+
+If your commit really, truly does not need tests, you can proceed
+by adding '[NO NEW TESTS NEEDED]' to the body of your commit message.
+Please think carefully before doing so.
+EOF
+
+exit 1
diff --git a/tests/validate/pr-should-include-tests.t b/tests/validate/pr-should-include-tests.t
new file mode 100755
index 0000000..b3057e0
--- /dev/null
+++ b/tests/validate/pr-should-include-tests.t
@@ -0,0 +1,131 @@
+#!/bin/bash
+#
+# tests for pr-should-include-tests
+#
+# FIXME: I don't think this will work in CI, because IIRC the git-checkout
+# is a shallow one. But it works fine in a developer tree.
+#
+ME=$(basename $0)
+
+###############################################################################
+# BEGIN test cases
+#
+# Feel free to add as needed. Syntax is:
+# <exit status> <sha of commit> <branch>=<sha of merge base> # comments
+#
+# Where:
+# exit status is the expected exit status of the script
+# sha of merge base is the SHA of the branch point of the commit
+# sha of commit is the SHA of a real commit in the podman repo
+#
+# We need the actual sha of the merge base because once a branch is
+# merged 'git merge-base' (used in our test script) becomes useless.
+#
+#
+# FIXME: as of 2021-01-07 we don't have "no tests needed" in our git
+# commit history, but once we do, please add a new '0' test here.
+#
+tests="
+0 f466086d 88bc27df PR 2955: two commits, includes tests
+1 c5870ff8 520c7815 PR 2973: single commit, no tests
+0 d460e2ed 371e4ca6 PR 2886: .cirrus.yml and contrib/cirrus/*
+0 88bc27df c5870ff8 PR 2972: vendor only
+0 d4c696af faa86c4f PR 2470: CI:DOCS as well as only a .md change
+0 d460e2ed f52762a9 PR 2927: .md only, without CI:DOCS
+"
+
+# The script we're testing
+test_script=$(dirname $0)/$(basename $0 .t)
+
+# END test cases
+###############################################################################
+# BEGIN test-script runner and status checker
+
+function run_test_script() {
+ local expected_rc=$1
+ local testname=$2
+
+ testnum=$(( testnum + 1 ))
+
+ # DO NOT COMBINE 'local output=...' INTO ONE LINE. If you do, you lose $?
+ local output
+ output=$( $test_script )
+ local actual_rc=$?
+
+ if [[ $actual_rc != $expected_rc ]]; then
+ echo "not ok $testnum $testname"
+ echo "# expected rc $expected_rc"
+ echo "# actual rc $actual_rc"
+ if [[ -n "$output" ]]; then
+ echo "# script output: $output"
+ fi
+ rc=1
+ else
+ if [[ $expected_rc == 1 ]]; then
+ # Confirm we get an error message
+ if [[ ! "$output" =~ "Please write a regression test" ]]; then
+ echo "not ok $testnum $testname"
+ echo "# Expected: ~ 'Please write a regression test'"
+ echo "# Actual: $output"
+ rc=1
+ else
+ echo "ok $testnum $testname"
+ fi
+ else
+ echo "ok $testnum $testname"
+ fi
+ fi
+
+ # If we expect an error, confirm that we can override it. We only need
+ # to do this once.
+ if [[ $expected_rc == 1 ]]; then
+ if [[ -z "$tested_override" ]]; then
+ testnum=$(( testnum + 1 ))
+
+ CIRRUS_CHANGE_TITLE="[CI:DOCS] hi there" $test_script &>/dev/null
+ if [[ $? -ne 0 ]]; then
+ echo "not ok $testnum $rest (override with CI:DOCS)"
+ rc=1
+ else
+ echo "ok $testnum $rest (override with CI:DOCS)"
+ fi
+
+ testnum=$(( testnum + 1 ))
+ CIRRUS_CHANGE_MESSAGE="hi there [NO TESTS NEEDED] bye" $test_script &>/dev/null
+ if [[ $? -ne 0 ]]; then
+ echo "not ok $testnum $rest (override with '[NO TESTS NEEDED]')"
+ rc=1
+ else
+ echo "ok $testnum $rest (override with '[NO TESTS NEEDED]')"
+ fi
+
+ tested_override=1
+ fi
+ fi
+}
+
+# END test-script runner and status checker
+###############################################################################
+# BEGIN test-case parsing
+
+rc=0
+testnum=0
+tested_override=
+
+while read expected_rc parent_sha commit_sha rest; do
+ # Skip blank lines
+ test -z "$expected_rc" && continue
+
+ export DEST_BRANCH=$parent_sha
+ export CIRRUS_CHANGE_IN_REPO=$commit_sha
+ export CIRRUS_CHANGE_TITLE=$(git log -1 --format=%s $commit_sha)
+ export CIRRUS_CHANGE_MESSAGE=
+
+ run_test_script $expected_rc "$rest"
+done <<<"$tests"
+
+echo "1..$testnum"
+exit $rc
+
+# END Test-case parsing
+###############################################################################
diff --git a/tests/validate/whitespace.sh b/tests/validate/whitespace.sh
new file mode 100755
index 0000000..6be4bd9
--- /dev/null
+++ b/tests/validate/whitespace.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+#
+# Check for one or more whitespace characters at the end of a line in a markdown or text file.
+# gofmt is already going to be doing the same for source code.
+#
+status=0
+if find * -name '*.md' -o -name "*.txt" | grep -v vendor/ | xargs egrep -q '[[:space:]]+$' ; then
+ echo "** ERROR: dangling whitespace found in these files: **"
+ find * -name '*.md' -o -name "*.txt" | grep -v vendor/ | xargs egrep -n '[[:space:]]+$'
+ echo "** ERROR: try running \"sed -i -E -e 's,[[:space:]]+$,,'\" on the affected files **"
+ status=1
+fi
+exit $status
diff --git a/troubleshooting.md b/troubleshooting.md
new file mode 100644
index 0000000..1299b35
--- /dev/null
+++ b/troubleshooting.md
@@ -0,0 +1,158 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
+
+# Troubleshooting
+
+## A list of common issues and solutions for Buildah
+
+---
+### 1) No such image
+
+When doing a `buildah pull` or `buildah build` command and a "common" image can not be pulled,
+it is likely that the `/etc/containers/registries.conf` file is either not installed or possibly
+misconfigured. This issue might also indicate that other required files as listed in the
+[Configuration Files](https://github.com/containers/buildah/blob/main/install.md#configuration-files)
+section of the Installation Instructions are also not installed.
+
+#### Symptom
+```console
+$ sudo buildah build -f Dockerfile .
+STEP 1: FROM alpine
+error creating build container: 2 errors occurred:
+
+* Error determining manifest MIME type for docker://localhost/alpine:latest: pinging docker registry returned: Get https://localhost/v2/: dial tcp [::1]:443: connect: connection refused
+* Error determining manifest MIME type for docker://registry.access.redhat.com/alpine:latest: Error reading manifest latest in registry.access.redhat.com/alpine: unknown: Not Found
+error building: error creating build container: no such image "alpine" in registry: image not known
+```
+
+#### Solution
+
+ * Verify that the `/etc/containers/registries.conf` file exists. If not, verify that the containers-common package is installed.
+ * Verify that the entries in the `[registries.search]` section of the /etc/containers/registries.conf file are valid and reachable.
+ * Verify that the image you requested is either fully qualified, or that it exists on one of your search registries.
+ * Verify that the image is public or that you have logged in to at least one search registry which contains the private image.
+ * Verify that the other required [Configuration Files](https://github.com/containers/buildah/blob/main/install.md#configuration-files) are installed.
+
+---
+### 2) http: server gave HTTP response to HTTPS client
+
+When doing a Buildah command such as `build`, `commit`, `from`, or `push` to a registry,
+tls verification is turned on by default. If authentication is not used with
+those commands, this error can occur.
+
+#### Symptom
+```console
+# buildah push alpine docker://localhost:5000/myalpine:latest
+Getting image source signatures
+Get https://localhost:5000/v2/: http: server gave HTTP response to HTTPS client
+```
+
+#### Solution
+
+By default tls verification is turned on when communicating to registries from
+Buildah. If the registry does not require authentication the Buildah commands
+such as `build`, `commit`, `from` and `pull` will fail unless tls verification is turned
+off using the `--tls-verify` option. **NOTE:** It is not at all recommended to
+communicate with a registry and not use tls verification.
+
+ * Turn off tls verification by passing false to the `--tls-verify` option.
+ * I.e. `buildah push --tls-verify=false alpine docker://localhost:5000/myalpine:latest`
+
+---
+### 3) `buildah run` command fails with pipe or output redirection
+
+When doing a `buildah run` command while using a pipe ('|') or output redirection ('>>'),
+the command will fail, often times with a `command not found` type of error.
+
+#### Symptom
+When executing a `buildah run` command with a pipe or output redirection such as the
+following commands:
+
+```console
+# buildah run $whalecontainer /usr/games/fortune -a | cowsay
+# buildah run $newcontainer echo "daemon off;" >> /etc/nginx/nginx.conf
+# buildah run $newcontainer echo "nginx on Fedora" > /usr/share/nginx/html/index.html
+```
+the `buildah run` command will not complete and an error will be raised.
+
+#### Solution
+There are two solutions to this problem. The
+[`podman run`](https://github.com/containers/podman/blob/main/docs/podman-run.1.md)
+command can be used in place of `buildah run`. To still use `buildah run`, surround
+the command with single quotes and use `bash -c`. The previous examples would be
+changed to:
+
+```console
+# buildah run $whalecontainer bash -c '/usr/games/fortune -a | cowsay'
+# buildah run $newcontainer bash -c 'echo "daemon off;" >> /etc/nginx/nginx.conf'
+# buildah run $newcontainer bash -c 'echo "nginx on Fedora" > /usr/share/nginx/html/index.html'
+```
+
+---
+### 4) `buildah push alpine oci:~/myalpine:latest` fails with lstat error
+
+When doing a `buildah push` command and the target image has a tilde (`~`) character
+in it, an lstat error will be raised stating there is no such file or directory.
+This is expected behavior for shell expansion of the tilde character as it is only
+expanded at the start of a word. This behavior is documented
+[here](https://www.gnu.org/software/libc/manual/html_node/Tilde-Expansion.html).
+
+#### Symptom
+```console
+$ sudo buildah pull alpine
+$ sudo buildah push alpine oci:~/myalpine:latest
+lstat /home/myusername/~: no such file or directory
+```
+
+#### Solution
+
+ * Replace `~` with `$HOME` or the fully specified directory `/home/myusername`.
+ * `$ sudo buildah push alpine oci:${HOME}/myalpine:latest`
+
+
+---
+### 5) Rootless buildah build fails EPERM on NFS:
+
+NFS enforces file creation on different UIDs on the server side and does not understand user namespace, which rootless Buildah requires. When a container root process like YUM attempts to create a file owned by a different UID, NFS Server denies the creation. NFS is also a problem for the file locks when the storage is on it. Other distributed file systems (for example: Lustre, Spectrum Scale, the General Parallel File System (GPFS)) are also not supported when running in rootless mode as these file systems do not understand user namespace.
+
+#### Symptom
+```console
+$ buildah build .
+ERRO[0014] Error while applying layer: ApplyLayer exit status 1 stdout: stderr: open /root/.bash_logout: permission denied
+error creating build container: Error committing the finished image: error adding layer with blob "sha256:a02a4930cb5d36f3290eb84f4bfa30668ef2e9fe3a1fb73ec015fc58b9958b17": ApplyLayer exit status 1 stdout: stderr: open /root/.bash_logout: permission denied
+```
+
+#### Solution
+Choose one of the following:
+ * Setup containers/storage in a different directory, not on an NFS share.
+ * Otherwise just run buildah as root, via `sudo buildah`
+---
+### 6) Rootless buildah build fails when using OverlayFS:
+
+The Overlay file system (OverlayFS) requires the ability to call the `mknod` command when creating whiteout files
+when extracting an image. However, a rootless user does not have the privileges to use `mknod` in this capacity.
+
+#### Symptom
+```console
+buildah build --storage-driver overlay .
+STEP 1: FROM docker.io/ubuntu:xenial
+Getting image source signatures
+Copying blob edf72af6d627 done
+Copying blob 3e4f86211d23 done
+Copying blob 8d3eac894db4 done
+Copying blob f7277927d38a done
+Copying config 5e13f8dd4c done
+Writing manifest to image destination
+Storing signatures
+Error: error creating build container: Error committing the finished image: error adding layer with blob "sha256:8d3eac894db4dc4154377ad28643dfe6625ff0e54bcfa63e0d04921f1a8ef7f8": Error processing tar file(exit status 1): operation not permitted
+$ buildah build .
+ERRO[0014] Error while applying layer: ApplyLayer exit status 1 stdout: stderr: open /root/.bash_logout: permission denied
+error creating build container: Error committing the finished image: error adding layer with blob "sha256:a02a4930cb5d36f3290eb84f4bfa30668ef2e9fe3a1fb73ec015fc58b9958b17": ApplyLayer exit status 1 stdout: stderr: open /root/.bash_logout: permission denied
+```
+
+#### Solution
+Choose one of the following:
+ * Complete the build operation as a privileged user.
+ * Install and configure fuse-overlayfs.
+ * Install the fuse-overlayfs package for your Linux Distribution.
+ * Add `mount_program = "/usr/bin/fuse-overlayfs"` under `[storage.options]` in your `~/.config/containers/storage.conf` file.
+---
diff --git a/unmount.go b/unmount.go
new file mode 100644
index 0000000..66c8ce4
--- /dev/null
+++ b/unmount.go
@@ -0,0 +1,17 @@
+package buildah
+
+import "fmt"
+
+// Unmount unmounts a build container.
+func (b *Builder) Unmount() error {
+ _, err := b.store.Unmount(b.ContainerID, false)
+ if err != nil {
+ return fmt.Errorf("unmounting build container %q: %w", b.ContainerID, err)
+ }
+ b.MountPoint = ""
+ err = b.Save()
+ if err != nil {
+ return fmt.Errorf("saving updated state for build container %q: %w", b.ContainerID, err)
+ }
+ return nil
+}
diff --git a/util.go b/util.go
new file mode 100644
index 0000000..ddc97cc
--- /dev/null
+++ b/util.go
@@ -0,0 +1,228 @@
+package buildah
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/containers/buildah/copier"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/reexec"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/sirupsen/logrus"
+)
+
// InitReexec is a wrapper for reexec.Init(). It should be called at
// the start of main(), and if it returns true, main() should return
// immediately, because the process was started as a re-exec helper.
func InitReexec() bool {
	return reexec.Init()
}
+
// copyStringStringMap returns a shallow copy of m. The result is always
// non-nil, even when m is nil or empty.
func copyStringStringMap(m map[string]string) map[string]string {
	// Pre-size the destination to avoid rehashing while copying.
	n := make(map[string]string, len(m))
	for k, v := range m {
		n[k] = v
	}
	return n
}
+
// copyStringSlice returns a new slice with the same contents as s; the
// result never shares backing storage with s and is always non-nil.
func copyStringSlice(s []string) []string {
	return append(make([]string, 0, len(s)), s...)
}
+
+func copyHistory(history []v1.History) []v1.History {
+ if len(history) == 0 {
+ return nil
+ }
+ h := make([]v1.History, 0, len(history))
+ for _, entry := range history {
+ created := entry.Created
+ if created != nil {
+ timestamp := *created
+ created = &timestamp
+ }
+ h = append(h, v1.History{
+ Created: created,
+ CreatedBy: entry.CreatedBy,
+ Author: entry.Author,
+ Comment: entry.Comment,
+ EmptyLayer: entry.EmptyLayer,
+ })
+ }
+ return h
+}
+
+func convertStorageIDMaps(UIDMap, GIDMap []idtools.IDMap) ([]rspec.LinuxIDMapping, []rspec.LinuxIDMapping) {
+ uidmap := make([]rspec.LinuxIDMapping, 0, len(UIDMap))
+ gidmap := make([]rspec.LinuxIDMapping, 0, len(GIDMap))
+ for _, m := range UIDMap {
+ uidmap = append(uidmap, rspec.LinuxIDMapping{
+ HostID: uint32(m.HostID),
+ ContainerID: uint32(m.ContainerID),
+ Size: uint32(m.Size),
+ })
+ }
+ for _, m := range GIDMap {
+ gidmap = append(gidmap, rspec.LinuxIDMapping{
+ HostID: uint32(m.HostID),
+ ContainerID: uint32(m.ContainerID),
+ Size: uint32(m.Size),
+ })
+ }
+ return uidmap, gidmap
+}
+
+func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMap, []idtools.IDMap) {
+ uidmap := make([]idtools.IDMap, 0, len(UIDMap))
+ gidmap := make([]idtools.IDMap, 0, len(GIDMap))
+ for _, m := range UIDMap {
+ uidmap = append(uidmap, idtools.IDMap{
+ HostID: int(m.HostID),
+ ContainerID: int(m.ContainerID),
+ Size: int(m.Size),
+ })
+ }
+ for _, m := range GIDMap {
+ gidmap = append(gidmap, idtools.IDMap{
+ HostID: int(m.HostID),
+ ContainerID: int(m.ContainerID),
+ Size: int(m.Size),
+ })
+ }
+ return uidmap, gidmap
+}
+
+// isRegistryBlocked checks if the named registry is marked as blocked
+func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
+ reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
+ if err != nil {
+ return false, fmt.Errorf("unable to parse the registries configuration (%s): %w", sysregistriesv2.ConfigPath(sc), err)
+ }
+ if reginfo != nil {
+ if reginfo.Blocked {
+ logrus.Debugf("registry %q is marked as blocked in registries configuration %q", registry, sysregistriesv2.ConfigPath(sc))
+ } else {
+ logrus.Debugf("registry %q is not marked as blocked in registries configuration %q", registry, sysregistriesv2.ConfigPath(sc))
+ }
+ return reginfo.Blocked, nil
+ }
+ logrus.Debugf("registry %q is not listed in registries configuration %q, assuming it's not blocked", registry, sysregistriesv2.ConfigPath(sc))
+ return false, nil
+}
+
+// isReferenceSomething checks if the registry part of a reference is insecure or blocked
+func isReferenceSomething(ref types.ImageReference, sc *types.SystemContext, what func(string, *types.SystemContext) (bool, error)) (bool, error) {
+ if ref != nil {
+ if named := ref.DockerReference(); named != nil {
+ if domain := reference.Domain(named); domain != "" {
+ return what(domain, sc)
+ }
+ }
+ }
+ return false, nil
+}
+
+// isReferenceBlocked checks if the registry part of a reference is blocked
+func isReferenceBlocked(ref types.ImageReference, sc *types.SystemContext) (bool, error) {
+ if ref != nil && ref.Transport() != nil {
+ switch ref.Transport().Name() {
+ case "docker":
+ return isReferenceSomething(ref, sc, isRegistryBlocked)
+ }
+ }
+ return false, nil
+}
+
+// ReserveSELinuxLabels reads containers storage and reserves SELinux contexts
+// which are already being used by buildah containers.
+func ReserveSELinuxLabels(store storage.Store, id string) error {
+ if selinuxGetEnabled() {
+ containers, err := store.Containers()
+ if err != nil {
+ return fmt.Errorf("getting list of containers: %w", err)
+ }
+
+ for _, c := range containers {
+ if id == c.ID {
+ continue
+ } else {
+ b, err := OpenBuilder(store, c.ID)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ // Ignore not exist errors since containers probably created by other tool
+ // TODO, we need to read other containers json data to reserve their SELinux labels
+ continue
+ }
+ return err
+ }
+ // Prevent different containers from using same MCS label
+ if err := label.ReserveLabel(b.ProcessLabel); err != nil {
+ return fmt.Errorf("reserving SELinux label %q: %w", b.ProcessLabel, err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// IsContainer identifies if the specified container id is a buildah container
+// in the specified store.
+func IsContainer(id string, store storage.Store) (bool, error) {
+ cdir, err := store.ContainerDirectory(id)
+ if err != nil {
+ return false, err
+ }
+ // Assuming that if the stateFile exists, that this is a Buildah
+ // container.
+ if _, err = os.Stat(filepath.Join(cdir, stateFile)); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
+
+// Copy content from the directory "src" to the directory "dest", ensuring that
+// content from outside of "root" (which is a parent of "src" or "src" itself)
+// isn't read.
+func extractWithTar(root, src, dest string) error {
+ var getErr, putErr error
+ var wg sync.WaitGroup
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ wg.Add(1)
+ go func() {
+ getErr = copier.Get(root, src, copier.GetOptions{}, []string{"."}, pipeWriter)
+ pipeWriter.Close()
+ wg.Done()
+ }()
+ wg.Add(1)
+ go func() {
+ putErr = copier.Put(dest, dest, copier.PutOptions{}, pipeReader)
+ pipeReader.Close()
+ wg.Done()
+ }()
+ wg.Wait()
+
+ if getErr != nil {
+ return fmt.Errorf("reading %q: %w", src, getErr)
+ }
+ if putErr != nil {
+ return fmt.Errorf("copying contents of %q to %q: %w", src, dest, putErr)
+ }
+ return nil
+}
diff --git a/util/types.go b/util/types.go
new file mode 100644
index 0000000..91c9ace
--- /dev/null
+++ b/util/types.go
@@ -0,0 +1,20 @@
+package util
+
+import (
+ "github.com/containers/buildah/define"
+)
+
const (
	// DefaultRuntime is the OCI runtime to fall back to when reading
	// containers.conf fails.
	DefaultRuntime = define.DefaultRuntime
)

var (
	// Deprecated: DefaultCapabilities values should be retrieved from
	// github.com/containers/common/pkg/config
	DefaultCapabilities = define.DefaultCapabilities //nolint

	// Deprecated: DefaultNetworkSysctl values should be retrieved from
	// github.com/containers/common/pkg/config
	DefaultNetworkSysctl = define.DefaultNetworkSysctl //nolint
)
diff --git a/util/util.go b/util/util.go
new file mode 100644
index 0000000..bec8614
--- /dev/null
+++ b/util/util.go
@@ -0,0 +1,475 @@
+package util
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/common/libimage"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/util"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/shortnames"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/opencontainers/go-digest"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+)
+
const (
	// minimumTruncatedIDLength is the shortest image-ID prefix we will
	// attempt to expand into a full local image ID.
	minimumTruncatedIDLength = 3
	// DefaultTransport is a prefix that we apply to an image name if we
	// can't find one in the local Store, in order to generate a source
	// reference for the image that we can then copy to the local Store.
	DefaultTransport = "docker://"
)

var (
	// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
	// to prepend to image names that only contain a single path component.
	RegistryDefaultPathPrefix = map[string]string{
		"index.docker.io": "library",
		"docker.io": "library",
	}
)
+
// StringInSlice reports whether s is present in slice.
//
// Deprecated: use github.com/containers/common/pkg/util.StringInSlice
// instead.
func StringInSlice(s string, slice []string) bool {
	return util.StringInSlice(s, slice)
}
+
// resolveName checks if name is a valid image name, and if that name doesn't
// include a domain portion, returns a list of the names which it might
// correspond to in the set of configured registries, and the transport used to
// pull the image.
//
// The returned image names never include a transport: prefix, and if transport != "",
// (transport, image) should be a valid input to alltransports.ParseImageName.
// transport == "" indicates that image that already exists in a local storage,
// and the name is valid for store.Image() / storage.Transport.ParseStoreReference().
//
// NOTE: The "list of search registries is empty" check does not count blocked registries,
// and neither the implied "localhost" nor a possible firstRegistry are counted
func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, error) {
	if name == "" {
		return nil, "", nil
	}

	// Maybe it's a truncated image ID. Don't prepend a registry name, then.
	if len(name) >= minimumTruncatedIDLength {
		if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
			// It's a truncated version of the ID of an image that's present in local storage;
			// we need only expand the ID.
			return []string{img.ID}, "", nil
		}
	}
	// If we're referring to an image by digest, it *must* be local and we
	// should not have any fall through/back logic.
	if strings.HasPrefix(name, "sha256:") {
		d, err := digest.Parse(name)
		if err != nil {
			return nil, "", err
		}
		// Look the image up by the digest's hex portion; a failure here
		// is a hard error rather than a reason to consult registries.
		img, err := store.Image(d.Encoded())
		if err != nil {
			return nil, "", err
		}
		return []string{img.ID}, "", nil
	}

	// Transports are not supported for local image look ups.
	srcRef, err := alltransports.ParseImageName(name)
	if err == nil {
		return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), nil
	}

	var candidates []string
	// Local short-name resolution.
	namedCandidates, err := shortnames.ResolveLocally(sc, name)
	if err != nil {
		return nil, "", err
	}
	for _, named := range namedCandidates {
		candidates = append(candidates, named.String())
	}

	// Anything not resolved above is assumed to be pullable via the
	// default "docker://" transport.
	return candidates, DefaultTransport, nil
}
+
+// ExpandNames takes unqualified names, parses them as image names, and returns
+// the fully expanded result, including a tag. Names which don't include a registry
+// name will be marked for the most-preferred registry (i.e., the first one in our
+// configuration).
+func ExpandNames(names []string, systemContext *types.SystemContext, store storage.Store) ([]string, error) {
+ expanded := make([]string, 0, len(names))
+ for _, n := range names {
+ var name reference.Named
+ nameList, _, err := resolveName(n, systemContext, store)
+ if err != nil {
+ return nil, fmt.Errorf("parsing name %q: %w", n, err)
+ }
+ if len(nameList) == 0 {
+ named, err := reference.ParseNormalizedNamed(n)
+ if err != nil {
+ return nil, fmt.Errorf("parsing name %q: %w", n, err)
+ }
+ name = named
+ } else {
+ named, err := reference.ParseNormalizedNamed(nameList[0])
+ if err != nil {
+ return nil, fmt.Errorf("parsing name %q: %w", nameList[0], err)
+ }
+ name = named
+ }
+ name = reference.TagNameOnly(name)
+ expanded = append(expanded, name.String())
+ }
+ return expanded, nil
+}
+
+// FindImage locates the locally-stored image which corresponds to a given name.
+// Please note that the `firstRegistry` argument has been deprecated and has no
+// effect anymore.
+func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
+ runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ localImage, _, err := runtime.LookupImage(image, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ ref, err := localImage.StorageReference()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return ref, localImage.StorageImage(), nil
+}
+
// ResolveNameToReferences tries to create a list of possible references
// (including their transports) from the provided image name.
func ResolveNameToReferences(
	store storage.Store,
	systemContext *types.SystemContext,
	image string,
) (refs []types.ImageReference, err error) {
	names, transport, err := resolveName(image, systemContext, store)
	if err != nil {
		return nil, fmt.Errorf("parsing name %q: %w", image, err)
	}

	// DefaultTransport already ends in "://"; any other transport name
	// needs a ":" separator before the image name.
	if transport != DefaultTransport {
		transport += ":"
	}

	for _, name := range names {
		ref, err := alltransports.ParseImageName(transport + name)
		if err != nil {
			logrus.Debugf("error parsing reference to image %q: %v", name, err)
			continue
		}
		refs = append(refs, ref)
	}
	if len(refs) == 0 {
		return nil, fmt.Errorf("locating images with names %v", names)
	}
	return refs, nil
}
+
+// AddImageNames adds the specified names to the specified image. Please note
+// that the `firstRegistry` argument has been deprecated and has no effect
+// anymore.
+func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
+ runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+ if err != nil {
+ return err
+ }
+
+ localImage, _, err := runtime.LookupImage(image.ID, nil)
+ if err != nil {
+ return err
+ }
+
+ for _, tag := range addNames {
+ if err := localImage.Tag(tag); err != nil {
+ return fmt.Errorf("tagging image %s: %w", image.ID, err)
+ }
+ }
+
+ return nil
+}
+
+// GetFailureCause checks the type of the error "err" and returns a new
+// error message that reflects the reason of the failure.
+// In case err type is not a familiar one the error "defaultError" is returned.
+func GetFailureCause(err, defaultError error) error {
+ switch nErr := err.(type) {
+ case errcode.Errors:
+ return err
+ case errcode.Error, *url.Error:
+ return nErr
+ default:
+ return defaultError
+ }
+}
+
+// WriteError writes `lastError` into `w` if not nil and return the next error `err`
+func WriteError(w io.Writer, err error, lastError error) error {
+ if lastError != nil {
+ fmt.Fprintln(w, lastError)
+ }
+ return err
+}
+
+// Runtime is the default command to use to run the container.
+func Runtime() string {
+ runtime := os.Getenv("BUILDAH_RUNTIME")
+ if runtime != "" {
+ return runtime
+ }
+
+ conf, err := config.Default()
+ if err != nil {
+ logrus.Warnf("Error loading container config when searching for local runtime: %v", err)
+ return define.DefaultRuntime
+ }
+ return conf.Engine.OCIRuntime
+}
+
+// GetContainerIDs uses ID mappings to compute the container-level IDs that will
+// correspond to a UID/GID pair on the host.
+func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
+ uidMapped := true
+ for _, m := range uidmap {
+ uidMapped = false
+ if uid >= m.HostID && uid < m.HostID+m.Size {
+ uid = (uid - m.HostID) + m.ContainerID
+ uidMapped = true
+ break
+ }
+ }
+ if !uidMapped {
+ return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
+ }
+ gidMapped := true
+ for _, m := range gidmap {
+ gidMapped = false
+ if gid >= m.HostID && gid < m.HostID+m.Size {
+ gid = (gid - m.HostID) + m.ContainerID
+ gidMapped = true
+ break
+ }
+ }
+ if !gidMapped {
+ return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
+ }
+ return uid, gid, nil
+}
+
+// GetHostIDs uses ID mappings to compute the host-level IDs that will
+// correspond to a UID/GID pair in the container.
+func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
+ uidMapped := true
+ for _, m := range uidmap {
+ uidMapped = false
+ if uid >= m.ContainerID && uid < m.ContainerID+m.Size {
+ uid = (uid - m.ContainerID) + m.HostID
+ uidMapped = true
+ break
+ }
+ }
+ if !uidMapped {
+ return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
+ }
+ gidMapped := true
+ for _, m := range gidmap {
+ gidMapped = false
+ if gid >= m.ContainerID && gid < m.ContainerID+m.Size {
+ gid = (gid - m.ContainerID) + m.HostID
+ gidMapped = true
+ break
+ }
+ }
+ if !gidMapped {
+ return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
+ }
+ return uid, gid, nil
+}
+
// GetHostRootIDs uses ID mappings in spec to compute the host-level IDs that will
// correspond to UID/GID 0/0 in the container.
func GetHostRootIDs(spec *specs.Spec) (uint32, uint32, error) {
	if spec == nil || spec.Linux == nil {
		// Without a Linux configuration there are no mappings, so
		// container root maps to host root.
		return 0, 0, nil
	}
	return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0)
}
+
+// GetPolicyContext sets up, initializes and returns a new context for the specified policy
+func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
+ policy, err := signature.DefaultPolicy(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return nil, err
+ }
+ return policyContext, nil
+}
+
+// logIfNotErrno logs the error message unless err is either nil or one of the
+// listed syscall.Errno values. It returns true if it logged an error.
+func logIfNotErrno(err error, what string, ignores ...syscall.Errno) (logged bool) {
+ if err == nil {
+ return false
+ }
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ for _, ignore := range ignores {
+ if errno == ignore {
+ return false
+ }
+ }
+ }
+ logrus.Error(what)
+ return true
+}
+
// LogIfNotRetryable logs "what" if err is set and is not an EINTR or EAGAIN
// syscall.Errno. Returns "true" if we can continue, i.e. the failure (if
// any) was one of the transient, retryable conditions.
func LogIfNotRetryable(err error, what string) (retry bool) {
	return !logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN)
}
+
// LogIfUnexpectedWhileDraining logs "what" if err is set and is not an EINTR
// or EAGAIN or EIO syscall.Errno.
// NOTE(review): presumably EIO is ignored because it is the normal result of
// draining a descriptor whose peer has closed — confirm with callers.
func LogIfUnexpectedWhileDraining(err error, what string) {
	logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN, syscall.EIO)
}
+
// TruncateString trims the given string to the provided maximum amount of
// characters and shortens it with `...`.
func TruncateString(str string, to int) string {
	const ellipsis = "..."
	if len(str) <= to {
		return str
	}
	// Reserve room for the ellipsis when there's enough space for it.
	if to > len(ellipsis) {
		to -= len(ellipsis)
	}
	return str[:to] + ellipsis
}
+
var (
	// isUnifiedOnce guards the one-time cgroup v2 detection performed by
	// IsCgroup2UnifiedMode; isUnified/isUnifiedErr cache its result.
	isUnifiedOnce sync.Once
	isUnified     bool
	isUnifiedErr  error
)
+
+// fileExistsAndNotADir - Check to see if a file exists
+// and that it is not a directory.
+func fileExistsAndNotADir(path string) (bool, error) {
+ file, err := os.Stat(path)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return false, nil
+ }
+ return false, err
+ }
+ return !file.IsDir(), nil
+}
+
+// FindLocalRuntime find the local runtime of the
+// system searching through the config file for
+// possible locations.
+func FindLocalRuntime(runtime string) string {
+ var localRuntime string
+ conf, err := config.Default()
+ if err != nil {
+ logrus.Debugf("Error loading container config when searching for local runtime.")
+ return localRuntime
+ }
+ for _, val := range conf.Engine.OCIRuntimes[runtime] {
+ exists, err := fileExistsAndNotADir(val)
+ if err != nil {
+ logrus.Errorf("Failed to determine if file exists and is not a directory: %v", err)
+ }
+ if exists {
+ localRuntime = val
+ break
+ }
+ }
+ return localRuntime
+}
+
// MergeEnv merges two lists of environment variables, avoiding duplicates.
// Later values override earlier ones with the same variable name, while the
// position of each variable's first appearance is preserved.
func MergeEnv(defaults, overrides []string) []string {
	s := make([]string, 0, len(defaults)+len(overrides))
	index := make(map[string]int, len(defaults)+len(overrides))
	// merge folds one list into the result, replacing in place any
	// variable we've already seen so its original position is kept.
	merge := func(envSpecs []string) {
		for _, envSpec := range envSpecs {
			name, _, _ := strings.Cut(envSpec, "=")
			if i, ok := index[name]; ok {
				s[i] = envSpec
				continue
			}
			s = append(s, envSpec)
			index[name] = len(s) - 1
		}
	}
	// Iterate the two slices separately instead of append(defaults,
	// overrides...): that append could write into spare capacity of the
	// caller's defaults slice, mutating data the caller still owns.
	merge(defaults)
	merge(overrides)
	return s
}
+
+type byDestination []specs.Mount
+
+func (m byDestination) Len() int {
+ return len(m)
+}
+
+func (m byDestination) Less(i, j int) bool {
+ iparts, jparts := m.parts(i), m.parts(j)
+ switch {
+ case iparts < jparts:
+ return true
+ case iparts > jparts:
+ return false
+ }
+ return filepath.Clean(m[i].Destination) < filepath.Clean(m[j].Destination)
+}
+
+func (m byDestination) Swap(i, j int) {
+ m[i], m[j] = m[j], m[i]
+}
+
+func (m byDestination) parts(i int) int {
+ return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
+}
+
// SortMounts sorts the given mounts in place so that shallower
// destinations come before deeper ones (stable within equal depth), and
// returns the same slice.
func SortMounts(m []specs.Mount) []specs.Mount {
	sort.Stable(byDestination(m))
	return m
}
+
+func VerifyTagName(imageSpec string) (types.ImageReference, error) {
+ ref, err := alltransports.ParseImageName(imageSpec)
+ if err != nil {
+ if ref, err = alltransports.ParseImageName(DefaultTransport + imageSpec); err != nil {
+ return nil, err
+ }
+ }
+ return ref, nil
+}
diff --git a/util/util_linux.go b/util/util_linux.go
new file mode 100644
index 0000000..cca1f9e
--- /dev/null
+++ b/util/util_linux.go
@@ -0,0 +1,20 @@
+package util
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+ isUnifiedOnce.Do(func() {
+ var st syscall.Statfs_t
+ if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil {
+ isUnified, isUnifiedErr = false, err
+ } else {
+ isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil
+ }
+ })
+ return isUnified, isUnifiedErr
+}
diff --git a/util/util_test.go b/util/util_test.go
new file mode 100644
index 0000000..5ef05e0
--- /dev/null
+++ b/util/util_test.go
@@ -0,0 +1,113 @@
+package util
+
+import (
+ "os"
+ "strconv"
+ "testing"
+
+ "github.com/containers/common/pkg/config"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMergeEnv(t *testing.T) {
+ tests := [][3][]string{
+ {
+ []string{"A=B", "B=C", "C=D"},
+ nil,
+ []string{"A=B", "B=C", "C=D"},
+ },
+ {
+ nil,
+ []string{"A=B", "B=C", "C=D"},
+ []string{"A=B", "B=C", "C=D"},
+ },
+ {
+ []string{"A=B", "B=C", "C=D", "E=F"},
+ []string{"B=O", "F=G"},
+ []string{"A=B", "B=O", "C=D", "E=F", "F=G"},
+ },
+ }
+ for i, test := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ result := MergeEnv(test[0], test[1])
+ if len(result) != len(test[2]) {
+ t.Fatalf("expected %v, got %v", test[2], result)
+ }
+ for i := range result {
+ if result[i] != test[2][i] {
+ t.Fatalf("expected %v, got %v", test[2], result)
+ }
+ }
+ })
+ }
+}
+
+func TestRuntime(t *testing.T) {
+ os.Setenv("CONTAINERS_CONF", "/dev/null")
+ conf, _ := config.Default()
+ defaultRuntime := conf.Engine.OCIRuntime
+ runtime := Runtime()
+ if runtime != defaultRuntime {
+ t.Fatalf("expected %v, got %v", runtime, defaultRuntime)
+ }
+ defaultRuntime = "myoci"
+ os.Setenv("BUILDAH_RUNTIME", defaultRuntime)
+ runtime = Runtime()
+ if runtime != defaultRuntime {
+ t.Fatalf("expected %v, got %v", runtime, defaultRuntime)
+ }
+}
+
+func TestMountsSort(t *testing.T) {
+ mounts1a := []specs.Mount{
+ {
+ Source: "/a/bb/c",
+ Destination: "/a/bb/c",
+ },
+ {
+ Source: "/a/b/c",
+ Destination: "/a/b/c",
+ },
+ {
+ Source: "/a",
+ Destination: "/a",
+ },
+ {
+ Source: "/a/b",
+ Destination: "/a/b",
+ },
+ {
+ Source: "/d/e",
+ Destination: "/a/c",
+ },
+ {
+ Source: "/b",
+ Destination: "/b",
+ },
+ {
+ Source: "/",
+ Destination: "/",
+ },
+ {
+ Source: "/a/b/c",
+ Destination: "/aa/b/c",
+ },
+ }
+ mounts1b := []string{
+ "/",
+ "/a",
+ "/b",
+ "/a/b",
+ "/a/c",
+ "/a/b/c",
+ "/a/bb/c",
+ "/aa/b/c",
+ }
+ sorted := SortMounts(mounts1a)
+ sortedDests := make([]string, len(mounts1a))
+ for i := range sorted {
+ sortedDests[i] = sorted[i].Destination
+ }
+ assert.Equalf(t, mounts1b, sortedDests, "sort returned results in unexpected by-destination order")
+}
diff --git a/util/util_unix.go b/util/util_unix.go
new file mode 100644
index 0000000..85fc6dd
--- /dev/null
+++ b/util/util_unix.go
@@ -0,0 +1,17 @@
+//go:build linux || darwin || freebsd
+// +build linux darwin freebsd
+
+package util
+
+import (
+ "os"
+ "syscall"
+)
+
// UID returns the owner UID of the file described by st. This variant is
// built on platforms where FileInfo.Sys() is a *syscall.Stat_t.
func UID(st os.FileInfo) int {
	return int(st.Sys().(*syscall.Stat_t).Uid)
}
+
// GID returns the owning group's GID of the file described by st. This
// variant is built on platforms where FileInfo.Sys() is a *syscall.Stat_t.
func GID(st os.FileInfo) int {
	return int(st.Sys().(*syscall.Stat_t).Gid)
}
diff --git a/util/util_unsupported.go b/util/util_unsupported.go
new file mode 100644
index 0000000..05a68f6
--- /dev/null
+++ b/util/util_unsupported.go
@@ -0,0 +1,8 @@
+//go:build !linux
+// +build !linux
+
+package util
+
// IsCgroup2UnifiedMode returns whether we are running in cgroup v2
// unified mode; on non-Linux platforms the answer is always false.
func IsCgroup2UnifiedMode() (bool, error) {
	return false, nil
}
diff --git a/util/util_windows.go b/util/util_windows.go
new file mode 100644
index 0000000..d11e894
--- /dev/null
+++ b/util/util_windows.go
@@ -0,0 +1,16 @@
+//go:build !linux && !darwin
+// +build !linux,!darwin
+
+package util
+
+import (
+ "os"
+)
+
// UID always returns 0 on platforms without POSIX-style file ownership.
// NOTE(review): this file's build constraint (!linux && !darwin) also
// matches freebsd, which util_unix.go appears to cover too — confirm the
// intended constraint set.
func UID(st os.FileInfo) int {
	return 0
}
+
// GID always returns 0 on platforms without POSIX-style file ownership.
func GID(st os.FileInfo) int {
	return 0
}