summaryrefslogtreecommitdiffstats
path: root/pkg
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 17:12:05 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 17:12:05 +0000
commit9ec46d47bedefa10bdaaa8a587ddb1851ef396ec (patch)
treeba7545ee99b384a6fc3e5ea028ae4c643648d683 /pkg
parentInitial commit. (diff)
downloadgolang-github-containers-buildah-9ec46d47bedefa10bdaaa8a587ddb1851ef396ec.tar.xz
golang-github-containers-buildah-9ec46d47bedefa10bdaaa8a587ddb1851ef396ec.zip
Adding upstream version 1.33.5+ds1.upstream/1.33.5+ds1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'pkg')
-rw-r--r--pkg/blobcache/blobcache.go31
-rw-r--r--pkg/chrootuser/user.go116
-rw-r--r--pkg/chrootuser/user_basic.go32
-rw-r--r--pkg/chrootuser/user_test.go40
-rw-r--r--pkg/chrootuser/user_unix.go314
-rw-r--r--pkg/cli/build.go477
-rw-r--r--pkg/cli/common.go584
-rw-r--r--pkg/cli/common_test.go142
-rw-r--r--pkg/cli/exec_codes.go13
-rw-r--r--pkg/completion/completion.go23
-rw-r--r--pkg/dummy/dummy_test.go8
-rw-r--r--pkg/formats/formats.go166
-rw-r--r--pkg/formats/formats_test.go44
-rw-r--r--pkg/formats/templates.go82
-rw-r--r--pkg/jail/jail.go180
-rw-r--r--pkg/jail/jail_int32.go20
-rw-r--r--pkg/jail/jail_int64.go19
-rw-r--r--pkg/manifests/compat.go28
-rw-r--r--pkg/overlay/overlay.go242
-rw-r--r--pkg/overlay/overlay_freebsd.go31
-rw-r--r--pkg/overlay/overlay_linux.go80
-rw-r--r--pkg/parse/parse.go1198
-rw-r--r--pkg/parse/parse_test.go224
-rw-r--r--pkg/parse/parse_unix.go49
-rw-r--r--pkg/parse/parse_unsupported.go18
-rw-r--r--pkg/rusage/rusage.go48
-rw-r--r--pkg/rusage/rusage_test.go48
-rw-r--r--pkg/rusage/rusage_unix.go35
-rw-r--r--pkg/rusage/rusage_unsupported.go18
-rw-r--r--pkg/sshagent/sshagent.go254
-rw-r--r--pkg/sshagent/sshagent_test.go55
-rw-r--r--pkg/supplemented/compat.go26
-rw-r--r--pkg/umask/umask.go13
-rw-r--r--pkg/util/resource_unix.go38
-rw-r--r--pkg/util/resource_unix_test.go32
-rw-r--r--pkg/util/resource_windows.go16
-rw-r--r--pkg/util/test/test1/Containerfile1
-rw-r--r--pkg/util/test/test1/Dockerfile1
-rw-r--r--pkg/util/test/test2/Dockerfile1
-rw-r--r--pkg/util/uptime_darwin.go10
-rw-r--r--pkg/util/uptime_freebsd.go25
-rw-r--r--pkg/util/uptime_linux.go28
-rw-r--r--pkg/util/uptime_windows.go10
-rw-r--r--pkg/util/util.go82
-rw-r--r--pkg/util/util_test.go32
-rw-r--r--pkg/util/version_unix.go19
-rw-r--r--pkg/util/version_windows.go10
-rw-r--r--pkg/volumes/volumes.go13
48 files changed, 4976 insertions, 0 deletions
diff --git a/pkg/blobcache/blobcache.go b/pkg/blobcache/blobcache.go
new file mode 100644
index 0000000..fa60619
--- /dev/null
+++ b/pkg/blobcache/blobcache.go
@@ -0,0 +1,31 @@
+package blobcache
+
+import (
+ imageBlobCache "github.com/containers/image/v5/pkg/blobcache"
+ "github.com/containers/image/v5/types"
+)
+
+// BlobCache is an object which saves copies of blobs that are written to it while passing them
+// through to some real destination, and which can be queried directly in order to read them
+// back.
+type BlobCache interface {
+ types.ImageReference
+ // HasBlob checks if a blob that matches the passed-in digest (and
+ // size, if not -1), is present in the cache.
+ HasBlob(types.BlobInfo) (bool, int64, error)
+ // Directory returns the cache directory.
+ Directory() string
+ // ClearCache clears the contents of the cache directory. Note
+ // that this also clears content which was not placed there by this
+ // cache implementation.
+ ClearCache() error
+}
+
+// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
+// written to the destination image created from the resulting reference will also be stored
+// as-is to the specified directory or a temporary directory.
+// The compress argument controls whether or not the cache will try to substitute a compressed
+// or different version of a blob when preparing the list of layers when reading an image.
+func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) {
+ return imageBlobCache.NewBlobCache(ref, directory, compress)
+}
diff --git a/pkg/chrootuser/user.go b/pkg/chrootuser/user.go
new file mode 100644
index 0000000..4614ecf
--- /dev/null
+++ b/pkg/chrootuser/user.go
@@ -0,0 +1,116 @@
+package chrootuser
+
+import (
+ "errors"
+ "fmt"
+ "os/user"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ErrNoSuchUser indicates that the user provided by the caller does not
+ // exist in /etc/passwd
+ ErrNoSuchUser = errors.New("user does not exist in /etc/passwd")
+)
+
+// GetUser will return the uid, gid of the user specified in the userspec
+// it will use the /etc/passwd and /etc/group files inside of the rootdir
+// to return this information.
+// userspec format [user | user:group | uid | uid:gid | user:gid | uid:group ]
+func GetUser(rootdir, userspec string) (uint32, uint32, string, error) {
+ var gid64 uint64
+ var gerr error = user.UnknownGroupError("error looking up group")
+
+ spec := strings.SplitN(userspec, ":", 2)
+ userspec = spec[0]
+ groupspec := ""
+
+ if userspec == "" {
+ userspec = "0"
+ }
+
+ if len(spec) > 1 {
+ groupspec = spec[1]
+ }
+
+ uid64, uerr := strconv.ParseUint(userspec, 10, 32)
+ if uerr == nil && groupspec == "" {
+ // We parsed the user name as a number, and there's no group
+ // component, so try to look up the primary GID of the user who
+ // has this UID.
+ var name string
+ name, gid64, gerr = lookupGroupForUIDInContainer(rootdir, uid64)
+ if gerr == nil {
+ userspec = name
+ } else {
+ // Leave userspec alone, but swallow the error and just
+ // use GID 0.
+ gid64 = 0
+ gerr = nil
+ }
+ }
+ if uerr != nil {
+ // The user ID couldn't be parsed as a number, so try to look
+ // up the user's UID and primary GID.
+ uid64, gid64, uerr = lookupUserInContainer(rootdir, userspec)
+ gerr = uerr
+ }
+
+ if groupspec != "" {
+ // We have a group name or number, so parse it.
+ gid64, gerr = strconv.ParseUint(groupspec, 10, 32)
+ if gerr != nil {
+ // The group couldn't be parsed as a number, so look up
+ // the group's GID.
+ gid64, gerr = lookupGroupInContainer(rootdir, groupspec)
+ }
+ }
+
+ homedir, err := lookupHomedirInContainer(rootdir, uid64)
+ if err != nil {
+ homedir = "/"
+ }
+
+ if uerr == nil && gerr == nil {
+ return uint32(uid64), uint32(gid64), homedir, nil
+ }
+
+ err = fmt.Errorf("determining run uid: %w", uerr)
+ if uerr == nil {
+ err = fmt.Errorf("determining run gid: %w", gerr)
+ }
+
+ return 0, 0, homedir, err
+}
+
+// GetGroup returns the gid by looking it up in the /etc/group file
+// groupspec format [ group | gid ]
+func GetGroup(rootdir, groupspec string) (uint32, error) {
+ gid64, gerr := strconv.ParseUint(groupspec, 10, 32)
+ if gerr != nil {
+ // The group couldn't be parsed as a number, so look up
+ // the group's GID.
+ gid64, gerr = lookupGroupInContainer(rootdir, groupspec)
+ }
+ if gerr != nil {
+ return 0, fmt.Errorf("looking up group for gid %q: %w", groupspec, gerr)
+ }
+ return uint32(gid64), nil
+}
+
+// GetAdditionalGroupsForUser returns a list of gids that userid is associated with
+func GetAdditionalGroupsForUser(rootdir string, userid uint64) ([]uint32, error) {
+ gids, err := lookupAdditionalGroupsForUIDInContainer(rootdir, userid)
+ if err != nil {
+ return nil, fmt.Errorf("looking up supplemental groups for uid %d: %w", userid, err)
+ }
+ return gids, nil
+}
+
+// LookupUIDInContainer returns username and gid associated with a UID in a container
+// it will use the /etc/passwd files inside of the rootdir
+// to return this information.
+func LookupUIDInContainer(rootdir string, uid uint64) (user string, gid uint64, err error) {
+ return lookupUIDInContainer(rootdir, uid)
+}
diff --git a/pkg/chrootuser/user_basic.go b/pkg/chrootuser/user_basic.go
new file mode 100644
index 0000000..5655a54
--- /dev/null
+++ b/pkg/chrootuser/user_basic.go
@@ -0,0 +1,32 @@
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
+
+package chrootuser
+
+import (
+ "errors"
+)
+
+func lookupUserInContainer(rootdir, username string) (uint64, uint64, error) {
+ return 0, 0, errors.New("user lookup not supported")
+}
+
+func lookupGroupInContainer(rootdir, groupname string) (uint64, error) {
+ return 0, errors.New("group lookup not supported")
+}
+
+func lookupGroupForUIDInContainer(rootdir string, userid uint64) (string, uint64, error) {
+ return "", 0, errors.New("primary group lookup by uid not supported")
+}
+
+func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid []uint32, err error) {
+ return nil, errors.New("supplemental groups list lookup by uid not supported")
+}
+
+func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) {
+ return "", 0, errors.New("UID lookup not supported")
+}
+
+func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) {
+ return "", errors.New("Home directory lookup not supported")
+}
diff --git a/pkg/chrootuser/user_test.go b/pkg/chrootuser/user_test.go
new file mode 100644
index 0000000..6b0dafa
--- /dev/null
+++ b/pkg/chrootuser/user_test.go
@@ -0,0 +1,40 @@
+package chrootuser
+
+import (
+ "bufio"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var testGroupData = `# comment
+ # indented comment
+wheel:*:0:root
+daemon:*:1:
+kmem:*:2:
+`
+
+func TestParseStripComments(t *testing.T) {
+ // Test reading group file, ignoring comment lines
+ rc := bufio.NewScanner(strings.NewReader(testGroupData))
+ line, ok := scanWithoutComments(rc)
+ assert.Equal(t, ok, true)
+ assert.Equal(t, line, "wheel:*:0:root")
+}
+
+func TestParseNextGroup(t *testing.T) {
+ // Test parsing group file
+ rc := bufio.NewScanner(strings.NewReader(testGroupData))
+ expected := []lookupGroupEntry{
+ lookupGroupEntry{"wheel", 0, "root"},
+ lookupGroupEntry{"daemon", 1, ""},
+ lookupGroupEntry{"kmem", 2, ""},
+ }
+ for _, exp := range expected {
+ grp := parseNextGroup(rc)
+ assert.NotNil(t, grp)
+ assert.Equal(t, *grp, exp)
+ }
+ assert.Nil(t, parseNextGroup(rc))
+}
diff --git a/pkg/chrootuser/user_unix.go b/pkg/chrootuser/user_unix.go
new file mode 100644
index 0000000..0ccaf8a
--- /dev/null
+++ b/pkg/chrootuser/user_unix.go
@@ -0,0 +1,314 @@
+//go:build linux || freebsd
+// +build linux freebsd
+
+package chrootuser
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "os/user"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/containers/storage/pkg/reexec"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ openChrootedCommand = "chrootuser-open"
+)
+
+func init() {
+ reexec.Register(openChrootedCommand, openChrootedFileMain)
+}
+
+func openChrootedFileMain() {
+ status := 0
+ flag.Parse()
+ if len(flag.Args()) < 1 {
+ os.Exit(1)
+ }
+ // Our first parameter is the directory to chroot into.
+ if err := unix.Chdir(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chdir(): %v", err)
+ os.Exit(1)
+ }
+ if err := unix.Chroot(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chroot(): %v", err)
+ os.Exit(1)
+ }
+ // Anything else is a file we want to dump out.
+ for _, filename := range flag.Args()[1:] {
+ f, err := os.Open(filename)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "open(%q): %v", filename, err)
+ status = 1
+ continue
+ }
+ _, err = io.Copy(os.Stdout, f)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "read(%q): %v", filename, err)
+ }
+ f.Close()
+ }
+ os.Exit(status)
+}
+
+func openChrootedFile(rootdir, filename string) (*exec.Cmd, io.ReadCloser, error) {
+ // The child process expects a chroot and one or more filenames that
+ // will be consulted relative to the chroot directory and concatenated
+ // to its stdout. Start it up.
+ cmd := reexec.Command(openChrootedCommand, rootdir, filename)
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, nil, err
+ }
+ err = cmd.Start()
+ if err != nil {
+ return nil, nil, err
+ }
+ // Hand back the child's stdout for reading, and the child to reap.
+ return cmd, stdout, nil
+}
+
+var (
+ lookupUser, lookupGroup sync.Mutex
+)
+
+type lookupPasswdEntry struct {
+ name string
+ uid uint64
+ gid uint64
+ home string
+}
+type lookupGroupEntry struct {
+ name string
+ gid uint64
+ user string
+}
+
+func scanWithoutComments(rc *bufio.Scanner) (string, bool) {
+ for {
+ if !rc.Scan() {
+ return "", false
+ }
+ line := rc.Text()
+ if strings.HasPrefix(strings.TrimSpace(line), "#") {
+ continue
+ }
+ return line, true
+ }
+}
+
+func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry {
+ if !rc.Scan() {
+ return nil
+ }
+ line := rc.Text()
+ fields := strings.Split(line, ":")
+ if len(fields) != 7 {
+ return nil
+ }
+ uid, err := strconv.ParseUint(fields[2], 10, 32)
+ if err != nil {
+ return nil
+ }
+ gid, err := strconv.ParseUint(fields[3], 10, 32)
+ if err != nil {
+ return nil
+ }
+ return &lookupPasswdEntry{
+ name: fields[0],
+ uid: uid,
+ gid: gid,
+ home: fields[5],
+ }
+}
+
+func parseNextGroup(rc *bufio.Scanner) *lookupGroupEntry {
+ // On FreeBSD, /etc/group may contain comments:
+ // https://man.freebsd.org/cgi/man.cgi?query=group&sektion=5&format=html
+ // We need to ignore those lines rather than trying to parse them.
+ line, ok := scanWithoutComments(rc)
+ if !ok {
+ return nil
+ }
+ fields := strings.Split(line, ":")
+ if len(fields) != 4 {
+ return nil
+ }
+ gid, err := strconv.ParseUint(fields[2], 10, 32)
+ if err != nil {
+ return nil
+ }
+ return &lookupGroupEntry{
+ name: fields[0],
+ gid: gid,
+ user: fields[3],
+ }
+}
+
+func lookupUserInContainer(rootdir, username string) (uid uint64, gid uint64, err error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return 0, 0, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.name != username {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.uid, pwd.gid, nil
+ }
+
+ return 0, 0, user.UnknownUserError(fmt.Sprintf("error looking up user %q", username))
+}
+
+func lookupGroupForUIDInContainer(rootdir string, userid uint64) (username string, gid uint64, err error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return "", 0, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.uid != userid {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.name, pwd.gid, nil
+ }
+
+ return "", 0, ErrNoSuchUser
+}
+
+func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid []uint32, err error) {
+ // Get the username associated with userid
+ username, _, err := lookupGroupForUIDInContainer(rootdir, userid)
+ if err != nil {
+ return nil, err
+ }
+
+ cmd, f, err := openChrootedFile(rootdir, "/etc/group")
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupGroup.Lock()
+ defer lookupGroup.Unlock()
+
+ grp := parseNextGroup(rc)
+ for grp != nil {
+ if strings.Contains(grp.user, username) {
+ gid = append(gid, uint32(grp.gid))
+ }
+ grp = parseNextGroup(rc)
+ }
+ return gid, nil
+}
+
+func lookupGroupInContainer(rootdir, groupname string) (gid uint64, err error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/group")
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupGroup.Lock()
+ defer lookupGroup.Unlock()
+
+ grp := parseNextGroup(rc)
+ for grp != nil {
+ if grp.name != groupname {
+ grp = parseNextGroup(rc)
+ continue
+ }
+ return grp.gid, nil
+ }
+
+ return 0, user.UnknownGroupError(fmt.Sprintf("error looking up group %q", groupname))
+}
+
+func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return "", 0, err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.uid != uid {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.name, pwd.gid, nil
+ }
+
+ return "", 0, user.UnknownUserError(fmt.Sprintf("error looking up uid %q", uid))
+}
+
+func lookupHomedirInContainer(rootdir string, uid uint64) (string, error) {
+ cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ _ = cmd.Wait()
+ }()
+ rc := bufio.NewScanner(f)
+ defer f.Close()
+
+ lookupUser.Lock()
+ defer lookupUser.Unlock()
+
+ pwd := parseNextPasswd(rc)
+ for pwd != nil {
+ if pwd.uid != uid {
+ pwd = parseNextPasswd(rc)
+ continue
+ }
+ return pwd.home, nil
+ }
+
+ return "", user.UnknownUserError(fmt.Sprintf("error looking up uid %q for homedir", uid))
+}
diff --git a/pkg/cli/build.go b/pkg/cli/build.go
new file mode 100644
index 0000000..e58e755
--- /dev/null
+++ b/pkg/cli/build.go
@@ -0,0 +1,477 @@
+package cli
+
+// the cli package contains urfave/cli related structs that help make up
+// the command line for buildah commands. it resides here so other projects
+// that vendor in this code can use them too.
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/buildah/pkg/util"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+type BuildOptions struct {
+ *LayerResults
+ *BudResults
+ *UserNSResults
+ *FromAndBudResults
+ *NameSpaceResults
+ Logwriter *os.File
+}
+
+const (
+ MaxPullPushRetries = 3
+ PullPushRetryDelay = 2 * time.Second
+)
+
+// GenBuildOptions translates command line flags into a BuildOptions structure
+func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (define.BuildOptions, []string, []string, error) {
+ options := define.BuildOptions{}
+
+ var removeAll []string
+
+ output := ""
+ cleanTmpFile := false
+ tags := []string{}
+ if iopts.Network == "none" {
+ if c.Flag("dns").Changed {
+ return options, nil, nil, errors.New("the --dns option cannot be used with --network=none")
+ }
+ if c.Flag("dns-option").Changed {
+ return options, nil, nil, errors.New("the --dns-option option cannot be used with --network=none")
+ }
+ if c.Flag("dns-search").Changed {
+ return options, nil, nil, errors.New("the --dns-search option cannot be used with --network=none")
+ }
+
+ }
+ if c.Flag("tag").Changed {
+ tags = iopts.Tag
+ if len(tags) > 0 {
+ output = tags[0]
+ tags = tags[1:]
+ }
+ if c.Flag("manifest").Changed {
+ for _, tag := range tags {
+ if tag == iopts.Manifest {
+ return options, nil, nil, errors.New("the same name must not be specified for both '--tag' and '--manifest'")
+ }
+ }
+ }
+ }
+ if err := auth.CheckAuthFile(iopts.BudResults.Authfile); err != nil {
+ return options, nil, nil, err
+ }
+
+ if c.Flag("logsplit").Changed {
+ if !c.Flag("logfile").Changed {
+ return options, nil, nil, errors.New("cannot use --logsplit without --logfile")
+ }
+ }
+
+ iopts.BudResults.Authfile, cleanTmpFile = util.MirrorToTempFileIfPathIsDescriptor(iopts.BudResults.Authfile)
+ if cleanTmpFile {
+ removeAll = append(removeAll, iopts.BudResults.Authfile)
+ }
+
+ // Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always
+ // --pull-always and --pull-never. The --pull-never and --pull-always options
+ // will not be documented.
+ pullPolicy := define.PullIfMissing
+ if strings.EqualFold(strings.TrimSpace(iopts.Pull), "true") {
+ pullPolicy = define.PullIfNewer
+ }
+ if iopts.PullAlways || strings.EqualFold(strings.TrimSpace(iopts.Pull), "always") {
+ pullPolicy = define.PullAlways
+ }
+ if iopts.PullNever || strings.EqualFold(strings.TrimSpace(iopts.Pull), "never") {
+ pullPolicy = define.PullNever
+ }
+ logrus.Debugf("Pull Policy for pull [%v]", pullPolicy)
+
+ args := make(map[string]string)
+ if c.Flag("build-arg-file").Changed {
+ for _, argfile := range iopts.BuildArgFile {
+ if err := readBuildArgFile(argfile, args); err != nil {
+ return options, nil, nil, err
+ }
+ }
+ }
+ if c.Flag("build-arg").Changed {
+ for _, arg := range iopts.BuildArg {
+ readBuildArg(arg, args)
+ }
+ }
+
+ additionalBuildContext := make(map[string]*define.AdditionalBuildContext)
+ if c.Flag("build-context").Changed {
+ for _, contextString := range iopts.BuildContext {
+ av := strings.SplitN(contextString, "=", 2)
+ if len(av) > 1 {
+ parseAdditionalBuildContext, err := parse.GetAdditionalBuildContext(av[1])
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("while parsing additional build context: %w", err)
+ }
+ additionalBuildContext[av[0]] = &parseAdditionalBuildContext
+ } else {
+ return options, nil, nil, fmt.Errorf("while parsing additional build context: %q, accepts value in the form of key=value", av)
+ }
+ }
+ }
+
+ containerfiles := getContainerfiles(iopts.File)
+ format, err := GetFormat(iopts.Format)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ layers := UseLayers()
+ if c.Flag("layers").Changed {
+ layers = iopts.Layers
+ }
+ contextDir := ""
+ cliArgs := inputArgs
+
+ // Nothing provided, we assume the current working directory as build
+ // context
+ if len(cliArgs) == 0 {
+ contextDir, err = os.Getwd()
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to choose current working directory as build context: %w", err)
+ }
+ } else {
+ // The context directory could be a URL. Try to handle that.
+ tempDir, subDir, err := define.TempDirForURL("", "buildah", cliArgs[0])
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("prepping temporary context directory: %w", err)
+ }
+ if tempDir != "" {
+ // We had to download it to a temporary directory.
+ // Delete it later.
+ removeAll = append(removeAll, tempDir)
+ contextDir = filepath.Join(tempDir, subDir)
+ } else {
+ // Nope, it was local. Use it as is.
+ absDir, err := filepath.Abs(cliArgs[0])
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("determining path to directory: %w", err)
+ }
+ contextDir = absDir
+ }
+ }
+
+ if len(containerfiles) == 0 {
+ // Try to find the Containerfile/Dockerfile within the contextDir
+ containerfile, err := util.DiscoverContainerfile(contextDir)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ containerfiles = append(containerfiles, containerfile)
+ contextDir = filepath.Dir(containerfile)
+ }
+
+ contextDir, err = filepath.EvalSymlinks(contextDir)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("evaluating symlinks in build context path: %w", err)
+ }
+
+ var stdin io.Reader
+ if iopts.Stdin {
+ stdin = os.Stdin
+ }
+
+ var stdout, stderr, reporter *os.File
+ stdout = os.Stdout
+ stderr = os.Stderr
+ reporter = os.Stderr
+ if iopts.Logwriter != nil {
+ logrus.SetOutput(iopts.Logwriter)
+ stdout = iopts.Logwriter
+ stderr = iopts.Logwriter
+ reporter = iopts.Logwriter
+ }
+
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("building system context: %w", err)
+ }
+
+ isolation, err := parse.IsolationOption(iopts.Isolation)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ runtimeFlags := []string{}
+ for _, arg := range iopts.RuntimeFlags {
+ runtimeFlags = append(runtimeFlags, "--"+arg)
+ }
+
+ commonOpts, err := parse.CommonBuildOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ pullFlagsCount := 0
+ if c.Flag("pull").Changed {
+ pullFlagsCount++
+ }
+ if c.Flag("pull-always").Changed {
+ pullFlagsCount++
+ }
+ if c.Flag("pull-never").Changed {
+ pullFlagsCount++
+ }
+
+ if pullFlagsCount > 1 {
+ return options, nil, nil, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'")
+ }
+
+ if (c.Flag("rm").Changed || c.Flag("force-rm").Changed) && (!c.Flag("layers").Changed && !c.Flag("no-cache").Changed) {
+ return options, nil, nil, errors.New("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
+ }
+
+ if c.Flag("compress").Changed {
+ logrus.Debugf("--compress option specified but is ignored")
+ }
+
+ compression := define.Gzip
+ if iopts.DisableCompression {
+ compression = define.Uncompressed
+ }
+
+ if c.Flag("disable-content-trust").Changed {
+ logrus.Debugf("--disable-content-trust option specified but is ignored")
+ }
+
+ namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("parsing ID mapping options: %w", err)
+ }
+ namespaceOptions.AddOrReplace(usernsOption...)
+
+ platforms, err := parse.PlatformsFromOptions(c)
+ if err != nil {
+ return options, nil, nil, err
+ }
+
+ decryptConfig, err := DecryptConfig(iopts.DecryptionKeys)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to obtain decrypt config: %w", err)
+ }
+
+ var excludes []string
+ if iopts.IgnoreFile != "" {
+ if excludes, _, err = parse.ContainerIgnoreFile(contextDir, iopts.IgnoreFile, containerfiles); err != nil {
+ return options, nil, nil, err
+ }
+ }
+ var timestamp *time.Time
+ if c.Flag("timestamp").Changed {
+ t := time.Unix(iopts.Timestamp, 0).UTC()
+ timestamp = &t
+ }
+ if c.Flag("output").Changed {
+ buildOption, err := parse.GetBuildOutput(iopts.BuildOutput)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ if buildOption.IsStdout {
+ iopts.Quiet = true
+ }
+ }
+ var confidentialWorkloadOptions define.ConfidentialWorkloadOptions
+ if c.Flag("cw").Changed {
+ confidentialWorkloadOptions, err = parse.GetConfidentialWorkloadOptions(iopts.CWOptions)
+ if err != nil {
+ return options, nil, nil, err
+ }
+ }
+ var cacheTo []reference.Named
+ var cacheFrom []reference.Named
+ cacheTo = nil
+ cacheFrom = nil
+ if c.Flag("cache-to").Changed {
+ cacheTo, err = parse.RepoNamesToNamedReferences(iopts.CacheTo)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", iopts.CacheTo, err)
+ }
+ }
+ if c.Flag("cache-from").Changed {
+ cacheFrom, err = parse.RepoNamesToNamedReferences(iopts.CacheFrom)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", iopts.CacheFrom, err)
+ }
+ }
+ var cacheTTL time.Duration
+ if c.Flag("cache-ttl").Changed {
+ cacheTTL, err = time.ParseDuration(iopts.CacheTTL)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to parse value provided %q as --cache-ttl: %w", iopts.CacheTTL, err)
+ }
+ // If user explicitly specified `--cache-ttl=0s`
+ // it would effectively mean that user is asking
+ // to use no cache at all. In such use cases
+ // buildah can skip looking for cache entirely
+ // by setting `--no-cache=true` internally.
+ if int64(cacheTTL) == 0 {
+ logrus.Debug("Setting --no-cache=true since --cache-ttl was set to 0s which effectively means user wants to ignore cache")
+ if c.Flag("no-cache").Changed && !iopts.NoCache {
+ return options, nil, nil, fmt.Errorf("cannot use --cache-ttl with duration as 0 and --no-cache=false")
+ }
+ iopts.NoCache = true
+ }
+ }
+ var pullPushRetryDelay time.Duration
+ pullPushRetryDelay, err = time.ParseDuration(iopts.RetryDelay)
+ if err != nil {
+ return options, nil, nil, fmt.Errorf("unable to parse value provided %q as --retry-delay: %w", iopts.RetryDelay, err)
+ }
+ // Following log line is used in integration test.
+ logrus.Debugf("Setting MaxPullPushRetries to %d and PullPushRetryDelay to %v", iopts.Retry, pullPushRetryDelay)
+
+ if c.Flag("network").Changed && c.Flag("isolation").Changed {
+ if isolation == define.IsolationChroot {
+ if ns := namespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
+ if !ns.Host {
+ return options, nil, nil, fmt.Errorf("cannot set --network other than host with --isolation %s", c.Flag("isolation").Value.String())
+ }
+ }
+ }
+ }
+
+ options = define.BuildOptions{
+ AddCapabilities: iopts.CapAdd,
+ AdditionalBuildContexts: additionalBuildContext,
+ AdditionalTags: tags,
+ AllPlatforms: iopts.AllPlatforms,
+ Annotations: iopts.Annotation,
+ Architecture: systemContext.ArchitectureChoice,
+ Args: args,
+ BlobDirectory: iopts.BlobCache,
+ BuildOutput: iopts.BuildOutput,
+ CacheFrom: cacheFrom,
+ CacheTo: cacheTo,
+ CacheTTL: cacheTTL,
+ CNIConfigDir: iopts.CNIConfigDir,
+ CNIPluginPath: iopts.CNIPlugInPath,
+ ConfidentialWorkload: confidentialWorkloadOptions,
+ CPPFlags: iopts.CPPFlags,
+ CommonBuildOpts: commonOpts,
+ Compression: compression,
+ ConfigureNetwork: networkPolicy,
+ ContextDirectory: contextDir,
+ Devices: iopts.Devices,
+ DropCapabilities: iopts.CapDrop,
+ Err: stderr,
+ Excludes: excludes,
+ ForceRmIntermediateCtrs: iopts.ForceRm,
+ From: iopts.From,
+ GroupAdd: iopts.GroupAdd,
+ IDMappingOptions: idmappingOptions,
+ IIDFile: iopts.Iidfile,
+ IgnoreFile: iopts.IgnoreFile,
+ In: stdin,
+ Isolation: isolation,
+ Jobs: &iopts.Jobs,
+ Labels: iopts.Label,
+ LayerLabels: iopts.LayerLabel,
+ Layers: layers,
+ LogFile: iopts.Logfile,
+ LogRusage: iopts.LogRusage,
+ LogSplitByPlatform: iopts.LogSplitByPlatform,
+ Manifest: iopts.Manifest,
+ MaxPullPushRetries: iopts.Retry,
+ NamespaceOptions: namespaceOptions,
+ NoCache: iopts.NoCache,
+ OS: systemContext.OSChoice,
+ OSFeatures: iopts.OSFeatures,
+ OSVersion: iopts.OSVersion,
+ OciDecryptConfig: decryptConfig,
+ Out: stdout,
+ Output: output,
+ OutputFormat: format,
+ Platforms: platforms,
+ PullPolicy: pullPolicy,
+ PullPushRetryDelay: pullPushRetryDelay,
+ Quiet: iopts.Quiet,
+ RemoveIntermediateCtrs: iopts.Rm,
+ ReportWriter: reporter,
+ Runtime: iopts.Runtime,
+ RuntimeArgs: runtimeFlags,
+ RusageLogFile: iopts.RusageLogFile,
+ SignBy: iopts.SignBy,
+ SignaturePolicyPath: iopts.SignaturePolicy,
+ SkipUnusedStages: types.NewOptionalBool(iopts.SkipUnusedStages),
+ Squash: iopts.Squash,
+ SystemContext: systemContext,
+ Target: iopts.Target,
+ Timestamp: timestamp,
+ TransientMounts: iopts.Volumes,
+ UnsetEnvs: iopts.UnsetEnvs,
+ UnsetLabels: iopts.UnsetLabels,
+ }
+ if iopts.Quiet {
+ options.ReportWriter = io.Discard
+ }
+
+ options.Envs = LookupEnvVarReferences(iopts.Envs, os.Environ())
+
+ return options, containerfiles, removeAll, nil
+}
+
+func readBuildArgFile(buildargfile string, args map[string]string) error {
+ argfile, err := os.ReadFile(buildargfile)
+ if err != nil {
+ return err
+ }
+ for _, arg := range strings.Split(string(argfile), "\n") {
+ if len(arg) == 0 || arg[0] == '#' {
+ continue
+ }
+ readBuildArg(arg, args)
+ }
+ return err
+}
+
+func readBuildArg(buildarg string, args map[string]string) {
+ av := strings.SplitN(buildarg, "=", 2)
+ if len(av) > 1 {
+ args[av[0]] = av[1]
+ } else {
+ // check if the env is set in the local environment and use that value if it is
+ if val, present := os.LookupEnv(av[0]); present {
+ args[av[0]] = val
+ } else {
+ delete(args, av[0])
+ }
+ }
+}
+
+func getContainerfiles(files []string) []string {
+ var containerfiles []string
+ for _, f := range files {
+ if f == "-" {
+ containerfiles = append(containerfiles, "/dev/stdin")
+ } else {
+ containerfiles = append(containerfiles, f)
+ }
+ }
+ return containerfiles
+}
diff --git a/pkg/cli/common.go b/pkg/cli/common.go
new file mode 100644
index 0000000..76e03ba
--- /dev/null
+++ b/pkg/cli/common.go
@@ -0,0 +1,584 @@
+package cli
+
+// the cli package contains urfave/cli related structs that help make up
+// the command line for buildah commands. it resides here so other projects
+// that vendor in this code can use them too.
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/pkg/completion"
+ "github.com/containers/buildah/pkg/parse"
+ commonComp "github.com/containers/common/pkg/completion"
+ "github.com/containers/common/pkg/config"
+ encconfig "github.com/containers/ocicrypt/config"
+ enchelpers "github.com/containers/ocicrypt/helpers"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/spf13/pflag"
+)
+
// LayerResults represents the results of the layer flags.
type LayerResults struct {
	// ForceRm: always remove intermediate containers, even after a failed build.
	ForceRm bool
	// Layers: use (cache) intermediate layers during the build.
	Layers bool
}

// UserNSResults represents the results for the UserNS flags.
// Field semantics follow the flag registrations in GetUserNSFlags.
type UserNSResults struct {
	UserNS            string
	GroupAdd          []string
	UserNSUIDMap      []string
	UserNSGIDMap      []string
	UserNSUIDMapUser  string
	UserNSGIDMapGroup string
}

// NameSpaceResults represents the results for Namespace flags.
// Field semantics follow the flag registrations in GetNameSpaceFlags;
// the CNI fields belong to flags that are hidden from help output.
type NameSpaceResults struct {
	Cgroup        string
	IPC           string
	Network       string
	CNIConfigDir  string
	CNIPlugInPath string
	PID           string
	UTS           string
}

// BudResults represents the results for Build flags.
// Field semantics follow the corresponding flag registrations in GetBudFlags.
type BudResults struct {
	AllPlatforms        bool
	Annotation          []string
	Authfile            string
	BuildArg            []string
	BuildArgFile        []string
	BuildContext        []string
	CacheFrom           []string
	CacheTo             []string
	CacheTTL            string
	CertDir             string
	Compress            bool
	Creds               string
	CPPFlags            []string
	DisableCompression  bool
	DisableContentTrust bool
	IgnoreFile          string
	File                []string
	Format              string
	From                string
	Iidfile             string
	Label               []string
	LayerLabel          []string
	Logfile             string
	LogSplitByPlatform  bool
	Manifest            string
	NoHostname          bool
	NoHosts             bool
	NoCache             bool
	// Timestamp is the created timestamp, in seconds since the Unix epoch,
	// used for deterministic builds (see the --timestamp flag help).
	Timestamp           int64
	OmitHistory         bool
	OCIHooksDir         []string
	Pull                string
	PullAlways          bool
	PullNever           bool
	Quiet               bool
	IdentityLabel       bool
	Rm                  bool
	Runtime             string
	RuntimeFlags        []string
	Secrets             []string
	SSH                 []string
	SignaturePolicy     string
	SignBy              string
	Squash              bool
	SkipUnusedStages    bool
	Stdin               bool
	Tag                 []string
	BuildOutput         string
	Target              string
	TLSVerify           bool
	Jobs                int
	LogRusage           bool
	RusageLogFile       string
	UnsetEnvs           []string
	UnsetLabels         []string
	Envs                []string
	OSFeatures          []string
	OSVersion           string
	CWOptions           string
}

// FromAndBudResults represents the results for common flags
// in build and from.
// Field semantics follow the flag registrations in GetFromAndBudFlags.
type FromAndBudResults struct {
	AddHost      []string
	BlobCache    string
	CapAdd       []string
	CapDrop      []string
	CgroupParent string
	CPUPeriod    uint64
	CPUQuota     int64
	CPUSetCPUs   string
	CPUSetMems   string
	CPUShares    uint64
	DecryptionKeys []string
	Devices        []string
	DNSSearch      []string
	DNSServers     []string
	DNSOptions     []string
	HTTPProxy      bool
	Isolation      string
	Memory         string
	MemorySwap     string
	Retry          int
	RetryDelay     string
	SecurityOpt    []string
	ShmSize        string
	Ulimit         []string
	Volumes        []string
}
+
+// GetUserNSFlags returns the common flags for usernamespace
+func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
+ usernsFlags := pflag.FlagSet{}
+ usernsFlags.StringSliceVar(&flags.GroupAdd, "group-add", nil, "add additional groups to the primary container process. 'keep-groups' allows container processes to use supplementary groups.")
+ usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'")
+ usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerUID:hostUID:length` UID mapping to use in user namespace")
+ usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerGID:hostGID:length` GID mapping to use in user namespace")
+ usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping")
+ usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping")
+ return usernsFlags
+}
+
+// GetUserNSFlagsCompletions returns the FlagCompletions for the userns flags
+func GetUserNSFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["group-add"] = commonComp.AutocompleteNone
+ flagCompletion["userns"] = completion.AutocompleteNamespaceFlag
+ flagCompletion["userns-uid-map"] = commonComp.AutocompleteNone
+ flagCompletion["userns-gid-map"] = commonComp.AutocompleteNone
+ flagCompletion["userns-uid-map-user"] = commonComp.AutocompleteSubuidName
+ flagCompletion["userns-gid-map-group"] = commonComp.AutocompleteSubgidName
+ return flagCompletion
+}
+
+// GetNameSpaceFlags returns the common flags for a namespace menu
+func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.StringVar(&flags.Cgroup, "cgroupns", "", "'private', or 'host'")
+ fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'private', `path` of IPC namespace to join, or 'host'")
+ fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'private', 'none', 'ns:path' of network namespace to join, or 'host'")
+ fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", "", "`directory` of CNI configuration files")
+ _ = fs.MarkHidden("cni-config-dir")
+ fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", "", "`path` of CNI network plugins")
+ _ = fs.MarkHidden("cni-plugin-path")
+ fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "private, `path` of PID namespace to join, or 'host'")
+ fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "private, :`path` of UTS namespace to join, or 'host'")
+ return fs
+}
+
+// GetNameSpaceFlagsCompletions returns the FlagCompletions for the namespace flags
+func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["cgroupns"] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.IPCNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.NetworkNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.PIDNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.UTSNamespace)] = completion.AutocompleteNamespaceFlag
+ return flagCompletion
+}
+
+// GetLayerFlags returns the common flags for layers
+func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.BoolVar(&flags.ForceRm, "force-rm", false, "always remove intermediate containers after a build, even if the build is unsuccessful.")
+ fs.BoolVar(&flags.Layers, "layers", UseLayers(), "use intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.")
+ return fs
+}
+
+// Note: GetLayerFlagsCompletion is not needed since GetLayerFlags only contains bool flags
+
// GetBudFlags returns common build flags.
//
// Most flags bind into *flags; a few ("arch", "os", "variant", "loglevel")
// are registered without a bound field and must be read back through the
// returned FlagSet by the caller.
func GetBudFlags(flags *BudResults) pflag.FlagSet {
	fs := pflag.FlagSet{}
	fs.BoolVar(&flags.AllPlatforms, "all-platforms", false, "attempt to build for all base image platforms")
	fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
	fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "set metadata for an image (default [])")
	fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.")
	fs.StringArrayVar(&flags.OCIHooksDir, "hooks-dir", []string{}, "set the OCI hooks directory path (may be set multiple times)")
	fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
	fs.StringArrayVar(&flags.BuildArgFile, "build-arg-file", []string{}, "`argfile.conf` containing lines of argument=value to supply to the builder")
	fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder")
	fs.StringArrayVar(&flags.CacheFrom, "cache-from", []string{}, "remote repository list to utilise as potential cache source.")
	fs.StringArrayVar(&flags.CacheTo, "cache-to", []string{}, "remote repository list to utilise as potential cache destination.")
	fs.StringVar(&flags.CacheTTL, "cache-ttl", "", "only consider cache images under specified duration.")
	fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
	// "compress" is accepted for compatibility only; per its help text it has
	// no effect on the image.
	fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image")
	fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)")
	fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
	fs.StringVarP(&flags.CWOptions, "cw", "", "", "confidential workload `options`")
	fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default")
	fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "this is a Docker specific option and is a NOOP")
	fs.StringArrayVar(&flags.Envs, "env", []string{}, "set environment variable for the image")
	fs.StringVar(&flags.From, "from", "", "image name used to replace the value in the first FROM instruction in the Containerfile")
	fs.StringVar(&flags.IgnoreFile, "ignorefile", "", "path to an alternate .dockerignore file")
	fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
	fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
	fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
	fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
	fs.StringArrayVar(&flags.Label, "label", []string{}, "set metadata for an image (default [])")
	fs.StringArrayVar(&flags.LayerLabel, "layer-label", []string{}, "set metadata for an intermediate image (default [])")
	fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
	fs.BoolVar(&flags.LogSplitByPlatform, "logsplit", false, "split logfile to different files for each platform")
	// "loglevel" is obsolete ("NO LONGER USED") but kept registered and hidden
	// so that old invocations don't break.
	fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden")
	if err := fs.MarkHidden("loglevel"); err != nil {
		panic(fmt.Sprintf("error marking the loglevel flag as hidden: %v", err))
	}
	fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
	if err := fs.MarkHidden("log-rusage"); err != nil {
		panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
	}
	fs.StringVar(&flags.RusageLogFile, "rusage-logfile", "", "destination file to which rusage should be logged to instead of stdout (= the default).")
	if err := fs.MarkHidden("rusage-logfile"); err != nil {
		panic(fmt.Sprintf("error marking the rusage-logfile flag as hidden: %v", err))
	}
	fs.StringVar(&flags.Manifest, "manifest", "", "add the image to the specified manifest list. Creates manifest list if it does not exist")
	fs.BoolVar(&flags.NoCache, "no-cache", false, "do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
	fs.BoolVar(&flags.NoHostname, "no-hostname", false, "do not create new /etc/hostname file for RUN instructions, use the one from the base image.")
	fs.BoolVar(&flags.NoHosts, "no-hosts", false, "do not create new /etc/hosts file for RUN instructions, use the one from the base image.")
	fs.String("os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
	fs.StringArrayVar(&flags.OSFeatures, "os-feature", []string{}, "set required OS `feature` for the target image in addition to values from the base image")
	fs.StringVar(&flags.OSVersion, "os-version", "", "set required OS `version` for the target image instead of the value from the base image")
	fs.StringVar(&flags.Pull, "pull", "true", "pull the image from the registry if newer or not present in store, if false, only pull the image if not present, if always, pull the image even if the named image is present in store, if never, only use the image present in store if available")
	fs.Lookup("pull").NoOptDefVal = "true" //allow `--pull ` to be set to `true` as expected.
	fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store")
	if err := fs.MarkHidden("pull-always"); err != nil {
		panic(fmt.Sprintf("error marking the pull-always flag as hidden: %v", err))
	}
	fs.BoolVar(&flags.PullNever, "pull-never", false, "do not pull the image, use the image present in store if available")
	if err := fs.MarkHidden("pull-never"); err != nil {
		panic(fmt.Sprintf("error marking the pull-never flag as hidden: %v", err))
	}
	fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
	fs.BoolVar(&flags.OmitHistory, "omit-history", false, "omit build history information from built image")
	fs.BoolVar(&flags.IdentityLabel, "identity-label", true, "add default identity label")
	fs.BoolVar(&flags.Rm, "rm", true, "remove intermediate containers after a successful build")
	// "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go.
	fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
	fs.StringArrayVar(&flags.Secrets, "secret", []string{}, "secret file to expose to the build")
	fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
	fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
	if err := fs.MarkHidden("signature-policy"); err != nil {
		panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err))
	}
	fs.BoolVar(&flags.SkipUnusedStages, "skip-unused-stages", true, "skips stages in multi-stage builds which do not affect the final target")
	fs.BoolVar(&flags.Squash, "squash", false, "squash all image layers into a single layer")
	fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. (format: default|<id>[=<socket>|<key>[,<key>]])")
	fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers")
	fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
	fs.StringVarP(&flags.BuildOutput, "output", "o", "", "output destination (format: type=local,dest=path)")
	fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
	fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
	fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
	fs.String("variant", "", "override the `variant` of the specified image")
	fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "unset environment variable from final image")
	fs.StringSliceVar(&flags.UnsetLabels, "unsetlabel", nil, "unset label when inheriting labels from base image")
	return fs
}
+
+// GetBudFlagsCompletions returns the FlagCompletions for the common build flags
+func GetBudFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["annotation"] = commonComp.AutocompleteNone
+ flagCompletion["arch"] = commonComp.AutocompleteNone
+ flagCompletion["authfile"] = commonComp.AutocompleteDefault
+ flagCompletion["build-arg"] = commonComp.AutocompleteNone
+ flagCompletion["build-arg-file"] = commonComp.AutocompleteDefault
+ flagCompletion["build-context"] = commonComp.AutocompleteNone
+ flagCompletion["cache-from"] = commonComp.AutocompleteNone
+ flagCompletion["cache-to"] = commonComp.AutocompleteNone
+ flagCompletion["cache-ttl"] = commonComp.AutocompleteNone
+ flagCompletion["cert-dir"] = commonComp.AutocompleteDefault
+ flagCompletion["cpp-flag"] = commonComp.AutocompleteNone
+ flagCompletion["creds"] = commonComp.AutocompleteNone
+ flagCompletion["cw"] = commonComp.AutocompleteNone
+ flagCompletion["env"] = commonComp.AutocompleteNone
+ flagCompletion["file"] = commonComp.AutocompleteDefault
+ flagCompletion["format"] = commonComp.AutocompleteNone
+ flagCompletion["from"] = commonComp.AutocompleteDefault
+ flagCompletion["hooks-dir"] = commonComp.AutocompleteNone
+ flagCompletion["ignorefile"] = commonComp.AutocompleteDefault
+ flagCompletion["iidfile"] = commonComp.AutocompleteDefault
+ flagCompletion["jobs"] = commonComp.AutocompleteNone
+ flagCompletion["label"] = commonComp.AutocompleteNone
+ flagCompletion["layer-label"] = commonComp.AutocompleteNone
+ flagCompletion["logfile"] = commonComp.AutocompleteDefault
+ flagCompletion["manifest"] = commonComp.AutocompleteDefault
+ flagCompletion["os"] = commonComp.AutocompleteNone
+ flagCompletion["os-feature"] = commonComp.AutocompleteNone
+ flagCompletion["os-version"] = commonComp.AutocompleteNone
+ flagCompletion["output"] = commonComp.AutocompleteNone
+ flagCompletion["pull"] = commonComp.AutocompleteDefault
+ flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
+ flagCompletion["secret"] = commonComp.AutocompleteNone
+ flagCompletion["sign-by"] = commonComp.AutocompleteNone
+ flagCompletion["signature-policy"] = commonComp.AutocompleteNone
+ flagCompletion["ssh"] = commonComp.AutocompleteNone
+ flagCompletion["tag"] = commonComp.AutocompleteNone
+ flagCompletion["target"] = commonComp.AutocompleteNone
+ flagCompletion["timestamp"] = commonComp.AutocompleteNone
+ flagCompletion["unsetenv"] = commonComp.AutocompleteNone
+ flagCompletion["unsetlabel"] = commonComp.AutocompleteNone
+ flagCompletion["variant"] = commonComp.AutocompleteNone
+ return flagCompletion
+}
+
+// GetFromAndBudFlags returns from and build flags
+func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) (pflag.FlagSet, error) {
+ fs := pflag.FlagSet{}
+ defaultContainerConfig, err := config.Default()
+ if err != nil {
+ return fs, fmt.Errorf("failed to get container config: %w", err)
+ }
+
+ fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
+ fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
+ if err := fs.MarkHidden("blob-cache"); err != nil {
+ panic(fmt.Sprintf("error marking net flag as hidden: %v", err))
+ }
+ fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])")
+ fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])")
+ fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container")
+ fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period")
+ fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota")
+ fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
+ fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
+ fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
+ fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image")
+ fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices.Get(), "additional devices to be used within containers (default [])")
+ fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches.Get(), "set custom DNS search domains")
+ fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers.Get(), "set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
+ fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions.Get(), "set custom DNS options")
+ fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables")
+ fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
+ fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
+ fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
+ fs.IntVar(&flags.Retry, "retry", MaxPullPushRetries, "number of times to retry in case of failure when performing push/pull")
+ fs.StringVar(&flags.RetryDelay, "retry-delay", PullPushRetryDelay.String(), "delay between retries in case of push/pull failures")
+ fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
+ fs.String("os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images")
+ fs.StringSlice("platform", []string{parse.DefaultPlatform()}, "set the `OS/ARCH[/VARIANT]` of the image to the provided value instead of the current operating system and architecture of the host (for example \"linux/arm\")")
+ fs.String("variant", "", "override the `variant` of the specified image")
+ fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
+ fs.StringVar(&flags.ShmSize, "shm-size", defaultContainerConfig.Containers.ShmSize, "size of '/dev/shm'. The format is `<number><unit>`.")
+ fs.StringSliceVar(&flags.Ulimit, "ulimit", defaultContainerConfig.Containers.DefaultUlimits.Get(), "ulimit options")
+ fs.StringArrayVarP(&flags.Volumes, "volume", "v", defaultContainerConfig.Volumes(), "bind mount a volume into the container")
+
+ // Add in the usernamespace and namespaceflags
+ usernsFlags := GetUserNSFlags(usernsResults)
+ namespaceFlags := GetNameSpaceFlags(namespaceResults)
+ fs.AddFlagSet(&usernsFlags)
+ fs.AddFlagSet(&namespaceFlags)
+
+ return fs, nil
+}
+
+// GetFromAndBudFlagsCompletions returns the FlagCompletions for the from and build flags
+func GetFromAndBudFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["arch"] = commonComp.AutocompleteNone
+ flagCompletion["add-host"] = commonComp.AutocompleteNone
+ flagCompletion["blob-cache"] = commonComp.AutocompleteNone
+ flagCompletion["cap-add"] = commonComp.AutocompleteCapabilities
+ flagCompletion["cap-drop"] = commonComp.AutocompleteCapabilities
+ flagCompletion["cgroup-parent"] = commonComp.AutocompleteDefault // FIXME: This would be a path right?!
+ flagCompletion["cpu-period"] = commonComp.AutocompleteNone
+ flagCompletion["cpu-quota"] = commonComp.AutocompleteNone
+ flagCompletion["cpu-shares"] = commonComp.AutocompleteNone
+ flagCompletion["cpuset-cpus"] = commonComp.AutocompleteNone
+ flagCompletion["cpuset-mems"] = commonComp.AutocompleteNone
+ flagCompletion["decryption-key"] = commonComp.AutocompleteNone
+ flagCompletion["device"] = commonComp.AutocompleteDefault
+ flagCompletion["dns-search"] = commonComp.AutocompleteNone
+ flagCompletion["dns"] = commonComp.AutocompleteNone
+ flagCompletion["dns-option"] = commonComp.AutocompleteNone
+ flagCompletion["isolation"] = commonComp.AutocompleteNone
+ flagCompletion["memory"] = commonComp.AutocompleteNone
+ flagCompletion["memory-swap"] = commonComp.AutocompleteNone
+ flagCompletion["os"] = commonComp.AutocompleteNone
+ flagCompletion["platform"] = commonComp.AutocompleteNone
+ flagCompletion["retry"] = commonComp.AutocompleteNone
+ flagCompletion["retry-delay"] = commonComp.AutocompleteNone
+ flagCompletion["security-opt"] = commonComp.AutocompleteNone
+ flagCompletion["shm-size"] = commonComp.AutocompleteNone
+ flagCompletion["ulimit"] = commonComp.AutocompleteNone
+ flagCompletion["volume"] = commonComp.AutocompleteDefault
+ flagCompletion["variant"] = commonComp.AutocompleteNone
+
+ // Add in the usernamespace and namespace flag completions
+ userNsComp := GetUserNSFlagsCompletions()
+ for name, comp := range userNsComp {
+ flagCompletion[name] = comp
+ }
+ namespaceComp := GetNameSpaceFlagsCompletions()
+ for name, comp := range namespaceComp {
+ flagCompletion[name] = comp
+ }
+
+ return flagCompletion
+}
+
+// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true"
+// otherwise it returns false
+func UseLayers() bool {
+ layers := os.Getenv("BUILDAH_LAYERS")
+ if strings.ToLower(layers) == "true" || layers == "1" {
+ return true
+ }
+ return false
+}
+
+// DefaultFormat returns the default image format
+func DefaultFormat() string {
+ format := os.Getenv("BUILDAH_FORMAT")
+ if format != "" {
+ return format
+ }
+ return define.OCI
+}
+
// DefaultIsolation returns the default process isolation type, honoring the
// BUILDAH_ISOLATION environment variable when set and falling back to
// "rootless" for unprivileged users. (The previous comment incorrectly said
// "image format".)
func DefaultIsolation() string {
	isolation := os.Getenv("BUILDAH_ISOLATION")
	if isolation != "" {
		return isolation
	}
	if unshare.IsRootless() {
		return "rootless"
	}
	return define.OCI
}
+
+// DefaultHistory returns the default add-history setting
+func DefaultHistory() bool {
+ history := os.Getenv("BUILDAH_HISTORY")
+ if strings.ToLower(history) == "true" || history == "1" {
+ return true
+ }
+ return false
+}
+
// VerifyFlagsArgsOrder returns an error if any option-looking argument (one
// beginning with "-") appears in args, since options must be specified
// before the image or container name.
func VerifyFlagsArgsOrder(args []string) error {
	for _, arg := range args {
		if len(arg) > 0 && arg[0] == '-' {
			return fmt.Errorf("no options (%s) can be specified after the image or container name", arg)
		}
	}
	return nil
}
+
+// AliasFlags is a function to handle backwards compatibility with old flags
+func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "net":
+ name = "network"
+ case "override-arch":
+ name = "arch"
+ case "override-os":
+ name = "os"
+ case "purge":
+ name = "rm"
+ case "tty":
+ name = "terminal"
+ }
+ return pflag.NormalizedName(name)
+}
+
+// LookupEnvVarReferences returns a copy of specs with keys and values resolved
+// from environ. Strings are in "key=value" form, the same as [os.Environ].
+//
+// - When a string in specs lacks "=", it is treated as a key and the value
+// is retrieved from environ. When the key is missing from environ, neither
+// the key nor value are returned.
+//
+// - When a string in specs lacks "=" and ends with "*", it is treated as
+// a key prefix and any keys with the same prefix in environ are returned.
+//
+// - When a string in specs is exactly "*", all keys and values in environ
+// are returned.
+func LookupEnvVarReferences(specs, environ []string) []string {
+ result := make([]string, 0, len(specs))
+
+ for _, spec := range specs {
+ if key, _, ok := strings.Cut(spec, "="); ok {
+ result = append(result, spec)
+
+ } else if key == "*" {
+ result = append(result, environ...)
+
+ } else {
+ prefix := key + "="
+ if strings.HasSuffix(key, "*") {
+ prefix = strings.TrimSuffix(key, "*")
+ }
+
+ for _, spec := range environ {
+ if strings.HasPrefix(spec, prefix) {
+ result = append(result, spec)
+ }
+ }
+ }
+ }
+
+ return result
+}
+
+// DecryptConfig translates decryptionKeys into a DescriptionConfig structure
+func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) {
+ var decryptConfig *encconfig.DecryptConfig
+ if len(decryptionKeys) > 0 {
+ // decryption
+ dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
+ if err != nil {
+ return nil, fmt.Errorf("invalid decryption keys: %w", err)
+ }
+ cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
+ decryptConfig = cc.DecryptConfig
+ }
+
+ return decryptConfig, nil
+}
+
+// EncryptConfig translates encryptionKeys into a EncriptionsConfig structure
+func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) {
+ var encLayers *[]int
+ var encConfig *encconfig.EncryptConfig
+
+ if len(encryptionKeys) > 0 {
+ // encryption
+ encLayers = &encryptLayers
+ ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid encryption keys: %w", err)
+ }
+ cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
+ encConfig = cc.EncryptConfig
+ }
+ return encConfig, encLayers, nil
+}
+
+// GetFormat translates format string into either docker or OCI format constant
+func GetFormat(format string) (string, error) {
+ switch format {
+ case define.OCI:
+ return define.OCIv1ImageManifest, nil
+ case define.DOCKER:
+ return define.Dockerv2ImageManifest, nil
+ default:
+ return "", fmt.Errorf("unrecognized image type %q", format)
+ }
+}
diff --git a/pkg/cli/common_test.go b/pkg/cli/common_test.go
new file mode 100644
index 0000000..7449a6c
--- /dev/null
+++ b/pkg/cli/common_test.go
@@ -0,0 +1,142 @@
+package cli
+
+import (
+ "testing"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/common/pkg/completion"
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+)
+
// testFlagCompletion cross-checks a flag set against its completion table:
// every visible, non-bool flag must have a completion function; bool flags
// must not have one; and every completion entry must name an existing flag.
func testFlagCompletion(t *testing.T, flags pflag.FlagSet, flagCompletions completion.FlagCompletions) {
	// lookup if for each flag a flag completion function exists
	flags.VisitAll(func(f *pflag.Flag) {
		// skip hidden and deprecated flags
		if f.Hidden || len(f.Deprecated) > 0 {
			return
		}
		if _, ok := flagCompletions[f.Name]; !ok && f.Value.Type() != "bool" {
			t.Errorf("Flag %q has no shell completion function set.", f.Name)
		} else if ok && f.Value.Type() == "bool" {
			// make sure bool flags don't have a completion function
			t.Errorf(`Flag %q is a bool flag but has a shell completion function set.
			You have to remove this shell completion function.`, f.Name)
			return

		}
	})

	// make sure no unnecessary flag completion functions are defined
	for name := range flagCompletions {
		if flag := flags.Lookup(name); flag == nil {
			t.Errorf("Flag %q does not exist but has a shell completion function set.", name)
		}
	}
}
+
+func TestUserNsFlagsCompletion(t *testing.T) {
+ flags := GetUserNSFlags(&UserNSResults{})
+ flagCompletions := GetUserNSFlagsCompletions()
+ testFlagCompletion(t, flags, flagCompletions)
+}
+
+func TestNameSpaceFlagsCompletion(t *testing.T) {
+ flags := GetNameSpaceFlags(&NameSpaceResults{})
+ flagCompletions := GetNameSpaceFlagsCompletions()
+ testFlagCompletion(t, flags, flagCompletions)
+}
+
+func TestBudFlagsCompletion(t *testing.T) {
+ flags := GetBudFlags(&BudResults{})
+ flagCompletions := GetBudFlagsCompletions()
+ testFlagCompletion(t, flags, flagCompletions)
+}
+
+func TestFromAndBudFlagsCompletions(t *testing.T) {
+ flags, err := GetFromAndBudFlags(&FromAndBudResults{}, &UserNSResults{}, &NameSpaceResults{})
+ if err != nil {
+ t.Error("Could load the from and build flags.")
+ }
+ flagCompletions := GetFromAndBudFlagsCompletions()
+ testFlagCompletion(t, flags, flagCompletions)
+}
+
// TestLookupEnvVarReferences exercises LookupEnvVarReferences across empty
// inputs, keys missing from the environment, exact matches, prefix ("x*")
// matches, and the whole-environment "*" spec.
func TestLookupEnvVarReferences(t *testing.T) {
	t.Run("EmptyInput", func(t *testing.T) {
		assert.Empty(t, LookupEnvVarReferences(nil, nil))
		assert.Empty(t, LookupEnvVarReferences([]string{}, nil))
	})

	t.Run("EmptyEnvironment", func(t *testing.T) {
		assert.Equal(t, []string{"a=b"}, LookupEnvVarReferences([]string{"a=b"}, nil))
		assert.Equal(t, []string{"a="}, LookupEnvVarReferences([]string{"a="}, nil))
		assert.Equal(t, []string{}, LookupEnvVarReferences([]string{"a"}, nil))
		assert.Equal(t, []string{}, LookupEnvVarReferences([]string{"*"}, nil))
	})

	t.Run("MissingEnvironment", func(t *testing.T) {
		assert.Equal(t,
			[]string{"a=b", "c="},
			LookupEnvVarReferences([]string{"a=b", "c="}, []string{"x=y"}))

		assert.Equal(t,
			[]string{"a=b"},
			LookupEnvVarReferences([]string{"a=b", "c"}, []string{"x=y"}))

		assert.Equal(t,
			[]string{"a=b"},
			LookupEnvVarReferences([]string{"a=b", "c*"}, []string{"x=y"}))
	})

	t.Run("MatchingEnvironment", func(t *testing.T) {
		assert.Equal(t,
			[]string{"a=b", "c="},
			LookupEnvVarReferences([]string{"a=b", "c="}, []string{"c=d", "x=y"}))

		assert.Equal(t,
			[]string{"a=b", "c=d"},
			LookupEnvVarReferences([]string{"a=b", "c"}, []string{"c=d", "x=y"}))

		assert.Equal(t,
			[]string{"a=b", "c=d"},
			LookupEnvVarReferences([]string{"a=b", "c*"}, []string{"c=d", "x=y"}))

		assert.Equal(t,
			[]string{"a=b", "c=d", "cg=i"},
			LookupEnvVarReferences([]string{"a=b", "c*"}, []string{"c=d", "x=y", "cg=i"}))
	})

	t.Run("MultipleMatches", func(t *testing.T) {
		// "*" copies the entire environment, and "c*"/"cg*" can re-match the
		// same entries, so duplicates are the expected behavior here.
		assert.Equal(t,
			[]string{"a=b", "c=d", "cg=i", "c=d", "x=y", "cg=i", "cg=i"},
			LookupEnvVarReferences([]string{"a=b", "c*", "*", "cg*"}, []string{"c=d", "x=y", "cg=i"}))
	})
}
+
+func TestDecryptConfig(t *testing.T) {
+ // Just a smoke test for the default path.
+ res, err := DecryptConfig(nil)
+ assert.NoError(t, err)
+ assert.Nil(t, res)
+}
+
+func TestEncryptConfig(t *testing.T) {
+ // Just a smoke test for the default path.
+ cfg, layers, err := EncryptConfig(nil, nil)
+ assert.NoError(t, err)
+ assert.Nil(t, cfg)
+ assert.Nil(t, layers)
+}
+
+func TestGetFormat(t *testing.T) {
+ _, err := GetFormat("bogus")
+ assert.NotNil(t, err)
+
+ format, err := GetFormat("oci")
+ assert.Nil(t, err)
+ assert.Equalf(t, define.OCIv1ImageManifest, format, "expected oci format but got %v.", format)
+ format, err = GetFormat("docker")
+ assert.Nil(t, err)
+ assert.Equalf(t, define.Dockerv2ImageManifest, format, "expected docker format but got %v.", format)
+}
diff --git a/pkg/cli/exec_codes.go b/pkg/cli/exec_codes.go
new file mode 100644
index 0000000..7ba42e9
--- /dev/null
+++ b/pkg/cli/exec_codes.go
@@ -0,0 +1,13 @@
+package cli
+
const (
	// ExecErrorCodeGeneric is the default error code to return from an exec session if libpod failed
	// prior to calling the runtime.
	ExecErrorCodeGeneric = 125
	// ExecErrorCodeCannotInvoke is the error code to return when the runtime fails to invoke a command;
	// an example of this can be found by trying to execute a directory:
	// `podman exec -l /etc`
	ExecErrorCodeCannotInvoke = 126
	// ExecErrorCodeNotFound is the error code to return when a command cannot be found.
	ExecErrorCodeNotFound = 127
)
diff --git a/pkg/completion/completion.go b/pkg/completion/completion.go
new file mode 100644
index 0000000..a7812d2
--- /dev/null
+++ b/pkg/completion/completion.go
@@ -0,0 +1,23 @@
+package completion
+
+import (
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+/* Autocomplete Functions for cobra ValidArgsFunction */
+
+// AutocompleteNamespaceFlag - Autocomplete the userns flag.
+// -> host, private, container, ns:[path], [path]
+func AutocompleteNamespaceFlag(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ var completions []string
+ // If we don't filter on "toComplete", zsh and fish will not do file completion
+ // even if the prefix typed by the user does not match the returned completions
+ for _, comp := range []string{"host", "private", "container", "ns:"} {
+ if strings.HasPrefix(comp, toComplete) {
+ completions = append(completions, comp)
+ }
+ }
+ return completions, cobra.ShellCompDirectiveDefault
+}
diff --git a/pkg/dummy/dummy_test.go b/pkg/dummy/dummy_test.go
new file mode 100644
index 0000000..4326a92
--- /dev/null
+++ b/pkg/dummy/dummy_test.go
@@ -0,0 +1,8 @@
+package dummy
+
+import (
+ "testing"
+)
+
// TestDummy is an intentionally empty placeholder test with no assertions.
func TestDummy(t *testing.T) {
}
diff --git a/pkg/formats/formats.go b/pkg/formats/formats.go
new file mode 100644
index 0000000..676da30
--- /dev/null
+++ b/pkg/formats/formats.go
@@ -0,0 +1,166 @@
+package formats
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "text/tabwriter"
+ "text/template"
+
+ "golang.org/x/term"
+ "sigs.k8s.io/yaml"
+)
+
const (
	// JSONString const to save on duplicate variable names.
	JSONString = "json"
	// IDString const to save on duplicates for Go templates.
	IDString = "{{.ID}}"

	// parsingErrorStr prefixes errors returned when a user-supplied Go
	// template fails to parse or execute.
	parsingErrorStr = "Template parsing error"
)
+
// Writer is implemented by every output formatter in this package; Out
// renders the held value(s) to standard output.
type Writer interface {
	Out() error
}

// JSONStructArray formats a slice of values as a JSON array.
type JSONStructArray struct {
	Output []interface{}
}

// StdoutTemplateArray formats a slice of values through a Go template,
// optionally preceded by a "table" header row built from Fields.
type StdoutTemplateArray struct {
	Output []interface{}
	Template string
	// Fields supplies the data for the header template when Template
	// uses the "table" prefix.
	Fields map[string]string
}

// JSONStruct formats a single value as indented JSON.
type JSONStruct struct {
	Output interface{}
}

// StdoutTemplate formats a single value through a Go template.
type StdoutTemplate struct {
	Output interface{}
	Template string
	Fields map[string]string
}

// YAMLStruct formats a single value as YAML.
type YAMLStruct struct {
	Output interface{}
}
+
+func setJSONFormatEncoder(isTerminal bool, w io.Writer) *json.Encoder {
+ enc := json.NewEncoder(w)
+ enc.SetIndent("", " ")
+ if isTerminal {
+ enc.SetEscapeHTML(false)
+ }
+ return enc
+}
+
+// Out method for JSON Arrays
+func (j JSONStructArray) Out() error {
+ buf := bytes.NewBuffer(nil)
+ enc := setJSONFormatEncoder(term.IsTerminal(int(os.Stdout.Fd())), buf)
+ if err := enc.Encode(j.Output); err != nil {
+ return err
+ }
+ data := buf.Bytes()
+
+ // JSON returns a byte array with a literal null [110 117 108 108] in it
+ // if it is passed empty data. We used bytes.Compare to see if that is
+ // the case.
+ if diff := bytes.Compare(data, []byte("null")); diff == 0 {
+ data = []byte("[]")
+ }
+
+ // If the we did get NULL back, we should spit out {} which is
+ // at least valid JSON for the consumer.
+ fmt.Printf("%s", data)
+ humanNewLine()
+ return nil
+}
+
+// Out method for Go templates
+func (t StdoutTemplateArray) Out() error {
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
+ if strings.HasPrefix(t.Template, "table") {
+ // replace any spaces with tabs in template so that tabwriter can align it
+ t.Template = strings.Replace(strings.TrimSpace(t.Template[5:]), " ", "\t", -1)
+ headerTmpl, err := template.New("header").Funcs(headerFunctions).Parse(t.Template)
+ if err != nil {
+ return fmt.Errorf("%v: %w", parsingErrorStr, err)
+ }
+ err = headerTmpl.Execute(w, t.Fields)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintln(w, "")
+ }
+ t.Template = strings.Replace(t.Template, " ", "\t", -1)
+ tmpl, err := template.New("image").Funcs(basicFunctions).Parse(t.Template)
+ if err != nil {
+ return fmt.Errorf("%v: %w", parsingErrorStr, err)
+ }
+ for _, raw := range t.Output {
+ basicTmpl := tmpl.Funcs(basicFunctions)
+ if err := basicTmpl.Execute(w, raw); err != nil {
+ return fmt.Errorf("%v: %w", parsingErrorStr, err)
+ }
+ fmt.Fprintln(w, "")
+ }
+ return w.Flush()
+}
+
+// Out method for JSON struct
+func (j JSONStruct) Out() error {
+ data, err := json.MarshalIndent(j.Output, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s", data)
+ humanNewLine()
+ return nil
+}
+
+//Out method for Go templates
+func (t StdoutTemplate) Out() error {
+ tmpl, err := template.New("image").Parse(t.Template)
+ if err != nil {
+ return fmt.Errorf("template parsing error: %w", err)
+ }
+ err = tmpl.Execute(os.Stdout, t.Output)
+ if err != nil {
+ return err
+ }
+ humanNewLine()
+ return nil
+}
+
+// Out method for YAML
+func (y YAMLStruct) Out() error {
+ var buf []byte
+ var err error
+ buf, err = yaml.Marshal(y.Output)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s", string(buf))
+ humanNewLine()
+ return nil
+}
+
+// humanNewLine prints a new line at the end of the output only if stdout is the terminal
+func humanNewLine() {
+ if term.IsTerminal(int(os.Stdout.Fd())) {
+ fmt.Println()
+ }
+}
diff --git a/pkg/formats/formats_test.go b/pkg/formats/formats_test.go
new file mode 100644
index 0000000..628da01
--- /dev/null
+++ b/pkg/formats/formats_test.go
@@ -0,0 +1,44 @@
+package formats
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
// ImageData is a minimal fixture for the encoder tests; Author carries
// characters ('<', '>') that are subject to JSON HTML escaping.
type ImageData struct {
	Author string `json:"Author"`
}
+
+func TestSetJSONFormatEncoder(t *testing.T) {
+ tt := []struct {
+ name string
+ imageData *ImageData
+ expected string
+ isTerminal bool
+ }{
+ {
+ name: "HTML tags are not escaped",
+ imageData: &ImageData{Author: "dave <dave@corp.io>"},
+ expected: `"Author": "dave <dave@corp.io>"`,
+ isTerminal: true,
+ },
+ {
+ name: "HTML tags are escaped",
+ imageData: &ImageData{Author: "dave <dave@corp.io>"},
+ expected: `"Author": "dave \u003cdave@corp.io\u003e"`,
+ isTerminal: false,
+ },
+ }
+
+ for _, tc := range tt {
+ buf := bytes.NewBuffer(nil)
+ enc := setJSONFormatEncoder(tc.isTerminal, buf)
+ if err := enc.Encode(tc.imageData); err != nil {
+ t.Errorf("test %#v failed encoding: %s", tc.name, err)
+ }
+ if !strings.Contains(buf.String(), tc.expected) {
+ t.Errorf("test %#v expected output to contain %#v. Output:\n%v\n", tc.name, tc.expected, buf.String())
+ }
+ }
+}
diff --git a/pkg/formats/templates.go b/pkg/formats/templates.go
new file mode 100644
index 0000000..66f3ba3
--- /dev/null
+++ b/pkg/formats/templates.go
@@ -0,0 +1,82 @@
+package formats
+
+import (
+ "bytes"
+ "encoding/json"
+ "strings"
+ "text/template"
+)
+
// basicFunctions are the set of initial
// functions provided to every template.
var basicFunctions = template.FuncMap{
	// json renders any value as compact single-line JSON without HTML
	// escaping; encoding errors are deliberately ignored (best effort).
	"json": func(v interface{}) string {
		buf := &bytes.Buffer{}
		enc := json.NewEncoder(buf)
		enc.SetEscapeHTML(false)
		_ = enc.Encode(v)
		// Remove the trailing new line added by the encoder
		return strings.TrimSpace(buf.String())
	},
	"split": strings.Split,
	"join": strings.Join,
	// strings.Title is deprecated since go 1.18
	// However for our use case it is still fine. The recommended replacement
	// is adding about 400kb binary size so lets keep using this for now.
	//nolint:staticcheck
	"title": strings.Title,
	"lower": strings.ToLower,
	"upper": strings.ToUpper,
	"pad": padWithSpace,
	"truncate": truncateWithLength,
}
+
// headerFunctions are used to create the headers of a table.
// This is a replacement of basicFunctions for header generation
// because we want the header to remain intact.
// Some functions like `split` are irrelevant so not added.
var headerFunctions = template.FuncMap{
	"json": func(v string) string {
		return v
	},
	"title": func(v string) string {
		return v
	},
	"lower": func(v string) string {
		return v
	},
	"upper": func(v string) string {
		return v
	},
	// The length argument is required by the template call site but
	// deliberately ignored: headers must never be truncated.
	"truncate": func(v string, _ int) string {
		return v
	},
}
+
// Parse creates a new anonymous template with the basic functions
// and parses the given format. It is equivalent to NewParse with an
// empty template name.
func Parse(format string) (*template.Template, error) {
	return NewParse("", format)
}
+
// NewParse creates a new tagged template with the basic functions
// (json, split, join, title, lower, upper, pad, truncate) attached
// and parses the given format.
func NewParse(tag, format string) (*template.Template, error) {
	return template.New(tag).Funcs(basicFunctions).Parse(format)
}
+
// padWithSpace surrounds a non-empty source with prefix leading and
// suffix trailing spaces; an empty source is returned unchanged.
func padWithSpace(source string, prefix, suffix int) string {
	if len(source) == 0 {
		return source
	}
	var padded strings.Builder
	padded.WriteString(strings.Repeat(" ", prefix))
	padded.WriteString(source)
	padded.WriteString(strings.Repeat(" ", suffix))
	return padded.String()
}
+
// truncateWithLength returns at most length bytes of source; shorter
// inputs come back unchanged.
func truncateWithLength(source string, length int) string {
	if length >= len(source) {
		return source
	}
	return source[:length]
}
diff --git a/pkg/jail/jail.go b/pkg/jail/jail.go
new file mode 100644
index 0000000..fdaca5a
--- /dev/null
+++ b/pkg/jail/jail.go
@@ -0,0 +1,180 @@
+//go:build freebsd
+// +build freebsd
+
+package jail
+
+import (
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
// NS is the value type for jail namespace-style parameters
// (ip4, ip6, host, vnet).
type NS int32

const (
	// DISABLED, NEW and INHERIT are the accepted values for NS
	// parameters; Set rejects DISABLED for "host" and "vnet".
	DISABLED NS = 0
	NEW NS = 1
	INHERIT NS = 2

	// Command flags passed to jail_set(2): create a new jail, update
	// an existing one, and/or attach the calling process to it.
	JAIL_CREATE = 0x01
	JAIL_UPDATE = 0x02
	JAIL_ATTACH = 0x04
)
+
// config accumulates jail parameters (name -> typed value) before they
// are flattened into iovecs for the jail system calls.
type config struct {
	params map[string]interface{}
}
+
+func NewConfig() *config {
+ return &config{
+ params: make(map[string]interface{}),
+ }
+}
+
// handleBoolSetting converts a boolean jail parameter into jail's
// paired-name form: a false value rewrites the key's last dotted
// element with a "no" prefix (e.g. "allow.mount" -> "allow.nomount"),
// while a true value leaves the key alone. The returned value is always
// nil because boolean parameters carry no payload.
func handleBoolSetting(key string, val bool) (string, interface{}) {
	// jail doesn't deal with booleans - it uses paired parameter
	// names, e.g. "persist"/"nopersist". If the key contains '.',
	// the "no" prefix is applied to the last element.
	if !val {
		parts := strings.Split(key, ".")
		parts[len(parts)-1] = "no" + parts[len(parts)-1]
		key = strings.Join(parts, ".")
	}
	return key, nil
}
+
// Set records a jail parameter after validating that value has the type
// the jail interface expects for the given key. Integer inputs are
// normalized to int32, and boolean parameters are rewritten into the
// paired "name"/"noname" form by handleBoolSetting. A type mismatch is
// fatal: the process exits via logrus.Fatalf.
func (c *config) Set(key string, value interface{}) {
	// Normalise integer types to int32
	switch v := value.(type) {
	case int:
		value = int32(v)
	case uint32:
		value = int32(v)
	}

	switch key {
	case "jid", "devfs_ruleset", "enforce_statfs", "children.max", "securelevel":
		// Numeric parameters must have arrived as (or been normalized
		// to) int32.
		if _, ok := value.(int32); !ok {
			logrus.Fatalf("value for parameter %s must be an int32", key)
		}
	case "ip4", "ip6", "host", "vnet":
		// Namespace-style parameters take a jail.NS value; "host" and
		// "vnet" additionally reject DISABLED.
		nsval, ok := value.(NS)
		if !ok {
			logrus.Fatalf("value for parameter %s must be a jail.NS", key)
		}
		if (key == "host" || key == "vnet") && nsval == DISABLED {
			logrus.Fatalf("value for parameter %s cannot be DISABLED", key)
		}
	case "persist", "sysvmsg", "sysvsem", "sysvshm":
		// Boolean parameters are stored under the (possibly "no"-prefixed)
		// key with a nil value.
		bval, ok := value.(bool)
		if !ok {
			logrus.Fatalf("value for parameter %s must be bool", key)
		}
		key, value = handleBoolSetting(key, bval)
	default:
		if strings.HasPrefix(key, "allow.") {
			// All allow.* parameters are booleans as well.
			bval, ok := value.(bool)
			if !ok {
				logrus.Fatalf("value for parameter %s must be bool", key)
			}
			key, value = handleBoolSetting(key, bval)
		} else {
			// Everything else is passed through as a string parameter.
			if _, ok := value.(string); !ok {
				logrus.Fatalf("value for parameter %s must be a string", key)
			}
		}
	}
	c.params[key] = value
}
+
// getIovec flattens the parameter map into the alternating name/value
// iovec list consumed by jail_set(2)/jail_get(2). String values become
// NUL-terminated byte buffers, int32 and NS values are passed as
// pointers to their 4-byte storage, and valueless (boolean) parameters
// get a nil base with zero length.
func (c *config) getIovec() ([]syscall.Iovec, error) {
	jiov := make([]syscall.Iovec, 0)
	for key, value := range c.params {
		// Parameter name.
		iov, err := stringToIovec(key)
		if err != nil {
			return nil, err
		}
		jiov = append(jiov, iov)
		// Parameter value, encoded according to its dynamic type.
		switch v := value.(type) {
		case string:
			iov, err := stringToIovec(v)
			if err != nil {
				return nil, err
			}
			jiov = append(jiov, iov)
		case int32:
			// v is a fresh variable in each case binding, so taking
			// its address is safe; the iovec keeps it reachable.
			jiov = append(jiov, syscall.Iovec{
				Base: (*byte)(unsafe.Pointer(&v)),
				Len: 4,
			})
		case NS:
			jiov = append(jiov, syscall.Iovec{
				Base: (*byte)(unsafe.Pointer(&v)),
				Len: 4,
			})
		default:
			// Booleans (stored as nil) are expressed by the key alone.
			jiov = append(jiov, syscall.Iovec{
				Base: nil,
				Len: 0,
			})
		}
	}
	return jiov, nil
}
+
// jail represents a created or looked-up jail by its kernel-assigned
// jail ID (JID).
type jail struct {
	jid int32
}
+
// jailSet invokes jail_set(2) with the iovec form of jconf and the
// given flags (JAIL_CREATE, JAIL_UPDATE, optionally |JAIL_ATTACH) and
// returns the jail whose ID the kernel reports.
func jailSet(jconf *config, flags int) (*jail, error) {
	jiov, err := jconf.getIovec()
	if err != nil {
		return nil, err
	}

	jid, _, errno := syscall.Syscall(unix.SYS_JAIL_SET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags))
	if errno != 0 {
		return nil, errno
	}
	return &jail{
		jid: int32(jid),
	}, nil
}
+
// jailGet invokes jail_get(2) to look up an existing jail matching the
// parameters in jconf, returning the jail for the kernel-reported ID.
func jailGet(jconf *config, flags int) (*jail, error) {
	jiov, err := jconf.getIovec()
	if err != nil {
		return nil, err
	}

	jid, _, errno := syscall.Syscall(unix.SYS_JAIL_GET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags))
	if errno != 0 {
		return nil, errno
	}
	return &jail{
		jid: int32(jid),
	}, nil
}
+
// Create creates a new jail from jconf without attaching the caller.
func Create(jconf *config) (*jail, error) {
	return jailSet(jconf, JAIL_CREATE)
}
+
// CreateAndAttach creates a new jail from jconf and attaches the
// calling process to it in the same operation.
func CreateAndAttach(jconf *config) (*jail, error) {
	return jailSet(jconf, JAIL_CREATE|JAIL_ATTACH)
}
+
// FindByName looks up an existing jail by its "name" parameter.
func FindByName(name string) (*jail, error) {
	jconf := NewConfig()
	jconf.Set("name", name)
	return jailGet(jconf, 0)
}
+
// Set applies the parameters in jconf to the existing jail identified
// by j.jid via a JAIL_UPDATE operation.
func (j *jail) Set(jconf *config) error {
	jconf.Set("jid", j.jid)
	_, err := jailSet(jconf, JAIL_UPDATE)
	return err
}
diff --git a/pkg/jail/jail_int32.go b/pkg/jail/jail_int32.go
new file mode 100644
index 0000000..3e56bb6
--- /dev/null
+++ b/pkg/jail/jail_int32.go
@@ -0,0 +1,20 @@
+//go:build (386 || arm) && freebsd
+// +build 386 arm
+// +build freebsd
+
+package jail
+
+import (
+ "syscall"
+)
+
// stringToIovec wraps val in a syscall.Iovec pointing at a
// NUL-terminated copy of the string. On 32-bit targets (386, arm) the
// iovec length field is 32 bits wide, hence this build-tagged variant.
func stringToIovec(val string) (syscall.Iovec, error) {
	bs, err := syscall.ByteSliceFromString(val)
	if err != nil {
		return syscall.Iovec{}, err
	}
	var res syscall.Iovec
	res.Base = &bs[0]
	res.Len = uint32(len(bs))
	return res, nil
}
diff --git a/pkg/jail/jail_int64.go b/pkg/jail/jail_int64.go
new file mode 100644
index 0000000..dace13f
--- /dev/null
+++ b/pkg/jail/jail_int64.go
@@ -0,0 +1,19 @@
+//go:build !(386 || arm) && freebsd
+// +build !386,!arm,freebsd
+
+package jail
+
+import (
+ "syscall"
+)
+
// stringToIovec wraps val in a syscall.Iovec pointing at a
// NUL-terminated copy of the string; the length includes the NUL.
// This is the 64-bit variant (iovec length field is 64 bits wide).
func stringToIovec(val string) (syscall.Iovec, error) {
	buf, err := syscall.ByteSliceFromString(val)
	if err != nil {
		return syscall.Iovec{}, err
	}
	iov := syscall.Iovec{Base: &buf[0], Len: uint64(len(buf))}
	return iov, nil
}
diff --git a/pkg/manifests/compat.go b/pkg/manifests/compat.go
new file mode 100644
index 0000000..dfb63b3
--- /dev/null
+++ b/pkg/manifests/compat.go
@@ -0,0 +1,28 @@
+// This package is deprecated. Its functionality has been moved to
+// github.com/containers/common/pkg/manifests, which provides the same API.
+// The stubs and aliases here are present for compatibility with older code.
+// New implementations should use github.com/containers/common/pkg/manifests
+// directly.
+package manifests
+
+import "github.com/containers/common/pkg/manifests"
+
// List is an alias for github.com/containers/common/pkg/manifests.List.
//
// Deprecated: use github.com/containers/common/pkg/manifests.List directly.
type List = manifests.List

var (
	// ErrDigestNotFound is an alias for github.com/containers/common/pkg/manifests.ErrDigestNotFound.
	ErrDigestNotFound = manifests.ErrDigestNotFound
	// ErrManifestTypeNotSupported is an alias for github.com/containers/common/pkg/manifests.ErrManifestTypeNotSupported.
	ErrManifestTypeNotSupported = manifests.ErrManifestTypeNotSupported
)
+
// Create wraps github.com/containers/common/pkg/manifests.Create().
//
// Deprecated: use github.com/containers/common/pkg/manifests.Create directly.
func Create() List {
	return manifests.Create()
}
+
// FromBlob wraps github.com/containers/common/pkg/manifests.FromBlob().
//
// Deprecated: use github.com/containers/common/pkg/manifests.FromBlob directly.
func FromBlob(manifestBytes []byte) (List, error) {
	return manifests.FromBlob(manifestBytes)
}
diff --git a/pkg/overlay/overlay.go b/pkg/overlay/overlay.go
new file mode 100644
index 0000000..e416ecd
--- /dev/null
+++ b/pkg/overlay/overlay.go
@@ -0,0 +1,242 @@
+package overlay
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "errors"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
// Options type holds various configuration options for overlay.
// MountWithOptions accepts the following type so it is easier to specify
// more verbose configuration for an overlay mount.
type Options struct {
	// UpperDirOptionFragment overrides the upper (writable) layer of the
	// overlay mount.
	// Note!! : This API does not handle escaping or validate the values
	// passed to UpperDirOptionFragment; the value is passed as-is to the
	// `mount` command. It is the user's responsibility to pre-validate
	// these values. Invalid inputs may lead to undefined behaviour.
	// This is provided as-is, use it if it works for you, we can/will change/break that in the future.
	// See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
	// TODO: Should we address above comment and handle escaping of metacharacters like
	// `comma`, `backslash` ,`colon` and any other special characters
	UpperDirOptionFragment string
	// WorkDirOptionFragment overrides the workdir used to prepare files
	// as they are switched between the layers.
	// Note!! : This API does not handle escaping or validate the values
	// passed to WorkDirOptionFragment; the value is passed as-is to the
	// `mount` command. It is the user's responsibility to pre-validate
	// these values. Invalid inputs may lead to undefined behaviour.
	// This is provided as-is, use it if it works for you, we can/will change/break that in the future.
	// See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
	// TODO: Should we address above comment and handle escaping of metacharacters like
	// `comma`, `backslash` ,`colon` and any other special characters
	WorkDirOptionFragment string
	// Graph options relayed from podman, will be responsible for choosing mount program
	GraphOpts []string
	// Mark if following overlay is read only
	ReadOnly bool
	// RootUID is not used yet but keeping it here for legacy reasons.
	RootUID int
	// RootGID is not used yet but keeping it here for legacy reasons.
	RootGID int
}
+
+// TempDir generates an overlay Temp directory in the container content
+func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
+ contentDir := filepath.Join(containerDir, "overlay")
+ if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
+ }
+
+ contentDir, err := os.MkdirTemp(contentDir, "")
+ if err != nil {
+ return "", fmt.Errorf("failed to create the overlay tmpdir in %s directory: %w", contentDir, err)
+ }
+
+ return generateOverlayStructure(contentDir, rootUID, rootGID)
+}
+
// GenerateStructure generates an overlay directory structure for
// container content: containerDir/overlay-containers/<containerID>/<name>
// populated with upper/work/merge subdirectories, owned by rootUID:rootGID.
func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID int) (string, error) {
	contentDir := filepath.Join(containerDir, "overlay-containers", containerID, name)
	if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
		return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
	}

	return generateOverlayStructure(contentDir, rootUID, rootGID)
}
+
+// generateOverlayStructure generates upper, work and merge directory structure for overlay directory
+func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string, error) {
+ upperDir := filepath.Join(containerDir, "upper")
+ workDir := filepath.Join(containerDir, "work")
+ if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", upperDir, err)
+ }
+ if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", workDir, err)
+ }
+ mergeDir := filepath.Join(containerDir, "merge")
+ if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil {
+ return "", fmt.Errorf("failed to create the overlay %s directory: %w", mergeDir, err)
+ }
+
+ return containerDir, nil
+}
+
// Mount creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// It is a convenience wrapper around MountWithOptions with a writable
// (ReadOnly=false) overlay.
func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
	overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: false, RootUID: rootUID, RootGID: rootGID}
	return MountWithOptions(contentDir, source, dest, &overlayOpts)
}
+
// MountReadOnly creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller. Note that no
// upper layer will be created, rendering it a read-only mount. It is a
// convenience wrapper around MountWithOptions with ReadOnly=true.
func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
	overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: true, RootUID: rootUID, RootGID: rootGID}
	return MountWithOptions(contentDir, source, dest, &overlayOpts)
}
+
// findMountProgram returns the mount program named in the storage graph
// options under any of the recognized *.mount_program keys, or "" when
// none is present.
func findMountProgram(graphOptions []string) string {
	mountMap := map[string]bool{
		".mount_program":         true,
		"overlay.mount_program":  true,
		"overlay2.mount_program": true,
	}

	for _, option := range graphOptions {
		// strings.Cut splits on the first "=" only, so values may
		// themselves contain "=".
		key, val, ok := strings.Cut(option, "=")
		if !ok {
			continue
		}
		if mountMap[key] {
			return val
		}
	}

	return ""
}
+
// mountWithMountProgram mounts an overlay at mergeDir by running the
// given mount program with "-o <overlayOptions> <mergeDir>".
func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error {
	if err := exec.Command(mountProgram, "-o", overlayOptions, mergeDir).Run(); err != nil {
		return fmt.Errorf("exec %s: %w", mountProgram, err)
	}
	return nil
}
+
// escapeColon escapes ":" as "\:" so a path can be embedded in an
// overlay mount option string, where a bare colon separates layers.
func escapeColon(source string) string {
	return strings.Join(strings.Split(source, ":"), "\\:")
}
+
// RemoveTemp removes temporary mountpoint and all content from its parent
// directory. The mountpoint is unmounted first; if unmounting fails the
// tree is left in place and the error returned.
func RemoveTemp(contentDir string) error {
	if err := Unmount(contentDir); err != nil {
		return err
	}

	return os.RemoveAll(contentDir)
}
+
// Unmount detaches the overlay mounted at contentDir/merge. In rootless
// mode it first tries the fusermount3/fusermount helpers, falling back
// to unix.Unmount. A merge dir that does not exist or is not a mount
// point (EINVAL) is not an error.
func Unmount(contentDir string) error {
	mergeDir := filepath.Join(contentDir, "merge")

	if unshare.IsRootless() {
		// Attempt to unmount the FUSE mount using either fusermount or fusermount3.
		// If they fail, fallback to unix.Unmount
		for _, v := range []string{"fusermount3", "fusermount"} {
			err := exec.Command(v, "-u", mergeDir).Run()
			if err != nil && !errors.Is(err, exec.ErrNotFound) {
				logrus.Debugf("Error unmounting %s with %s - %v", mergeDir, v, err)
			}
			if err == nil {
				return nil
			}
		}
		// If fusermount|fusermount3 failed to unmount the FUSE file system, attempt unmount
	}

	// Ignore EINVAL as the specified merge dir is not a mount point
	if err := unix.Unmount(mergeDir, 0); err != nil && !errors.Is(err, os.ErrNotExist) && err != unix.EINVAL {
		return fmt.Errorf("unmount overlay %s: %w", mergeDir, err)
	}
	return nil
}
+
// recreate empties contentDir by removing and re-creating it with the
// same mode and ownership it had before. A directory that does not
// exist is left absent and reported as success.
func recreate(contentDir string) error {
	st, err := system.Stat(contentDir)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil
		}
		return fmt.Errorf("failed to stat overlay upper directory: %w", err)
	}

	if err := os.RemoveAll(contentDir); err != nil {
		return err
	}

	// Restore the mode and ownership captured before removal.
	if err := idtools.MkdirAllAs(contentDir, os.FileMode(st.Mode()), int(st.UID()), int(st.GID())); err != nil {
		return fmt.Errorf("failed to create overlay directory: %w", err)
	}
	return nil
}
+
+// CleanupMount removes all temporary mountpoint content
+func CleanupMount(contentDir string) (Err error) {
+ if err := recreate(filepath.Join(contentDir, "upper")); err != nil {
+ return err
+ }
+ if err := recreate(filepath.Join(contentDir, "work")); err != nil {
+ return err
+ }
+ return nil
+}
+
// CleanupContent unmounts and removes every per-mount subdirectory
// under containerDir/overlay, then deletes the overlay directory
// itself. A missing overlay directory is not an error.
func CleanupContent(containerDir string) (Err error) {
	contentDir := filepath.Join(containerDir, "overlay")

	files, err := os.ReadDir(contentDir)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil
		}
		return fmt.Errorf("read directory: %w", err)
	}
	// Unmount each temporary mountpoint before removing the tree.
	for _, f := range files {
		dir := filepath.Join(contentDir, f.Name())
		if err := Unmount(dir); err != nil {
			return err
		}
	}

	if err := os.RemoveAll(contentDir); err != nil && !errors.Is(err, os.ErrNotExist) {
		return fmt.Errorf("failed to cleanup overlay directory: %w", err)
	}
	return nil
}
diff --git a/pkg/overlay/overlay_freebsd.go b/pkg/overlay/overlay_freebsd.go
new file mode 100644
index 0000000..e814a32
--- /dev/null
+++ b/pkg/overlay/overlay_freebsd.go
@@ -0,0 +1,31 @@
+package overlay
+
+import (
+ //"fmt"
+ //"os"
+ //"path/filepath"
+ //"strings"
+ //"syscall"
+ "errors"
+
+ //"github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// MountWithOptions creates a subdir of the contentDir based on the source directory
+// from the source system. It then mounts up the source directory on to the
+// generated mount point and returns the mount point to the caller.
+// But allows api to set custom workdir, upperdir and other overlay options
+// Following API is being used by podman at the moment
+func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
+ if opts.ReadOnly {
+ // Read-only overlay mounts can be simulated with nullfs
+ mount.Source = source
+ mount.Destination = dest
+ mount.Type = "nullfs"
+ mount.Options = []string{"ro"}
+ return mount, nil
+ } else {
+ return mount, errors.New("read/write overlay mounts not supported on freebsd")
+ }
+}
diff --git a/pkg/overlay/overlay_linux.go b/pkg/overlay/overlay_linux.go
new file mode 100644
index 0000000..9bd72bc
--- /dev/null
+++ b/pkg/overlay/overlay_linux.go
@@ -0,0 +1,80 @@
+package overlay
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// MountWithOptions creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// But allows api to set custom workdir, upperdir and other overlay options
// Following API is being used by podman at the moment
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
	mergeDir := filepath.Join(contentDir, "merge")

	// Create overlay mount options for rw/ro.
	var overlayOptions string
	if opts.ReadOnly {
		// Read-only overlay mounts require two lower layer.
		lowerTwo := filepath.Join(contentDir, "lower")
		if err := os.Mkdir(lowerTwo, 0755); err != nil {
			return mount, err
		}
		overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)
	} else {
		// Read-write overlay mounts want a lower, upper and a work layer.
		workDir := filepath.Join(contentDir, "work")
		upperDir := filepath.Join(contentDir, "upper")

		// Caller-supplied fragments override the default upper/work
		// dirs, but only when both are provided.
		if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" {
			workDir = opts.WorkDirOptionFragment
			upperDir = opts.UpperDirOptionFragment
		}

		// Make the upper dir mirror the source's mode and ownership so
		// files created through the overlay match the source.
		st, err := os.Stat(source)
		if err != nil {
			return mount, err
		}
		if err := os.Chmod(upperDir, st.Mode()); err != nil {
			return mount, err
		}
		if stat, ok := st.Sys().(*syscall.Stat_t); ok {
			if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil {
				return mount, err
			}
		}
		overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir)
	}

	// If the storage configuration names a mount program (e.g.
	// fuse-overlayfs), run it to perform the mount and hand back a
	// bind mount of the result instead of a kernel overlay entry.
	mountProgram := findMountProgram(opts.GraphOpts)
	if mountProgram != "" {
		if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil {
			return mount, err
		}

		mount.Source = mergeDir
		mount.Destination = dest
		mount.Type = "bind"
		mount.Options = []string{"bind", "slave"}
		return mount, nil
	}

	if unshare.IsRootless() {
		/* If a mount_program is not specified, fallback to try mounting native overlay. */
		overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions)
	}

	mount.Source = mergeDir
	mount.Destination = dest
	mount.Type = "overlay"
	mount.Options = strings.Split(overlayOptions, ",")

	return mount, nil
}
diff --git a/pkg/parse/parse.go b/pkg/parse/parse.go
new file mode 100644
index 0000000..d865f50
--- /dev/null
+++ b/pkg/parse/parse.go
@@ -0,0 +1,1198 @@
+package parse
+
+// this package should contain functions that parse and validate
+// user input and is shared either amongst buildah subcommands or
+// would be useful to projects vendoring buildah
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/containers/buildah/define"
+ mkcwtypes "github.com/containers/buildah/internal/mkcw/types"
+ internalParse "github.com/containers/buildah/internal/parse"
+ "github.com/containers/buildah/internal/tmpdir"
+ "github.com/containers/buildah/pkg/sshagent"
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/parse"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/unshare"
+ storageTypes "github.com/containers/storage/types"
+ securejoin "github.com/cyphar/filepath-securejoin"
+ units "github.com/docker/go-units"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/openshift/imagebuilder"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "golang.org/x/term"
+)
+
const (
	// SeccompDefaultPath defines the default seccomp path.
	SeccompDefaultPath = config.SeccompDefaultPath
	// SeccompOverridePath, if it exists, overrides the default seccomp path.
	SeccompOverridePath = config.SeccompOverridePath
	// TypeBind is the type for mounting host dir.
	TypeBind = "bind"
	// TypeTmpfs is the type for mounting tmpfs.
	TypeTmpfs = "tmpfs"
	// TypeCache is the type for mounting a common persistent cache from host.
	TypeCache = "cache"
	// BuildahCacheDir is the directory name used by mount=type=cache; it must
	// be persistent on the host so it is available for all consecutive builds.
	// The lifecycle of the directory is inherited from how the host machine
	// treats its temporary directory.
	BuildahCacheDir = "buildah-cache"
)
+
+// RepoNamesToNamedReferences parse the raw string to Named reference
+func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) {
+ var result []reference.Named
+ for _, dest := range destList {
+ named, err := reference.ParseNormalizedNamed(dest)
+ if err != nil {
+ return nil, fmt.Errorf("invalid repo %q: must contain registry and repository: %w", dest, err)
+ }
+ if !reference.IsNameOnly(named) {
+ return nil, fmt.Errorf("repository must contain neither a tag nor digest: %v", named)
+ }
+ result = append(result, named)
+ }
+ return result, nil
+}
+
// CommonBuildOptions parses the build options from the bud cli.
// It is a convenience wrapper that delegates to CommonBuildOptionsFromFlagSet
// using the command's own flag set and flag-lookup function.
func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
	return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag)
}
+
+// CommonBuildOptionsFromFlagSet parses the build options from the bud cli
+func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) {
+ var (
+ memoryLimit int64
+ memorySwap int64
+ noDNS bool
+ err error
+ )
+
+ memVal, _ := flags.GetString("memory")
+ if memVal != "" {
+ memoryLimit, err = units.RAMInBytes(memVal)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value for memory: %w", err)
+ }
+ }
+
+ memSwapValue, _ := flags.GetString("memory-swap")
+ if memSwapValue != "" {
+ if memSwapValue == "-1" {
+ memorySwap = -1
+ } else {
+ memorySwap, err = units.RAMInBytes(memSwapValue)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value for memory-swap: %w", err)
+ }
+ }
+ }
+
+ noHostname, _ := flags.GetBool("no-hostname")
+ noHosts, _ := flags.GetBool("no-hosts")
+
+ addHost, _ := flags.GetStringSlice("add-host")
+ if len(addHost) > 0 {
+ if noHosts {
+ return nil, errors.New("--no-hosts and --add-host conflict, can not be used together")
+ }
+ for _, host := range addHost {
+ if err := validateExtraHost(host); err != nil {
+ return nil, fmt.Errorf("invalid value for add-host: %w", err)
+ }
+ }
+ }
+
+ noDNS = false
+ dnsServers := []string{}
+ if flags.Changed("dns") {
+ dnsServers, _ = flags.GetStringSlice("dns")
+ for _, server := range dnsServers {
+ if strings.ToLower(server) == "none" {
+ noDNS = true
+ }
+ }
+ if noDNS && len(dnsServers) > 1 {
+ return nil, errors.New("invalid --dns, --dns=none may not be used with any other --dns options")
+ }
+ }
+
+ dnsSearch := []string{}
+ if flags.Changed("dns-search") {
+ dnsSearch, _ = flags.GetStringSlice("dns-search")
+ if noDNS && len(dnsSearch) > 0 {
+ return nil, errors.New("invalid --dns-search, --dns-search may not be used with --dns=none")
+ }
+ }
+
+ dnsOptions := []string{}
+ if flags.Changed("dns-option") {
+ dnsOptions, _ = flags.GetStringSlice("dns-option")
+ if noDNS && len(dnsOptions) > 0 {
+ return nil, errors.New("invalid --dns-option, --dns-option may not be used with --dns=none")
+ }
+ }
+
+ if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil {
+ return nil, fmt.Errorf("invalid --shm-size: %w", err)
+ }
+ volumes, _ := flags.GetStringArray("volume")
+ cpuPeriod, _ := flags.GetUint64("cpu-period")
+ cpuQuota, _ := flags.GetInt64("cpu-quota")
+ cpuShares, _ := flags.GetUint64("cpu-shares")
+ httpProxy, _ := flags.GetBool("http-proxy")
+ identityLabel, _ := flags.GetBool("identity-label")
+ omitHistory, _ := flags.GetBool("omit-history")
+
+ ulimit := []string{}
+ if flags.Changed("ulimit") {
+ ulimit, _ = flags.GetStringSlice("ulimit")
+ }
+
+ secrets, _ := flags.GetStringArray("secret")
+ sshsources, _ := flags.GetStringArray("ssh")
+ ociHooks, _ := flags.GetStringArray("hooks-dir")
+
+ commonOpts := &define.CommonBuildOptions{
+ AddHost: addHost,
+ CPUPeriod: cpuPeriod,
+ CPUQuota: cpuQuota,
+ CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(),
+ CPUSetMems: findFlagFunc("cpuset-mems").Value.String(),
+ CPUShares: cpuShares,
+ CgroupParent: findFlagFunc("cgroup-parent").Value.String(),
+ DNSOptions: dnsOptions,
+ DNSSearch: dnsSearch,
+ DNSServers: dnsServers,
+ HTTPProxy: httpProxy,
+ IdentityLabel: types.NewOptionalBool(identityLabel),
+ Memory: memoryLimit,
+ MemorySwap: memorySwap,
+ NoHostname: noHostname,
+ NoHosts: noHosts,
+ OmitHistory: omitHistory,
+ ShmSize: findFlagFunc("shm-size").Value.String(),
+ Ulimit: ulimit,
+ Volumes: volumes,
+ Secrets: secrets,
+ SSHSources: sshsources,
+ OCIHooksDir: ociHooks,
+ }
+ securityOpts, _ := flags.GetStringArray("security-opt")
+ if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
+ return nil, err
+ }
+ return commonOpts, nil
+}
+
+// GetAdditionalBuildContext consumes raw string and returns parsed AdditionalBuildContext
+func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, error) {
+ ret := define.AdditionalBuildContext{IsURL: false, IsImage: false, Value: value}
+ if strings.HasPrefix(value, "docker-image://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "docker-image://")
+ } else if strings.HasPrefix(value, "container-image://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "container-image://")
+ } else if strings.HasPrefix(value, "docker://") {
+ ret.IsImage = true
+ ret.Value = strings.TrimPrefix(value, "docker://")
+ } else if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") {
+ ret.IsImage = false
+ ret.IsURL = true
+ } else {
+ path, err := filepath.Abs(value)
+ if err != nil {
+ return define.AdditionalBuildContext{}, fmt.Errorf("unable to convert additional build-context %q path to absolute: %w", value, err)
+ }
+ ret.Value = path
+ }
+ return ret, nil
+}
+
+func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOptions) error {
+ for _, opt := range securityOpts {
+ if opt == "no-new-privileges" {
+ commonOpts.NoNewPrivileges = true
+ continue
+ }
+
+ con := strings.SplitN(opt, "=", 2)
+ if len(con) != 2 {
+ return fmt.Errorf("invalid --security-opt name=value pair: %q", opt)
+ }
+ switch con[0] {
+ case "label":
+ commonOpts.LabelOpts = append(commonOpts.LabelOpts, con[1])
+ case "apparmor":
+ commonOpts.ApparmorProfile = con[1]
+ case "seccomp":
+ commonOpts.SeccompProfilePath = con[1]
+ default:
+ return fmt.Errorf("invalid --security-opt 2: %q", opt)
+ }
+
+ }
+
+ if commonOpts.SeccompProfilePath == "" {
+ if _, err := os.Stat(SeccompOverridePath); err == nil {
+ commonOpts.SeccompProfilePath = SeccompOverridePath
+ } else {
+ if !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ if _, err := os.Stat(SeccompDefaultPath); err != nil {
+ if !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ } else {
+ commonOpts.SeccompProfilePath = SeccompDefaultPath
+ }
+ }
+ }
+ return nil
+}
+
// SplitStringWithColonEscape splits a string into a slice by colon.
// A backslash-escaped colon (i.e. "\:") is not regarded as a separator.
// The actual splitting is delegated to the internal parse package.
func SplitStringWithColonEscape(str string) []string {
	return internalParse.SplitStringWithColonEscape(str)
}
+
// Volume parses the input of --volume into a specs.Mount.
// Parsing and validation are delegated to the internal parse package.
func Volume(volume string) (specs.Mount, error) {
	return internalParse.Volume(volume)
}
+
+// Volumes validates the host and container paths passed in to the --volume flag
+func Volumes(volumes []string) error {
+ if len(volumes) == 0 {
+ return nil
+ }
+ for _, volume := range volumes {
+ if _, err := Volume(volume); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// ValidateVolumeHostDir validates a volume mount's source directory.
// Delegates to containers/common's parse helper.
func ValidateVolumeHostDir(hostDir string) error {
	return parse.ValidateVolumeHostDir(hostDir)
}
+
// ValidateVolumeCtrDir validates a volume mount's destination directory.
// Delegates to containers/common's parse helper.
func ValidateVolumeCtrDir(ctrDir string) error {
	return parse.ValidateVolumeCtrDir(ctrDir)
}
+
// ValidateVolumeOpts validates a volume's options.
// Delegates to containers/common's parse helper, which returns the
// normalized option list.
func ValidateVolumeOpts(options []string) ([]string, error) {
	return parse.ValidateVolumeOpts(options)
}
+
+// validateExtraHost validates that the specified string is a valid extrahost and returns it.
+// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6).
+// for add-host flag
+func validateExtraHost(val string) error {
+ // allow for IPv6 addresses in extra hosts by only splitting on first ":"
+ arr := strings.SplitN(val, ":", 2)
+ if len(arr) != 2 || len(arr[0]) == 0 {
+ return fmt.Errorf("bad format for add-host: %q", val)
+ }
+ if _, err := validateIPAddress(arr[1]); err != nil {
+ return fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+ }
+ return nil
+}
+
// validateIPAddress validates an IP address (IPv4 or IPv6), returning its
// canonical string form.  Surrounding whitespace is ignored.  Also used for
// the dns, ip, and ip6 flags.
func validateIPAddress(val string) (string, error) {
	ip := net.ParseIP(strings.TrimSpace(val))
	if ip == nil {
		return "", fmt.Errorf("%s is not an ip address", val)
	}
	return ip.String(), nil
}
+
// SystemContextFromOptions returns a SystemContext populated with values
// per the input parameters provided by the caller for the use in authentication.
// It delegates to SystemContextFromFlagSet using the command's own flag set.
func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
	return SystemContextFromFlagSet(c.Flags(), c.Flag)
}
+
// SystemContextFromFlagSet returns a SystemContext populated with values
// per the input parameters provided by the caller for the use in authentication.
// Flags that were not set on the command line are left at the SystemContext
// zero value; lookup errors for individual flags are deliberately ignored so
// callers with smaller flag sets can still use this helper.
func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*types.SystemContext, error) {
	certDir, err := flags.GetString("cert-dir")
	if err != nil {
		certDir = ""
	}
	ctx := &types.SystemContext{
		DockerCertPath: certDir,
	}
	// --tls-verify is a positive flag, while the SystemContext fields mean
	// "skip verification" — hence the negation.
	tlsVerify, err := flags.GetBool("tls-verify")
	if err == nil && findFlagFunc("tls-verify").Changed {
		ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
		ctx.OCIInsecureSkipTLSVerify = !tlsVerify
		ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify
	}
	insecure, err := flags.GetBool("insecure")
	if err == nil && findFlagFunc("insecure").Changed {
		// --insecure and --tls-verify are mutually exclusive.
		if ctx.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
			return nil, errors.New("--insecure may not be used with --tls-verify")
		}
		ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(insecure)
		ctx.OCIInsecureSkipTLSVerify = insecure
		ctx.DockerDaemonInsecureSkipTLSVerify = insecure
	}
	disableCompression, err := flags.GetBool("disable-compression")
	if err == nil {
		// NOTE(review): when compression is NOT disabled, dir: destinations
		// are forced to compress — confirm this asymmetry is intended.
		if disableCompression {
			ctx.OCIAcceptUncompressedLayers = true
		} else {
			ctx.DirForceCompress = true
		}
	}
	creds, err := flags.GetString("creds")
	if err == nil && findFlagFunc("creds").Changed {
		var err error
		// May prompt interactively when username/password are missing.
		ctx.DockerAuthConfig, err = AuthConfig(creds)
		if err != nil {
			return nil, err
		}
	}
	sigPolicy, err := flags.GetString("signature-policy")
	if err == nil && findFlagFunc("signature-policy").Changed {
		ctx.SignaturePolicyPath = sigPolicy
	}
	authfile, err := flags.GetString("authfile")
	if err == nil {
		ctx.AuthFilePath = getAuthFile(authfile)
	}
	regConf, err := flags.GetString("registries-conf")
	if err == nil && findFlagFunc("registries-conf").Changed {
		ctx.SystemRegistriesConfPath = regConf
	}
	regConfDir, err := flags.GetString("registries-conf-dir")
	if err == nil && findFlagFunc("registries-conf-dir").Changed {
		ctx.RegistriesDirPath = regConfDir
	}
	shortNameAliasConf, err := flags.GetString("short-name-alias-conf")
	if err == nil && findFlagFunc("short-name-alias-conf").Changed {
		ctx.UserShortNameAliasConfPath = shortNameAliasConf
	}
	ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", define.Version)
	// --os/--arch/--variant may not exist on every flag set, hence the nil
	// checks before touching .Changed.
	if findFlagFunc("os") != nil && findFlagFunc("os").Changed {
		var os string
		if os, err = flags.GetString("os"); err != nil {
			return nil, err
		}
		ctx.OSChoice = os
	}
	if findFlagFunc("arch") != nil && findFlagFunc("arch").Changed {
		var arch string
		if arch, err = flags.GetString("arch"); err != nil {
			return nil, err
		}
		ctx.ArchitectureChoice = arch
	}
	if findFlagFunc("variant") != nil && findFlagFunc("variant").Changed {
		var variant string
		if variant, err = flags.GetString("variant"); err != nil {
			return nil, err
		}
		ctx.VariantChoice = variant
	}
	if findFlagFunc("platform") != nil && findFlagFunc("platform").Changed {
		var specs []string
		if specs, err = flags.GetStringSlice("platform"); err != nil {
			return nil, err
		}
		if len(specs) == 0 || specs[0] == "" {
			return nil, fmt.Errorf("unable to parse --platform value %v", specs)
		}
		// Only the first --platform value populates this SystemContext.
		platform := specs[0]
		os, arch, variant, err := Platform(platform)
		if err != nil {
			return nil, err
		}
		// --platform conflicts with the individual choice flags.
		if ctx.OSChoice != "" || ctx.ArchitectureChoice != "" || ctx.VariantChoice != "" {
			return nil, errors.New("invalid --platform may not be used with --os, --arch, or --variant")
		}
		ctx.OSChoice = os
		ctx.ArchitectureChoice = arch
		ctx.VariantChoice = variant
	}

	ctx.BigFilesTemporaryDir = GetTempDir()
	return ctx, nil
}
+
+func getAuthFile(authfile string) string {
+ if authfile != "" {
+ absAuthfile, err := filepath.Abs(authfile)
+ if err == nil {
+ return absAuthfile
+ }
+ logrus.Warnf("ignoring passed-in auth file path, evaluating it: %v", err)
+ }
+ return auth.GetDefaultAuthFile()
+}
+
+// PlatformFromOptions parses the operating system (os) and architecture (arch)
+// from the provided command line options. Deprecated in favor of
+// PlatformsFromOptions(), but kept here because it's part of our API.
+func PlatformFromOptions(c *cobra.Command) (os, arch string, err error) {
+ platforms, err := PlatformsFromOptions(c)
+ if err != nil {
+ return "", "", err
+ }
+ if len(platforms) < 1 {
+ return "", "", errors.New("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])")
+ }
+ return platforms[0].OS, platforms[0].Arch, nil
+}
+
// PlatformsFromOptions parses the operating system (os) and architecture
// (arch) from the provided command line options. If --platform is used, it
// also returns the list of platforms that were passed in as its argument.
// --platform is mutually exclusive with --os/--arch/--variant.
func PlatformsFromOptions(c *cobra.Command) (platforms []struct{ OS, Arch, Variant string }, err error) {
	var os, arch, variant string
	if c.Flag("os").Changed {
		if os, err = c.Flags().GetString("os"); err != nil {
			return nil, err
		}
	}
	if c.Flag("arch").Changed {
		if arch, err = c.Flags().GetString("arch"); err != nil {
			return nil, err
		}
	}
	if c.Flag("variant").Changed {
		if variant, err = c.Flags().GetString("variant"); err != nil {
			return nil, err
		}
	}
	// Default: a single platform built from the individual flags (possibly
	// all empty, meaning "current platform" to downstream consumers).
	platforms = []struct{ OS, Arch, Variant string }{{os, arch, variant}}
	if c.Flag("platform").Changed {
		// --platform replaces the default entirely.
		platforms = nil
		platformSpecs, err := c.Flags().GetStringSlice("platform")
		if err != nil {
			return nil, fmt.Errorf("unable to parse platform: %w", err)
		}
		if os != "" || arch != "" || variant != "" {
			return nil, fmt.Errorf("invalid --platform may not be used with --os, --arch, or --variant")
		}
		for _, pf := range platformSpecs {
			if os, arch, variant, err = Platform(pf); err != nil {
				return nil, fmt.Errorf("unable to parse platform %q: %w", pf, err)
			}
			platforms = append(platforms, struct{ OS, Arch, Variant string }{os, arch, variant})
		}
	}
	return platforms, nil
}
+
// DefaultPlatform returns the standard platform for the current system,
// as reported by containerd's platform detection ("os/arch[/variant]").
func DefaultPlatform() string {
	return platforms.DefaultString()
}
+
+// Platform separates the platform string into os, arch and variant,
+// accepting any of $arch, $os/$arch, or $os/$arch/$variant.
+func Platform(platform string) (os, arch, variant string, err error) {
+ platform = strings.Trim(platform, "/")
+ if platform == "local" || platform == "" {
+ return Platform(DefaultPlatform())
+ }
+ platformSpec, err := platforms.Parse(platform)
+ if err != nil {
+ return "", "", "", fmt.Errorf("invalid platform syntax for --platform=%q: %w", platform, err)
+ }
+ return platformSpec.OS, platformSpec.Architecture, platformSpec.Variant, nil
+}
+
// parseCreds splits a "username[:password]" credential string into its two
// parts.  Missing pieces come back as empty strings; an empty input yields
// two empty strings.
func parseCreds(creds string) (string, string) {
	if creds == "" {
		return "", ""
	}
	username, password, hasColon := strings.Cut(creds, ":")
	if !hasColon {
		return username, ""
	}
	if username == "" {
		return "", password
	}
	return username, password
}
+
// AuthConfig parses the creds in format [username[:password] into an auth
// config.  When either part is missing it is prompted for interactively:
// the username is read from stdin, and the password is read from fd 0 with
// echo disabled via term.ReadPassword.
func AuthConfig(creds string) (*types.DockerAuthConfig, error) {
	username, password := parseCreds(creds)
	if username == "" {
		fmt.Print("Username: ")
		fmt.Scanln(&username)
	}
	if password == "" {
		fmt.Print("Password: ")
		termPassword, err := term.ReadPassword(0)
		if err != nil {
			return nil, fmt.Errorf("could not read password from terminal: %w", err)
		}
		password = string(termPassword)
	}

	return &types.DockerAuthConfig{
		Username: username,
		Password: password,
	}, nil
}
+
+// GetBuildOutput is responsible for parsing custom build output argument i.e `build --output` flag.
+// Takes `buildOutput` as string and returns BuildOutputOption
+func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
+ if len(buildOutput) == 1 && buildOutput == "-" {
+ // Feature parity with buildkit, output tar to stdout
+ // Read more here: https://docs.docker.com/engine/reference/commandline/build/#custom-build-outputs
+ return define.BuildOutputOption{Path: "",
+ IsDir: false,
+ IsStdout: true}, nil
+ }
+ if !strings.Contains(buildOutput, ",") {
+ // expect default --output <dirname>
+ return define.BuildOutputOption{Path: buildOutput,
+ IsDir: true,
+ IsStdout: false}, nil
+ }
+ isDir := true
+ isStdout := false
+ typeSelected := false
+ pathSelected := false
+ path := ""
+ tokens := strings.Split(buildOutput, ",")
+ for _, option := range tokens {
+ arr := strings.SplitN(option, "=", 2)
+ if len(arr) != 2 {
+ return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput)
+ }
+ switch arr[0] {
+ case "type":
+ if typeSelected {
+ return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
+ }
+ typeSelected = true
+ if arr[1] == "local" {
+ isDir = true
+ } else if arr[1] == "tar" {
+ isDir = false
+ } else {
+ return define.BuildOutputOption{}, fmt.Errorf("invalid type %q selected for build output options %q", arr[1], buildOutput)
+ }
+ case "dest":
+ if pathSelected {
+ return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
+ }
+ pathSelected = true
+ path = arr[1]
+ default:
+ return define.BuildOutputOption{}, fmt.Errorf("unrecognized key %q in build output option: %q", arr[0], buildOutput)
+ }
+ }
+
+ if !typeSelected || !pathSelected {
+ return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, accepted keys are type and dest must be present", buildOutput)
+ }
+
+ if path == "-" {
+ if isDir {
+ return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, type=local and dest=- is not supported", buildOutput)
+ }
+ return define.BuildOutputOption{Path: "",
+ IsDir: false,
+ IsStdout: true}, nil
+ }
+
+ return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil
+}
+
// TeeType parses a string value and returns a TeeType.
// Matching is case-insensitive: the value is lowercased before conversion.
func TeeType(teeType string) define.TeeType {
	return define.TeeType(strings.ToLower(teeType))
}
+
+// GetConfidentialWorkloadOptions parses a confidential workload settings
+// argument, which controls both whether or not we produce an image that
+// expects to be run using krun, and how we handle things like encrypting
+// the disk image that the container image will contain.
+func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOptions, error) {
+ options := define.ConfidentialWorkloadOptions{
+ TempDir: GetTempDir(),
+ }
+ defaults := options
+ for _, option := range strings.Split(arg, ",") {
+ var err error
+ switch {
+ case strings.HasPrefix(option, "type="):
+ options.TeeType = TeeType(strings.TrimPrefix(option, "type="))
+ switch options.TeeType {
+ case define.SEV, define.SNP, mkcwtypes.SEV_NO_ES:
+ default:
+ return options, fmt.Errorf("parsing type= value %q: unrecognized value", options.TeeType)
+ }
+ case strings.HasPrefix(option, "attestation_url="), strings.HasPrefix(option, "attestation-url="):
+ options.Convert = true
+ options.AttestationURL = strings.TrimPrefix(option, "attestation_url=")
+ if options.AttestationURL == option {
+ options.AttestationURL = strings.TrimPrefix(option, "attestation-url=")
+ }
+ case strings.HasPrefix(option, "passphrase="), strings.HasPrefix(option, "passphrase="):
+ options.Convert = true
+ options.DiskEncryptionPassphrase = strings.TrimPrefix(option, "passphrase=")
+ case strings.HasPrefix(option, "workload_id="), strings.HasPrefix(option, "workload-id="):
+ options.WorkloadID = strings.TrimPrefix(option, "workload_id=")
+ if options.WorkloadID == option {
+ options.WorkloadID = strings.TrimPrefix(option, "workload-id=")
+ }
+ case strings.HasPrefix(option, "cpus="):
+ options.CPUs, err = strconv.Atoi(strings.TrimPrefix(option, "cpus="))
+ if err != nil {
+ return options, fmt.Errorf("parsing cpus= value %q: %w", strings.TrimPrefix(option, "cpus="), err)
+ }
+ case strings.HasPrefix(option, "memory="):
+ options.Memory, err = strconv.Atoi(strings.TrimPrefix(option, "memory="))
+ if err != nil {
+ return options, fmt.Errorf("parsing memory= value %q: %w", strings.TrimPrefix(option, "memorys"), err)
+ }
+ case option == "ignore_attestation_errors", option == "ignore-attestation-errors":
+ options.IgnoreAttestationErrors = true
+ case strings.HasPrefix(option, "ignore_attestation_errors="), strings.HasPrefix(option, "ignore-attestation-errors="):
+ val := strings.TrimPrefix(option, "ignore_attestation_errors=")
+ if val == option {
+ val = strings.TrimPrefix(option, "ignore-attestation-errors=")
+ }
+ options.IgnoreAttestationErrors = val == "true" || val == "yes" || val == "on" || val == "1"
+ case strings.HasPrefix(option, "firmware-library="), strings.HasPrefix(option, "firmware_library="):
+ val := strings.TrimPrefix(option, "firmware-library=")
+ if val == option {
+ val = strings.TrimPrefix(option, "firmware_library=")
+ }
+ options.FirmwareLibrary = val
+ case strings.HasPrefix(option, "slop="):
+ options.Slop = strings.TrimPrefix(option, "slop=")
+ default:
+ knownOptions := []string{"type", "attestation_url", "passphrase", "workload_id", "cpus", "memory", "firmware_library", "slop"}
+ return options, fmt.Errorf("expected one or more of %q as arguments for --cw, not %q", knownOptions, option)
+ }
+ }
+ if options != defaults && !options.Convert {
+ return options, fmt.Errorf("--cw arguments missing one or more of (%q, %q)", "passphrase", "attestation_url")
+ }
+ return options, nil
+}
+
// IDMappingOptions parses the build options related to user namespaces and ID mapping.
// The isolation argument is accepted for API compatibility but is not
// consulted here; parsing is delegated to IDMappingOptionsFromFlagSet.
func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
	return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag)
}
+
+// GetAutoOptions returns a AutoUserNsOptions with the settings to setup automatically
+// a user namespace.
+func GetAutoOptions(base string) (*storageTypes.AutoUserNsOptions, error) {
+ parts := strings.SplitN(base, ":", 2)
+ if parts[0] != "auto" {
+ return nil, errors.New("wrong user namespace mode")
+ }
+ options := storageTypes.AutoUserNsOptions{}
+ if len(parts) == 1 {
+ return &options, nil
+ }
+ for _, o := range strings.Split(parts[1], ",") {
+ v := strings.SplitN(o, "=", 2)
+ if len(v) != 2 {
+ return nil, fmt.Errorf("invalid option specified: %q", o)
+ }
+ switch v[0] {
+ case "size":
+ s, err := strconv.ParseUint(v[1], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ options.Size = uint32(s)
+ case "uidmapping":
+ mapping, err := storageTypes.ParseIDMapping([]string{v[1]}, nil, "", "")
+ if err != nil {
+ return nil, err
+ }
+ options.AdditionalUIDMappings = append(options.AdditionalUIDMappings, mapping.UIDMap...)
+ case "gidmapping":
+ mapping, err := storageTypes.ParseIDMapping(nil, []string{v[1]}, "", "")
+ if err != nil {
+ return nil, err
+ }
+ options.AdditionalGIDMappings = append(options.AdditionalGIDMappings, mapping.GIDMap...)
+ default:
+ return nil, fmt.Errorf("unknown option specified: %q", v[0])
+ }
+ }
+ return &options, nil
+}
+
// IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping.
// Mappings can come from user/group names (userns-uid-map-user /
// userns-gid-map-group), from explicit triples (userns-uid-map /
// userns-gid-map, on either the given flag set or the persistent one), or
// from an "auto[:...]" --userns value that lets storage pick the mappings.
func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
	isAuto := false
	autoOpts := &storageTypes.AutoUserNsOptions{}
	user := findFlagFunc("userns-uid-map-user").Value.String()
	group := findFlagFunc("userns-gid-map-group").Value.String()
	// If only the user or group was specified, use the same value for the
	// other, since we need both in order to initialize the maps using the
	// names.
	if user == "" && group != "" {
		user = group
	}
	if group == "" && user != "" {
		group = user
	}
	// Either start with empty maps or the name-based maps.
	mappings := idtools.NewIDMappingsFromMaps(nil, nil)
	if user != "" && group != "" {
		submappings, err := idtools.NewIDMappings(user, group)
		if err != nil {
			return nil, nil, err
		}
		mappings = submappings
	}
	globalOptions := persistentFlags
	// We'll parse the UID and GID mapping options the same way.
	buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) {
		outmap := make([]specs.LinuxIDMapping, 0, len(basemap))
		// Start with the name-based map entries.
		for _, m := range basemap {
			outmap = append(outmap, specs.LinuxIDMapping{
				ContainerID: uint32(m.ContainerID),
				HostID:      uint32(m.HostID),
				Size:        uint32(m.Size),
			})
		}
		// Parse the flag's value as one or more triples (if it's even
		// been set), and append them.  A flag set on the command itself
		// takes precedence over the persistent (global) flag.
		var spec []string
		if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed {
			spec, _ = globalOptions.GetStringSlice(option)
		}
		if findFlagFunc(option).Changed {
			spec, _ = flags.GetStringSlice(option)
		}
		idmap, err := parseIDMap(spec)
		if err != nil {
			return nil, err
		}
		for _, m := range idmap {
			outmap = append(outmap, specs.LinuxIDMapping{
				ContainerID: m[0],
				HostID:      m[1],
				Size:        m[2],
			})
		}
		return outmap, nil
	}
	uidmap, err := buildIDMap(mappings.UIDs(), "userns-uid-map")
	if err != nil {
		return nil, nil, err
	}
	gidmap, err := buildIDMap(mappings.GIDs(), "userns-gid-map")
	if err != nil {
		return nil, nil, err
	}
	// If we only have one map or the other populated at this point, then
	// use the same mapping for both, since we know that no user or group
	// name was specified, but a specific mapping was for one or the other.
	if len(uidmap) == 0 && len(gidmap) != 0 {
		uidmap = gidmap
	}
	if len(gidmap) == 0 && len(uidmap) != 0 {
		gidmap = uidmap
	}

	// By default, having mappings configured means we use a user
	// namespace.  Otherwise, we don't.
	usernsOption := define.NamespaceOption{
		Name: string(specs.UserNamespace),
		Host: len(uidmap) == 0 && len(gidmap) == 0,
	}
	// If the user specifically requested that we either use or don't use
	// user namespaces, override that default.
	if findFlagFunc("userns").Changed {
		how := findFlagFunc("userns").Value.String()
		if strings.HasPrefix(how, "auto") {
			// "auto[:opts]" asks storage to pick the mappings itself.
			autoOpts, err = GetAutoOptions(how)
			if err != nil {
				return nil, nil, err
			}
			isAuto = true
			usernsOption.Host = false
		} else {
			switch how {
			case "", "container", "private":
				usernsOption.Host = false
			case "host":
				usernsOption.Host = true
			default:
				// Anything else is a path to a user namespace to join,
				// optionally prefixed with "ns:".
				how = strings.TrimPrefix(how, "ns:")
				if _, err := os.Stat(how); err != nil {
					return nil, nil, fmt.Errorf("checking %s namespace: %w", string(specs.UserNamespace), err)
				}
				logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how)
				usernsOption.Path = how
			}
		}
	}
	usernsOptions = define.NamespaceOptions{usernsOption}

	// If the user requested that we use the host namespace, but also that
	// we use mappings, that's not going to work.
	if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host {
		return nil, nil, fmt.Errorf("can not specify ID mappings while using host's user namespace")
	}
	return usernsOptions, &define.IDMappingOptions{
		HostUIDMapping: usernsOption.Host,
		HostGIDMapping: usernsOption.Host,
		UIDMap:         uidmap,
		GIDMap:         gidmap,
		AutoUserNs:     isAuto,
		AutoUserNsOpts: *autoOpts,
	}, nil
}
+
// parseIDMap parses one or more "containerid:hostid:size[,...]" mappings
// from spec into [container, host, size] uint32 triples.  Any run of
// non-digit characters acts as a separator, and each value must fit in 32
// bits.
func parseIDMap(spec []string) (m [][3]uint32, err error) {
	for _, s := range spec {
		fields := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsDigit(r) })
		if len(fields)%3 != 0 {
			return nil, fmt.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s)
		}
		for i := 0; i < len(fields); i += 3 {
			cid, err := strconv.ParseUint(fields[i], 10, 32)
			if err != nil {
				return nil, fmt.Errorf("parsing container ID %q from mapping %q as a number: %w", fields[i], s, err)
			}
			hostid, err := strconv.ParseUint(fields[i+1], 10, 32)
			if err != nil {
				return nil, fmt.Errorf("parsing host ID %q from mapping %q as a number: %w", fields[i+1], s, err)
			}
			size, err := strconv.ParseUint(fields[i+2], 10, 32)
			if err != nil {
				return nil, fmt.Errorf("parsing %q from mapping %q as a number: %w", fields[i+2], s, err)
			}
			m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)})
		}
	}
	return m, nil
}
+
// NamespaceOptions parses the build options for all namespaces except for user namespace.
// It delegates to NamespaceOptionsFromFlagSet using the command's own flag set.
func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) {
	return NamespaceOptionsFromFlagSet(c.Flags(), c.Flag)
}
+
// NamespaceOptionsFromFlagSet parses the build options for all namespaces except for user namespace.
// Each recognized namespace flag maps "container"/"private"/"" to a private
// namespace, "host" to the host's namespace, and anything else to either a
// namespace path ("ns:" prefix allowed) or — for the network namespace — a
// comma-separated network list or the special value "none".
func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) {
	options := make(define.NamespaceOptions, 0, 7)
	policy := define.NetworkDefault
	for _, what := range []string{"cgroupns", string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
		// Only flags that exist on this flag set and were explicitly
		// changed are inspected.
		if flags.Lookup(what) != nil && findFlagFunc(what).Changed {
			how := findFlagFunc(what).Value.String()
			switch what {
			case "cgroupns":
				// The flag is spelled "cgroupns" but the runtime spec
				// names the namespace "cgroup".
				what = string(specs.CgroupNamespace)
			}
			// NOTE(review): policy flips to NetworkEnabled for any changed
			// namespace flag, not only "network" — confirm this is intended.
			switch how {
			case "", "container", "private":
				logrus.Debugf("setting %q namespace to %q", what, "")
				policy = define.NetworkEnabled
				options.AddOrReplace(define.NamespaceOption{
					Name: what,
				})
			case "host":
				logrus.Debugf("setting %q namespace to host", what)
				policy = define.NetworkEnabled
				options.AddOrReplace(define.NamespaceOption{
					Name: what,
					Host: true,
				})
			default:
				if what == string(specs.NetworkNamespace) {
					if how == "none" {
						options.AddOrReplace(define.NamespaceOption{
							Name: what,
						})
						policy = define.NetworkDisabled
						logrus.Debugf("setting network to disabled")
						break
					}
				}
				how = strings.TrimPrefix(how, "ns:")
				// if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go
				if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) {
					if _, err := os.Stat(how); err != nil {
						return nil, define.NetworkDefault, fmt.Errorf("checking %s namespace: %w", what, err)
					}
				}
				policy = define.NetworkEnabled
				logrus.Debugf("setting %q namespace to %q", what, how)
				options.AddOrReplace(define.NamespaceOption{
					Name: what,
					Path: how,
				})
			}
		}
	}
	return options, policy, nil
}
+
+// defaultIsolation returns the isolation mechanism to use when none was
+// specified: the value of $BUILDAH_ISOLATION when that is set, rootless
+// isolation when running unprivileged, and the package default otherwise.
+func defaultIsolation() (define.Isolation, error) {
+	isolation, isSet := os.LookupEnv("BUILDAH_ISOLATION")
+	if isSet {
+		switch strings.ToLower(isolation) {
+		case "oci":
+			return define.IsolationOCI, nil
+		case "rootless":
+			return define.IsolationOCIRootless, nil
+		case "chroot":
+			return define.IsolationChroot, nil
+		default:
+			return 0, fmt.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
+		}
+	}
+	if unshare.IsRootless() {
+		return define.IsolationOCIRootless, nil
+	}
+	return define.IsolationDefault, nil
+}
+
+// IsolationOption parses the --isolation flag.
+// Recognized values are "oci"/"default", "rootless", and "chroot"
+// (case-insensitive); an empty value falls back to defaultIsolation(),
+// which consults $BUILDAH_ISOLATION and the rootless state.
+func IsolationOption(isolation string) (define.Isolation, error) {
+	if isolation != "" {
+		switch strings.ToLower(isolation) {
+		case "oci", "default":
+			return define.IsolationOCI, nil
+		case "rootless":
+			return define.IsolationOCIRootless, nil
+		case "chroot":
+			return define.IsolationChroot, nil
+		default:
+			return 0, fmt.Errorf("unrecognized isolation type %q", isolation)
+		}
+	}
+	return defaultIsolation()
+}
+
+// Device parses device mapping string to a src, dest & permissions string
+// Valid values for device look like:
+//
+//	"/dev/sdc"
+//	"/dev/sdc:/dev/xvdc"
+//	"/dev/sdc:/dev/xvdc:rwm"
+//	"/dev/sdc:rm"
+func Device(device string) (string, string, string, error) {
+	src := ""
+	dst := ""
+	permissions := "rwm"
+	arr := strings.Split(device, ":")
+	switch len(arr) {
+	case 3:
+		if !isValidDeviceMode(arr[2]) {
+			return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2])
+		}
+		permissions = arr[2]
+		fallthrough
+	case 2:
+		// The middle field is either a permission string or a
+		// destination path, which must be absolute.
+		if isValidDeviceMode(arr[1]) {
+			permissions = arr[1]
+		} else {
+			if len(arr[1]) == 0 || arr[1][0] != '/' {
+				return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1])
+			}
+			dst = arr[1]
+		}
+		fallthrough
+	case 1:
+		if len(arr[0]) > 0 {
+			src = arr[0]
+			break
+		}
+		fallthrough
+	default:
+		return "", "", "", fmt.Errorf("invalid device specification: %s", device)
+	}
+
+	// The destination defaults to the source path.
+	if dst == "" {
+		dst = src
+	}
+	return src, dst, permissions, nil
+}
+
+// isValidDeviceMode checks if the mode for device is valid or not.
+// A valid mode is a non-empty composition of r (read), w (write), and
+// m (mknod), with each flag appearing at most once.
+func isValidDeviceMode(mode string) bool {
+	var legalDeviceMode = map[rune]bool{
+		'r': true,
+		'w': true,
+		'm': true,
+	}
+	if mode == "" {
+		return false
+	}
+	for _, c := range mode {
+		if !legalDeviceMode[c] {
+			return false
+		}
+		// Clear the flag so that a repeated character is rejected.
+		legalDeviceMode[c] = false
+	}
+	return true
+}
+
+// GetTempDir returns the path of the preferred temporary directory on the host.
+// It defers to the internal tmpdir package.
+func GetTempDir() string {
+	return tmpdir.GetTempDir()
+}
+
+// Secrets parses the --secret flag
+// Each flag value is a comma-separated list of key=value tokens:
+// "id" (required identifier), "src" (file path or environment variable
+// name, defaulting to the id), "env" (shorthand that sets src and forces
+// type=env), and "type" ("file" or "env").  When no type is given, "env"
+// is chosen if an environment variable named after the id exists, and
+// "file" otherwise.  File sources are resolved to absolute paths and must
+// exist.
+func Secrets(secrets []string) (map[string]define.Secret, error) {
+	invalidSyntax := fmt.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]")
+	parsed := make(map[string]define.Secret)
+	for _, secret := range secrets {
+		tokens := strings.Split(secret, ",")
+		var id, src, typ string
+		for _, val := range tokens {
+			kv := strings.SplitN(val, "=", 2)
+			// NOTE(review): a token without "=" leaves len(kv) == 1,
+			// and the kv[1] accesses below would panic with an
+			// index-out-of-range — confirm whether such input can
+			// reach this point, or add a len(kv) check.
+			switch kv[0] {
+			case "id":
+				id = kv[1]
+			case "src":
+				src = kv[1]
+			case "env":
+				src = kv[1]
+				typ = "env"
+			case "type":
+				if kv[1] != "file" && kv[1] != "env" {
+					return nil, errors.New("invalid secret type, must be file or env")
+				}
+				typ = kv[1]
+			}
+		}
+		if id == "" {
+			return nil, invalidSyntax
+		}
+		if src == "" {
+			src = id
+		}
+		// Infer the type from the environment when unspecified.
+		if typ == "" {
+			if _, ok := os.LookupEnv(id); ok {
+				typ = "env"
+			} else {
+				typ = "file"
+			}
+		}
+
+		if typ == "file" {
+			fullPath, err := filepath.Abs(src)
+			if err != nil {
+				return nil, fmt.Errorf("could not parse secrets: %w", err)
+			}
+			_, err = os.Stat(fullPath)
+			if err != nil {
+				return nil, fmt.Errorf("could not parse secrets: %w", err)
+			}
+			src = fullPath
+		}
+		newSecret := define.Secret{
+			Source:     src,
+			SourceType: typ,
+		}
+		parsed[id] = newSecret
+
+	}
+	return parsed, nil
+}
+
+// SSH parses the --ssh flag
+// Each entry has the form "name[=path1,path2,...]"; the optional paths
+// are handed to sshagent.NewSource, which decides whether they are key
+// files or an agent socket.
+func SSH(sshSources []string) (map[string]*sshagent.Source, error) {
+	parsed := make(map[string]*sshagent.Source)
+	var paths []string
+	for _, v := range sshSources {
+		parts := strings.SplitN(v, "=", 2)
+		if len(parts) > 1 {
+			paths = strings.Split(parts[1], ",")
+		}
+		// NOTE(review): paths is declared outside the loop, so an entry
+		// without "=" reuses the previous entry's paths — confirm this
+		// carry-over is intentional.
+
+		source, err := sshagent.NewSource(paths)
+		if err != nil {
+			return nil, err
+		}
+		parsed[parts[0]] = source
+	}
+	return parsed, nil
+}
+
+// ContainerIgnoreFile consumes path to `dockerignore` or `containerignore`
+// and returns list of files to exclude along with the path to processed ignore
+// file. Deprecated since this might become internal only, please avoid relying
+// on this function.
+func ContainerIgnoreFile(contextDir, path string, containerFiles []string) ([]string, string, error) {
+	// An explicitly supplied ignore file wins outright.
+	if path != "" {
+		excludes, err := imagebuilder.ParseIgnore(path)
+		return excludes, path, err
+	}
+	// If path was not supplied give priority to `<containerfile>.containerignore` first.
+	for _, containerfile := range containerFiles {
+		if !filepath.IsAbs(containerfile) {
+			containerfile = filepath.Join(contextDir, containerfile)
+		}
+		containerfileIgnore := ""
+		if _, err := os.Stat(containerfile + ".containerignore"); err == nil {
+			containerfileIgnore = containerfile + ".containerignore"
+		}
+		// NOTE(review): when both files exist, this second check
+		// overwrites the .containerignore choice with .dockerignore,
+		// which contradicts the stated priority — confirm which
+		// file is supposed to win.
+		if _, err := os.Stat(containerfile + ".dockerignore"); err == nil {
+			containerfileIgnore = containerfile + ".dockerignore"
+		}
+		if containerfileIgnore != "" {
+			excludes, err := imagebuilder.ParseIgnore(containerfileIgnore)
+			return excludes, containerfileIgnore, err
+		}
+	}
+	// Fall back to <contextDir>/.containerignore, then
+	// <contextDir>/.dockerignore, resolving each name safely inside the
+	// context directory.
+	path, symlinkErr := securejoin.SecureJoin(contextDir, ".containerignore")
+	if symlinkErr != nil {
+		return nil, "", symlinkErr
+	}
+	excludes, err := imagebuilder.ParseIgnore(path)
+	if errors.Is(err, os.ErrNotExist) {
+		path, symlinkErr = securejoin.SecureJoin(contextDir, ".dockerignore")
+		if symlinkErr != nil {
+			return nil, "", symlinkErr
+		}
+		excludes, err = imagebuilder.ParseIgnore(path)
+	}
+	// Neither ignore file exists: no exclusions, and no path to report.
+	if errors.Is(err, os.ErrNotExist) {
+		return excludes, "", nil
+	}
+	return excludes, path, err
+}
diff --git a/pkg/parse/parse_test.go b/pkg/parse/parse_test.go
new file mode 100644
index 0000000..c1f8833
--- /dev/null
+++ b/pkg/parse/parse_test.go
@@ -0,0 +1,224 @@
+package parse
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/image/v5/types"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCommonBuildOptionsFromFlagSet(t *testing.T) {
+ fs := pflag.NewFlagSet("testme", pflag.PanicOnError)
+ fs.String("memory", "1GB", "")
+ fs.String("shm-size", "5TB", "")
+ fs.String("cpuset-cpus", "1", "")
+ fs.String("cpuset-mems", "2", "")
+ fs.String("cgroup-parent", "none", "")
+ err := fs.Parse([]string{"--memory", "2GB"})
+ assert.NoError(t, err)
+ cbo, err := CommonBuildOptionsFromFlagSet(fs, fs.Lookup)
+ assert.NoError(t, err)
+ assert.Equal(t, cbo.Memory, int64(2147483648))
+}
+
+// TestDeviceParser verifies the given device strings is parsed correctly
+func TestDeviceParser(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip("Devices is only supported on Linux")
+ }
+
+ // Test defaults
+ src, dest, permissions, err := Device("/dev/foo")
+ assert.NoError(t, err)
+ assert.Equal(t, src, "/dev/foo")
+ assert.Equal(t, dest, "/dev/foo")
+ assert.Equal(t, permissions, "rwm")
+
+ // Test defaults, different dest
+ src, dest, permissions, err = Device("/dev/foo:/dev/bar")
+ assert.NoError(t, err)
+ assert.Equal(t, src, "/dev/foo")
+ assert.Equal(t, dest, "/dev/bar")
+ assert.Equal(t, permissions, "rwm")
+
+ // Test fully specified
+ src, dest, permissions, err = Device("/dev/foo:/dev/bar:rm")
+ assert.NoError(t, err)
+ assert.Equal(t, src, "/dev/foo")
+ assert.Equal(t, dest, "/dev/bar")
+ assert.Equal(t, permissions, "rm")
+
+ // Test device, permissions
+ src, dest, permissions, err = Device("/dev/foo:rm")
+ assert.NoError(t, err)
+ assert.Equal(t, src, "/dev/foo")
+ assert.Equal(t, dest, "/dev/foo")
+ assert.Equal(t, permissions, "rm")
+
+ //test bogus permissions
+ _, _, _, err = Device("/dev/fuse1:BOGUS")
+ assert.Error(t, err)
+
+ _, _, _, err = Device("")
+ assert.Error(t, err)
+
+ _, _, _, err = Device("/dev/foo:/dev/bar:rm:")
+ assert.Error(t, err)
+
+ _, _, _, err = Device("/dev/foo::rm")
+ assert.Error(t, err)
+}
+
+func TestIsValidDeviceMode(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip("Devices is only supported on Linux")
+ }
+ assert.False(t, isValidDeviceMode("BOGUS"))
+ assert.False(t, isValidDeviceMode("rwx"))
+ assert.True(t, isValidDeviceMode("r"))
+ assert.True(t, isValidDeviceMode("rw"))
+ assert.True(t, isValidDeviceMode("rm"))
+ assert.True(t, isValidDeviceMode("rwm"))
+}
+
+func TestDeviceFromPath(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip("Devices is only supported on Linux")
+ }
+ // Path is valid
+ dev, err := DeviceFromPath("/dev/null")
+ assert.NoError(t, err)
+ assert.Equal(t, len(dev), 1)
+ assert.Equal(t, dev[0].Major, int64(1))
+ assert.Equal(t, dev[0].Minor, int64(3))
+ assert.Equal(t, string(dev[0].Permissions), "rwm")
+ assert.Equal(t, dev[0].Uid, uint32(0))
+ assert.Equal(t, dev[0].Gid, uint32(0))
+
+ // Path does not exists
+ _, err = DeviceFromPath("/dev/BOGUS")
+ assert.Error(t, err)
+
+ // Path is a directory of devices
+ _, err = DeviceFromPath("/dev/pts")
+ assert.NoError(t, err)
+
+ // path of directory has no device
+ _, err = DeviceFromPath("/etc/passwd")
+ assert.Error(t, err)
+}
+
+func TestIDMappingOptions(t *testing.T) {
+ fs := pflag.NewFlagSet("testme", pflag.PanicOnError)
+ pfs := pflag.NewFlagSet("persist", pflag.PanicOnError)
+ fs.String("userns-uid-map-user", "", "")
+ fs.String("userns-gid-map-group", "", "")
+ fs.String("userns-uid-map", "", "")
+ fs.String("userns-gid-map", "", "")
+ fs.String("userns", "", "")
+ err := fs.Parse([]string{})
+ assert.NoError(t, err)
+ uos, _, err := IDMappingOptionsFromFlagSet(fs, pfs, fs.Lookup)
+ assert.NoError(t, err)
+ nso := uos.Find(string(specs.UserNamespace))
+ assert.Equal(t, *nso, define.NamespaceOption{
+ Host: true,
+ Name: string(specs.UserNamespace),
+ })
+}
+
+func TestIsolation(t *testing.T) {
+ def, err := defaultIsolation()
+ if err != nil {
+ assert.Error(t, err)
+ }
+
+ isolations := []string{"", "default", "oci", "chroot", "rootless"}
+ for _, i := range isolations {
+ isolation, err := IsolationOption(i)
+ if err != nil {
+ assert.Error(t, fmt.Errorf("isolation %q not supported", i))
+ }
+ var expected string
+ switch i {
+ case "":
+ expected = def.String()
+ case "default":
+ expected = "oci"
+ default:
+ expected = i
+ }
+
+ if isolation.String() != expected {
+ assert.Error(t, fmt.Errorf("isolation %q not equal to user input %q", isolation.String(), expected))
+ }
+ }
+}
+
+func TestNamespaceOptions(t *testing.T) {
+ fs := pflag.NewFlagSet("testme", pflag.PanicOnError)
+ fs.String("cgroupns", "", "")
+ err := fs.Parse([]string{"--cgroupns", "private"})
+ assert.NoError(t, err)
+ nsos, np, err := NamespaceOptionsFromFlagSet(fs, fs.Lookup)
+ assert.NoError(t, err)
+ assert.Equal(t, np, define.NetworkEnabled)
+ nso := nsos.Find(string(specs.CgroupNamespace))
+ assert.Equal(t, *nso, define.NamespaceOption{
+ Name: string(specs.CgroupNamespace),
+ })
+}
+
+func TestParsePlatform(t *testing.T) {
+ os, arch, variant, err := Platform("a/b/c")
+ assert.NoError(t, err)
+ assert.NoError(t, err)
+ assert.Equal(t, os, "a")
+ assert.Equal(t, arch, "b")
+ assert.Equal(t, variant, "c")
+
+ os, arch, variant, err = Platform("a/b")
+ assert.NoError(t, err)
+ assert.NoError(t, err)
+ assert.Equal(t, os, "a")
+ assert.Equal(t, arch, "b")
+ assert.Equal(t, variant, "")
+
+ _, _, _, err = Platform("a")
+ assert.Error(t, err)
+}
+
+func TestSplitStringWithColonEscape(t *testing.T) {
+ tests := []struct {
+ volume string
+ expectedResult []string
+ }{
+ {"/root/a:/root/test:O", []string{"/root/a", "/root/test", "O"}},
+ {"/root/a\\:b/c:/root/test:O", []string{"/root/a:b/c", "/root/test", "O"}},
+ {"/root/a:/root/test\\:test1/a:O", []string{"/root/a", "/root/test:test1/a", "O"}},
+ {"/root/a\\:b/c:/root/test\\:test1/a:O", []string{"/root/a:b/c", "/root/test:test1/a", "O"}},
+ }
+ for _, args := range tests {
+ val := SplitStringWithColonEscape(args.volume)
+ assert.Equal(t, val, args.expectedResult)
+ }
+}
+
+func TestSystemContextFromFlagSet(t *testing.T) {
+ fs := pflag.NewFlagSet("testme", pflag.PanicOnError)
+ fs.Bool("tls-verify", false, "")
+ err := fs.Parse([]string{"--tls-verify", "false"})
+ assert.NoError(t, err)
+ sc, err := SystemContextFromFlagSet(fs, fs.Lookup)
+ assert.NoError(t, err)
+ assert.Equal(t, sc, &types.SystemContext{
+ BigFilesTemporaryDir: GetTempDir(),
+ DockerInsecureSkipTLSVerify: types.OptionalBoolFalse,
+ DockerRegistryUserAgent: fmt.Sprintf("Buildah/%s", define.Version),
+ })
+}
diff --git a/pkg/parse/parse_unix.go b/pkg/parse/parse_unix.go
new file mode 100644
index 0000000..ff8ce85
--- /dev/null
+++ b/pkg/parse/parse_unix.go
@@ -0,0 +1,49 @@
+//go:build linux || darwin
+// +build linux darwin
+
+package parse
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/buildah/define"
+ "github.com/opencontainers/runc/libcontainer/devices"
+)
+
+// DeviceFromPath resolves a device specification (as accepted by Device)
+// into ContainerDevices.  A single device node yields one entry mapped to
+// the requested destination; a directory yields one entry per device
+// found beneath it, each mapped under the destination directory with the
+// requested permissions.
+func DeviceFromPath(device string) (define.ContainerDevices, error) {
+	var devs define.ContainerDevices
+	src, dst, permissions, err := Device(device)
+	if err != nil {
+		return nil, err
+	}
+	srcInfo, err := os.Stat(src)
+	if err != nil {
+		return nil, fmt.Errorf("getting info of source device %s: %w", src, err)
+	}
+
+	if !srcInfo.IsDir() {
+		// Single device node: remap its path to the destination.
+		dev, err := devices.DeviceFromPath(src, permissions)
+		if err != nil {
+			return nil, fmt.Errorf("%s is not a valid device: %w", src, err)
+		}
+		dev.Path = dst
+		device := define.BuildahDevice{Device: *dev, Source: src, Destination: dst}
+		devs = append(devs, device)
+		return devs, nil
+	}
+
+	// If source device is a directory
+	srcDevices, err := devices.GetDevices(src)
+	if err != nil {
+		return nil, fmt.Errorf("getting source devices from directory %s: %w", src, err)
+	}
+	for _, d := range srcDevices {
+		// Keep each device's base name, rooted at the destination.
+		d.Path = filepath.Join(dst, filepath.Base(d.Path))
+		d.Permissions = devices.Permissions(permissions)
+		device := define.BuildahDevice{Device: *d, Source: src, Destination: dst}
+		devs = append(devs, device)
+	}
+	return devs, nil
+}
diff --git a/pkg/parse/parse_unsupported.go b/pkg/parse/parse_unsupported.go
new file mode 100644
index 0000000..e3d3a71
--- /dev/null
+++ b/pkg/parse/parse_unsupported.go
@@ -0,0 +1,18 @@
+//go:build !linux && !darwin
+// +build !linux,!darwin
+
+package parse
+
+import (
+ "errors"
+
+ "github.com/containers/buildah/define"
+)
+
+func getDefaultProcessLimits() []string {
+ return []string{}
+}
+
+func DeviceFromPath(device string) (define.ContainerDevices, error) {
+ return nil, errors.New("devices not supported")
+}
diff --git a/pkg/rusage/rusage.go b/pkg/rusage/rusage.go
new file mode 100644
index 0000000..7b1226d
--- /dev/null
+++ b/pkg/rusage/rusage.go
@@ -0,0 +1,48 @@
+package rusage
+
+import (
+ "fmt"
+ "time"
+
+ units "github.com/docker/go-units"
+)
+
+// Rusage is a subset of a Unix-style resource usage counter for the current
+// process and its children. The counters are always 0 on platforms where the
+// system call is not available (i.e., systems where getrusage() doesn't
+// exist).
+type Rusage struct {
+ Date time.Time
+ Elapsed time.Duration
+ Utime, Stime time.Duration
+ Inblock, Outblock int64
+}
+
+// FormatDiff formats the result of rusage.Rusage.Subtract() for logging.
+func FormatDiff(diff Rusage) string {
+ return fmt.Sprintf("%s(system) %s(user) %s(elapsed) %s input %s output", diff.Stime.Round(time.Millisecond), diff.Utime.Round(time.Millisecond), diff.Elapsed.Round(time.Millisecond), units.HumanSize(float64(diff.Inblock*512)), units.HumanSize(float64(diff.Outblock*512)))
+}
+
+// Subtract subtracts the items in baseline from r, and returns the
+// difference.  The Date field is zeroed for easier comparison with the
+// zero value for the Rusage type; Elapsed is set to the time between
+// baseline's Date and r's Date.
+func (r Rusage) Subtract(baseline Rusage) Rusage {
+	return Rusage{
+		Elapsed:  r.Date.Sub(baseline.Date),
+		Utime:    r.Utime - baseline.Utime,
+		Stime:    r.Stime - baseline.Stime,
+		Inblock:  r.Inblock - baseline.Inblock,
+		Outblock: r.Outblock - baseline.Outblock,
+	}
+}
+
+// Get returns a snapshot of the resource usage counters from the
+// platform-specific implementation, or an error.  The Elapsed field is
+// always zero in the returned value; use Subtract on two snapshots to
+// compute elapsed time.
+func Get() (Rusage, error) {
+	counters, err := get()
+	if err != nil {
+		return Rusage{}, err
+	}
+	return counters, nil
+}
diff --git a/pkg/rusage/rusage_test.go b/pkg/rusage/rusage_test.go
new file mode 100644
index 0000000..62ac573
--- /dev/null
+++ b/pkg/rusage/rusage_test.go
@@ -0,0 +1,48 @@
+package rusage
+
+import (
+ "flag"
+ "os"
+ "testing"
+
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ noopCommand = "noop"
+)
+
+func noopMain() {
+}
+
+func init() {
+ reexec.Register(noopCommand, noopMain)
+}
+
+func TestMain(m *testing.M) {
+ if reexec.Init() {
+ return
+ }
+ flag.Parse()
+ if testing.Verbose() {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ os.Exit(m.Run())
+}
+
+func TestRusage(t *testing.T) {
+ if !Supported() {
+ t.Skip("not supported on this platform")
+ }
+ before, err := Get()
+ require.Nil(t, err, "unexpected error from GetRusage before running child: %v", err)
+ cmd := reexec.Command(noopCommand)
+ err = cmd.Run()
+ require.Nil(t, err, "unexpected error running child process: %v", err)
+ after, err := Get()
+ require.Nil(t, err, "unexpected error from GetRusage after running child: %v", err)
+ t.Logf("rusage from child: %#v", FormatDiff(after.Subtract(before)))
+ require.NotZero(t, after.Subtract(before), "running a child process didn't use any resources?")
+}
diff --git a/pkg/rusage/rusage_unix.go b/pkg/rusage/rusage_unix.go
new file mode 100644
index 0000000..317046f
--- /dev/null
+++ b/pkg/rusage/rusage_unix.go
@@ -0,0 +1,35 @@
+//go:build !windows
+// +build !windows
+
+package rusage
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+)
+
+func mkduration(tv syscall.Timeval) time.Duration {
+ return time.Duration(tv.Sec)*time.Second + time.Duration(tv.Usec)*time.Microsecond
+}
+
+// get snapshots resource usage of the process's waited-for children via
+// getrusage(RUSAGE_CHILDREN), stamping the result with the current time.
+// Inblock/Outblock are passed through unchanged; FormatDiff interprets
+// them as 512-byte blocks.
+func get() (Rusage, error) {
+	var rusage syscall.Rusage
+	err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage)
+	if err != nil {
+		return Rusage{}, fmt.Errorf("getting resource usage: %w", err)
+	}
+	r := Rusage{
+		Date:     time.Now(),
+		Utime:    mkduration(rusage.Utime),
+		Stime:    mkduration(rusage.Stime),
+		Inblock:  int64(rusage.Inblock), // nolint: unconvert
+		Outblock: int64(rusage.Oublock), // nolint: unconvert
+	}
+	return r, nil
+}
+
+// Supported returns true if resource usage counters are supported on this OS.
+func Supported() bool {
+ return true
+}
diff --git a/pkg/rusage/rusage_unsupported.go b/pkg/rusage/rusage_unsupported.go
new file mode 100644
index 0000000..54ed77f
--- /dev/null
+++ b/pkg/rusage/rusage_unsupported.go
@@ -0,0 +1,18 @@
+//go:build windows
+// +build windows
+
+package rusage
+
+import (
+ "fmt"
+ "syscall"
+)
+
+func get() (Rusage, error) {
+ return Rusage{}, fmt.Errorf("getting resource usage: %w", syscall.ENOTSUP)
+}
+
+// Supported returns true if resource usage counters are supported on this OS.
+func Supported() bool {
+ return false
+}
diff --git a/pkg/sshagent/sshagent.go b/pkg/sshagent/sshagent.go
new file mode 100644
index 0000000..ec28482
--- /dev/null
+++ b/pkg/sshagent/sshagent.go
@@ -0,0 +1,254 @@
+package sshagent
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/containers/buildah/internal/tmpdir"
+ "github.com/opencontainers/selinux/go-selinux"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+)
+
+// AgentServer is an ssh agent that can be served and shutdown at a later time
+type AgentServer struct {
+ agent agent.Agent
+ wg sync.WaitGroup
+ conn *net.Conn
+ listener net.Listener
+ shutdown chan bool
+ servePath string
+ serveDir string
+}
+
+// NewAgentServer creates a new agent on the host
+func NewAgentServer(source *Source) (*AgentServer, error) {
+ if source.Keys != nil {
+ return newAgentServerKeyring(source.Keys)
+ }
+ return newAgentServerSocket(source.Socket)
+}
+
+// newAgentServerKeyring creates a new agent from scratch and adds keys
+func newAgentServerKeyring(keys []interface{}) (*AgentServer, error) {
+ a := agent.NewKeyring()
+ for _, k := range keys {
+ if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil {
+ return nil, fmt.Errorf("failed to create ssh agent: %w", err)
+ }
+ }
+ return &AgentServer{
+ agent: a,
+ shutdown: make(chan bool, 1),
+ }, nil
+}
+
+// newAgentServerSocket creates a new agent from an existing agent on the host
+func newAgentServerSocket(socketPath string) (*AgentServer, error) {
+ conn, err := net.Dial("unix", socketPath)
+ if err != nil {
+ return nil, err
+ }
+ a := &readOnlyAgent{agent.NewClient(conn)}
+
+ return &AgentServer{
+ agent: a,
+ conn: &conn,
+ shutdown: make(chan bool, 1),
+ }, nil
+
+}
+
+// Serve starts the SSH agent on the host and returns the path of the socket where the agent is serving
+func (a *AgentServer) Serve(processLabel string) (string, error) {
+	// Calls to `selinux.SetSocketLabel` should be wrapped in
+	// runtime.LockOSThread()/runtime.UnlockOSThread() until
+	// the socket is created to guarantee another goroutine
+	// does not migrate to the current thread before execution
+	// is complete.
+	// Ref: https://github.com/opencontainers/selinux/blob/main/go-selinux/selinux.go#L158
+	runtime.LockOSThread()
+	err := selinux.SetSocketLabel(processLabel)
+	if err != nil {
+		return "", err
+	}
+	serveDir, err := os.MkdirTemp(tmpdir.GetTempDir(), ".buildah-ssh-sock")
+	if err != nil {
+		return "", err
+	}
+	servePath := filepath.Join(serveDir, "ssh_auth_sock")
+	a.serveDir = serveDir
+	a.servePath = servePath
+	listener, err := net.Listen("unix", servePath)
+	if err != nil {
+		return "", err
+	}
+	// Reset socket label.
+	err = selinux.SetSocketLabel("")
+	// Unlock the thread only if the process label could be restored
+	// successfully. Otherwise leave the thread locked and the Go runtime
+	// will terminate it once it returns to the threads pool.
+	runtime.UnlockOSThread()
+	if err != nil {
+		return "", err
+	}
+	a.listener = listener
+
+	// Accept loop: serve each connection in its own goroutine until
+	// Shutdown closes the listener.
+	go func() {
+		for {
+			//listener.Accept blocks
+			c, err := listener.Accept()
+			if err != nil {
+				select {
+				case <-a.shutdown:
+					// Shutdown was requested; stop accepting.
+					return
+				default:
+					logrus.Errorf("error accepting SSH connection: %v", err)
+					continue
+				}
+			}
+			a.wg.Add(1)
+			go func() {
+				// agent.ServeAgent will only ever return with error,
+				err := agent.ServeAgent(a.agent, c)
+				if err != io.EOF {
+					logrus.Errorf("error serving agent: %v", err)
+				}
+				a.wg.Done()
+			}()
+			// the only way to get agent.ServeAgent to return is to close the connection it's serving on
+			// TODO: ideally we should use some sort of forwarding mechanism for output instead of manually closing connection.
+			go func() {
+				time.Sleep(2000 * time.Millisecond)
+				c.Close()
+			}()
+		}
+	}()
+	return a.servePath, nil
+}
+
+// Shutdown shuts down the agent and closes the socket
+// It signals the accept loop (the shutdown channel has capacity 1, so
+// the send does not block), closes the listener and any upstream agent
+// connection, waits for in-flight connections to be served, and removes
+// the temporary socket directory.
+func (a *AgentServer) Shutdown() error {
+	if a.listener != nil {
+		a.shutdown <- true
+		a.listener.Close()
+	}
+	if a.conn != nil {
+		conn := *a.conn
+		conn.Close()
+	}
+	a.wg.Wait()
+	err := os.RemoveAll(a.serveDir)
+	if err != nil {
+		return err
+	}
+	a.serveDir = ""
+	a.servePath = ""
+	return nil
+}
+
+// ServePath returns the path where the agent is serving
+func (a *AgentServer) ServePath() string {
+ return a.servePath
+}
+
+// readOnlyAgent and its functions originally from github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go
+
+// readOnlyAgent implements the agent.Agent interface
+// readOnlyAgent allows reads only to prevent keys from being added from the build to the forwarded ssh agent on the host
+type readOnlyAgent struct {
+	agent.ExtendedAgent
+}
+
+func (a *readOnlyAgent) Add(_ agent.AddedKey) error {
+ return errors.New("adding new keys not allowed by buildah")
+}
+
+func (a *readOnlyAgent) Remove(_ ssh.PublicKey) error {
+ return errors.New("removing keys not allowed by buildah")
+}
+
+func (a *readOnlyAgent) RemoveAll() error {
+ return errors.New("removing keys not allowed by buildah")
+}
+
+func (a *readOnlyAgent) Lock(_ []byte) error {
+ return errors.New("locking agent not allowed by buildah")
+}
+
+func (a *readOnlyAgent) Extension(_ string, _ []byte) ([]byte, error) {
+ return nil, errors.New("extensions not allowed by buildah")
+}
+
+// Source is what the forwarded agent's source is
+// The source of the forwarded agent can be from a socket on the host, or from individual key files
+type Source struct {
+ Socket string
+ Keys []interface{}
+}
+
+// NewSource takes paths and checks whether they are key files or a
+// socket, and creates a Source.  With no paths, $SSH_AUTH_SOCK is
+// consulted.  At most one socket may be specified, and sockets cannot be
+// mixed with key files; exactly one of Socket or Keys is set in the
+// result.
+func NewSource(paths []string) (*Source, error) {
+	var keys []interface{}
+	var socket string
+	if len(paths) == 0 {
+		socket = os.Getenv("SSH_AUTH_SOCK")
+		if socket == "" {
+			return nil, errors.New("SSH_AUTH_SOCK not set in environment")
+		}
+		absSocket, err := filepath.Abs(socket)
+		if err != nil {
+			return nil, fmt.Errorf("evaluating SSH_AUTH_SOCK in environment: %w", err)
+		}
+		socket = absSocket
+	}
+	for _, p := range paths {
+		if socket != "" {
+			return nil, errors.New("only one socket is allowed")
+		}
+
+		fi, err := os.Stat(p)
+		if err != nil {
+			return nil, err
+		}
+		if fi.Mode()&os.ModeSocket > 0 {
+			if len(keys) == 0 {
+				socket = p
+			} else {
+				return nil, errors.New("cannot mix keys and socket file")
+			}
+			continue
+		}
+
+		// NOTE(review): f is never closed, leaking a file descriptor
+		// per key file — consider a deferred or explicit Close.
+		f, err := os.Open(p)
+		if err != nil {
+			return nil, err
+		}
+		// Cap reads at 100KiB; private key files are far smaller.
+		dt, err := io.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
+		if err != nil {
+			return nil, err
+		}
+
+		k, err := ssh.ParseRawPrivateKey(dt)
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse ssh key: %w", err)
+		}
+		keys = append(keys, k)
+	}
+	if socket != "" {
+		return &Source{
+			Socket: socket,
+		}, nil
+	}
+	return &Source{
+		Keys: keys,
+	}, nil
+}
diff --git a/pkg/sshagent/sshagent_test.go b/pkg/sshagent/sshagent_test.go
new file mode 100644
index 0000000..bf0d68b
--- /dev/null
+++ b/pkg/sshagent/sshagent_test.go
@@ -0,0 +1,55 @@
+package sshagent
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "net"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/ssh/agent"
+)
+
+func testNewKeySource() (*Source, error) {
+ k, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return nil, err
+ }
+ return &Source{
+ Keys: []interface{}{k},
+ }, nil
+}
+
+func testClient(path string) ([]*agent.Key, error) {
+ conn, err := net.Dial("unix", path)
+ if err != nil {
+ return nil, err
+ }
+ ac := agent.NewClient(conn)
+ keys, err := ac.List()
+ if err != nil {
+ return nil, err
+ }
+ return keys, nil
+
+}
+
+func TestAgentServer(t *testing.T) {
+ src, err := testNewKeySource()
+ require.NoError(t, err)
+ ag, err := NewAgentServer(src)
+ require.NoError(t, err)
+ sock, err := ag.Serve("")
+ require.NoError(t, err)
+ // Get key from agent
+ keys, err := testClient(sock)
+ require.NoError(t, err)
+ require.Equal(t, len(keys), 1)
+ require.Equal(t, keys[0].Type(), "ssh-rsa")
+ // Check for proper shutdown
+ err = ag.Shutdown()
+ require.NoError(t, err)
+
+ _, err = testClient(sock)
+ require.Error(t, err)
+}
diff --git a/pkg/supplemented/compat.go b/pkg/supplemented/compat.go
new file mode 100644
index 0000000..5689648
--- /dev/null
+++ b/pkg/supplemented/compat.go
@@ -0,0 +1,26 @@
+// This package is deprecated. Its functionality has been moved to
+// github.com/containers/common/pkg/supplemented, which provides the same API.
+// The stubs and aliases here are present for compatibility with older code.
+// New implementations should use github.com/containers/common/pkg/supplemented
+// directly.
+package supplemented
+
+import (
+ "github.com/containers/common/pkg/manifests"
+ "github.com/containers/common/pkg/supplemented"
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrDigestNotFound is an alias for github.com/containers/common/pkg/manifests.ErrDigestNotFound.
+ ErrDigestNotFound = manifests.ErrDigestNotFound
+ // ErrBlobNotFound is an alias for github.com/containers/common/pkg/supplemented.ErrBlobNotFound.
+ ErrBlobNotFound = supplemented.ErrBlobNotFound
+)
+
+// Reference wraps github.com/containers/common/pkg/supplemented.Reference().
+func Reference(ref types.ImageReference, supplemental []types.ImageReference, multiple cp.ImageListSelection, instances []digest.Digest) types.ImageReference {
+ return supplemented.Reference(ref, supplemental, multiple, instances)
+}
diff --git a/pkg/umask/umask.go b/pkg/umask/umask.go
new file mode 100644
index 0000000..c5edead
--- /dev/null
+++ b/pkg/umask/umask.go
@@ -0,0 +1,13 @@
+package umask
+
+import (
+ "github.com/containers/common/pkg/umask"
+)
+
+// CheckUmask wraps github.com/containers/common/pkg/umask.Check.
+func CheckUmask() {
+	umask.Check()
+}
+
+// SetUmask wraps github.com/containers/common/pkg/umask.Set, passing
+// value through and returning the wrapped implementation's result.
+func SetUmask(value int) int {
+	return umask.Set(value)
+}
diff --git a/pkg/util/resource_unix.go b/pkg/util/resource_unix.go
new file mode 100644
index 0000000..4f7c08c
--- /dev/null
+++ b/pkg/util/resource_unix.go
@@ -0,0 +1,38 @@
+//go:build linux || freebsd || darwin
+// +build linux freebsd darwin
+
+package util
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/docker/go-units"
+)
+
+// ParseUlimit parses a ulimit option in name=SOFT:HARD form.  Values of
+// -1 are resolved against the current process's corresponding rlimit,
+// except that a soft value of -1 paired with an explicit hard value is
+// returned unchanged (NOTE(review): presumably meaning "unlimited" —
+// confirm).
+func ParseUlimit(ulimit string) (*units.Ulimit, error) {
+	ul, err := units.ParseUlimit(ulimit)
+	if err != nil {
+		return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err)
+	}
+
+	if ul.Hard != -1 && ul.Soft == -1 {
+		return ul, nil
+	}
+
+	// Fill in any remaining -1 values from the live rlimit.
+	rl, err := ul.GetRlimit()
+	if err != nil {
+		return nil, err
+	}
+	var limit syscall.Rlimit
+	if err := syscall.Getrlimit(rl.Type, &limit); err != nil {
+		return nil, err
+	}
+	if ul.Soft == -1 {
+		ul.Soft = int64(limit.Cur)
+	}
+	if ul.Hard == -1 {
+		ul.Hard = int64(limit.Max)
+	}
+	return ul, nil
+}
diff --git a/pkg/util/resource_unix_test.go b/pkg/util/resource_unix_test.go
new file mode 100644
index 0000000..6ee95dc
--- /dev/null
+++ b/pkg/util/resource_unix_test.go
@@ -0,0 +1,32 @@
+package util
+
+import (
+ "syscall"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseUlimit(t *testing.T) {
+ _, err := ParseUlimit("bogus")
+ assert.NotNil(t, err)
+
+ ul, err := ParseUlimit("memlock=100:200")
+ assert.Nil(t, err)
+ assert.Equal(t, ul.Soft, int64(100))
+ assert.Equal(t, ul.Hard, int64(200))
+
+ var limit syscall.Rlimit
+ err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit)
+ assert.Nil(t, err)
+
+ ul, err = ParseUlimit("nofile=-1:-1")
+ assert.Nil(t, err)
+ assert.Equal(t, ul.Soft, int64(limit.Cur))
+ assert.Equal(t, ul.Hard, int64(limit.Max))
+
+ ul, err = ParseUlimit("nofile=100:-1")
+ assert.Nil(t, err)
+ assert.Equal(t, ul.Soft, int64(100))
+ assert.Equal(t, ul.Hard, int64(limit.Max))
+}
diff --git a/pkg/util/resource_windows.go b/pkg/util/resource_windows.go
new file mode 100644
index 0000000..3717091
--- /dev/null
+++ b/pkg/util/resource_windows.go
@@ -0,0 +1,16 @@
+package util
+
+import (
+ "fmt"
+
+ "github.com/docker/go-units"
+)
+
+func ParseUlimit(ulimit string) (*units.Ulimit, error) {
+ ul, err := units.ParseUlimit(ulimit)
+ if err != nil {
+ return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err)
+ }
+
+ return ul, nil
+}
diff --git a/pkg/util/test/test1/Containerfile b/pkg/util/test/test1/Containerfile
new file mode 100644
index 0000000..453e3ac
--- /dev/null
+++ b/pkg/util/test/test1/Containerfile
@@ -0,0 +1 @@
+from scratch
diff --git a/pkg/util/test/test1/Dockerfile b/pkg/util/test/test1/Dockerfile
new file mode 100644
index 0000000..453e3ac
--- /dev/null
+++ b/pkg/util/test/test1/Dockerfile
@@ -0,0 +1 @@
+from scratch
diff --git a/pkg/util/test/test2/Dockerfile b/pkg/util/test/test2/Dockerfile
new file mode 100644
index 0000000..453e3ac
--- /dev/null
+++ b/pkg/util/test/test2/Dockerfile
@@ -0,0 +1 @@
+from scratch
diff --git a/pkg/util/uptime_darwin.go b/pkg/util/uptime_darwin.go
new file mode 100644
index 0000000..d185cb4
--- /dev/null
+++ b/pkg/util/uptime_darwin.go
@@ -0,0 +1,10 @@
+package util
+
+import (
+ "errors"
+ "time"
+)
+
// ReadUptime is not implemented on darwin; it always returns a zero
// duration and an error.
func ReadUptime() (time.Duration, error) {
	var zero time.Duration
	return zero, errors.New("readUptime not supported on darwin")
}
diff --git a/pkg/util/uptime_freebsd.go b/pkg/util/uptime_freebsd.go
new file mode 100644
index 0000000..7112aba
--- /dev/null
+++ b/pkg/util/uptime_freebsd.go
@@ -0,0 +1,25 @@
+package util
+
+import (
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// For some reason, unix.ClockGettime isn't implemented by x/sys/unix on FreeBSD
+func clockGettime(clockid int32, time *unix.Timespec) (err error) {
+ _, _, e1 := unix.Syscall(unix.SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ return e1
+ }
+ return nil
+}
+
+func ReadUptime() (time.Duration, error) {
+ var uptime unix.Timespec
+ if err := clockGettime(unix.CLOCK_UPTIME, &uptime); err != nil {
+ return 0, err
+ }
+ return time.Duration(unix.TimespecToNsec(uptime)), nil
+}
diff --git a/pkg/util/uptime_linux.go b/pkg/util/uptime_linux.go
new file mode 100644
index 0000000..a27a480
--- /dev/null
+++ b/pkg/util/uptime_linux.go
@@ -0,0 +1,28 @@
+package util
+
+import (
+ "bytes"
+ "errors"
+ "time"
+ "os"
+)
+
// ReadUptime returns the system uptime as reported by the first field
// of /proc/uptime (seconds, with sub-second precision).
func ReadUptime() (time.Duration, error) {
	data, err := os.ReadFile("/proc/uptime")
	if err != nil {
		return 0, err
	}
	fields := bytes.Fields(data)
	if len(fields) < 1 {
		return 0, errors.New("invalid uptime")
	}

	// The field is a decimal count of seconds; append the "s" unit and
	// let time.ParseDuration handle the fractional part.
	seconds := string(fields[0]) + "s"
	return time.ParseDuration(seconds)
}
diff --git a/pkg/util/uptime_windows.go b/pkg/util/uptime_windows.go
new file mode 100644
index 0000000..ef3adac
--- /dev/null
+++ b/pkg/util/uptime_windows.go
@@ -0,0 +1,10 @@
+package util
+
+import (
+ "errors"
+ "time"
+)
+
// ReadUptime is not implemented on windows; it always returns a zero
// duration and an error.
func ReadUptime() (time.Duration, error) {
	var zero time.Duration
	return zero, errors.New("readUptime not supported on windows")
}
diff --git a/pkg/util/util.go b/pkg/util/util.go
new file mode 100644
index 0000000..17ad360
--- /dev/null
+++ b/pkg/util/util.go
@@ -0,0 +1,82 @@
+package util
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/buildah/pkg/parse"
+)
+
+// Mirrors path to a tmpfile if path points to a
+// file descriptor instead of actual file on filesystem
+// reason: operations with file descriptors are can lead
+// to edge cases where content on FD is not in a consumable
+// state after first consumption.
+// returns path as string and bool to confirm if temp file
+// was created and needs to be cleaned up.
+func MirrorToTempFileIfPathIsDescriptor(file string) (string, bool) {
+ // one use-case is discussed here
+ // https://github.com/containers/buildah/issues/3070
+ if !strings.HasPrefix(file, "/dev/fd/") {
+ return file, false
+ }
+ b, err := os.ReadFile(file)
+ if err != nil {
+ // if anything goes wrong return original path
+ return file, false
+ }
+ tmpfile, err := os.CreateTemp(parse.GetTempDir(), "buildah-temp-file")
+ if err != nil {
+ return file, false
+ }
+ defer tmpfile.Close()
+ if _, err := tmpfile.Write(b); err != nil {
+ // if anything goes wrong return original path
+ return file, false
+ }
+
+ return tmpfile.Name(), true
+}
+
// DiscoverContainerfile tries to find a Containerfile or a Dockerfile within the provided `path`.
// A regular file is returned verbatim; a directory is searched for a
// Containerfile first, then a Dockerfile.
func DiscoverContainerfile(path string) (string, error) {
	// The path itself must exist.
	info, err := os.Stat(path)
	if err != nil {
		return "", fmt.Errorf("discovering Containerfile: %w", err)
	}

	switch {
	case info.Mode().IsRegular():
		// A plain file is assumed to be the Containerfile itself.
		return path, nil

	case info.Mode().IsDir():
		// Prefer a Containerfile inside the directory.
		ctrfile := filepath.Join(path, "Containerfile")
		st, statErr := os.Stat(ctrfile)
		if statErr != nil {
			// Fall back to a Dockerfile.
			ctrfile = filepath.Join(path, "Dockerfile")
			st, statErr = os.Stat(ctrfile)
			if statErr != nil {
				return "", fmt.Errorf("cannot find Containerfile or Dockerfile in context directory: %w", statErr)
			}
		}
		// Whichever candidate exists must be a regular file.
		if !st.Mode().IsRegular() {
			return "", fmt.Errorf("assumed Containerfile %q is not a file", ctrfile)
		}
		return ctrfile, nil
	}

	// Neither a directory nor a regular file: matches the original
	// behavior of returning an empty name with no error.
	return "", nil
}
diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go
new file mode 100644
index 0000000..a39108e
--- /dev/null
+++ b/pkg/util/util_test.go
@@ -0,0 +1,32 @@
+package util
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDiscoverContainerfile(t *testing.T) {
+ _, err := DiscoverContainerfile("./bogus")
+ assert.NotNil(t, err)
+
+ _, err = DiscoverContainerfile("./")
+ assert.NotNil(t, err)
+
+ name, err := DiscoverContainerfile("test/test1/Dockerfile")
+ assert.Nil(t, err)
+ assert.Equal(t, name, "test/test1/Dockerfile")
+
+ name, err = DiscoverContainerfile("test/test1/Containerfile")
+ assert.Nil(t, err)
+ assert.Equal(t, name, "test/test1/Containerfile")
+
+ name, err = DiscoverContainerfile("test/test1")
+ assert.Nil(t, err)
+ assert.Equal(t, name, "test/test1/Containerfile")
+
+ name, err = DiscoverContainerfile("test/test2")
+ assert.Nil(t, err)
+ assert.Equal(t, name, "test/test2/Dockerfile")
+
+}
diff --git a/pkg/util/version_unix.go b/pkg/util/version_unix.go
new file mode 100644
index 0000000..88e8b58
--- /dev/null
+++ b/pkg/util/version_unix.go
@@ -0,0 +1,19 @@
+//go:build linux || freebsd || darwin
+// +build linux freebsd darwin
+
+package util
+
+import (
+ "bytes"
+
+ "golang.org/x/sys/unix"
+)
+
+func ReadKernelVersion() (string, error) {
+ var uname unix.Utsname
+ if err := unix.Uname(&uname); err != nil {
+ return "", err
+ }
+ n := bytes.IndexByte(uname.Release[:], 0)
+ return string(uname.Release[:n]), nil
+}
diff --git a/pkg/util/version_windows.go b/pkg/util/version_windows.go
new file mode 100644
index 0000000..9acf469
--- /dev/null
+++ b/pkg/util/version_windows.go
@@ -0,0 +1,10 @@
+package util
+
+import (
+ "errors"
+)
+
// ReadKernelVersion is not implemented on windows; it always returns an
// empty string and an error.
func ReadKernelVersion() (string, error) {
	return "", errors.New("readKernelVersion not supported on windows")
}
diff --git a/pkg/volumes/volumes.go b/pkg/volumes/volumes.go
new file mode 100644
index 0000000..aa469a2
--- /dev/null
+++ b/pkg/volumes/volumes.go
@@ -0,0 +1,13 @@
+package volumes
+
+import (
+ "os"
+
+ "github.com/containers/buildah/internal/volumes"
+)
+
+// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it.
+func CleanCacheMount() error {
+ cacheParent := volumes.CacheParent()
+ return os.RemoveAll(cacheParent)
+}