author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-16 17:12:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-16 17:12:05 +0000
commit     9ec46d47bedefa10bdaaa8a587ddb1851ef396ec (patch)
tree       ba7545ee99b384a6fc3e5ea028ae4c643648d683 /internal
parent     Initial commit. (diff)
Adding upstream version 1.33.5+ds1. (upstream/1.33.5+ds1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'internal')
-rw-r--r--  internal/config/convert.go        | 121
-rw-r--r--  internal/config/convert_test.go   | 166
-rw-r--r--  internal/config/executor.go       |  45
-rw-r--r--  internal/config/executor_test.go  |   5
-rw-r--r--  internal/config/override.go       | 181
-rw-r--r--  internal/mkcw/archive.go          | 464
-rw-r--r--  internal/mkcw/archive_test.go     | 181
-rw-r--r--  internal/mkcw/attest.go           | 250
-rwxr-xr-x  internal/mkcw/embed/entrypoint.gz | bin 0 -> 405 bytes
-rw-r--r--  internal/mkcw/embed/entrypoint.s  |  16
-rw-r--r--  internal/mkcw/entrypoint.go       |   6
-rw-r--r--  internal/mkcw/luks.go             |  51
-rw-r--r--  internal/mkcw/luks_test.go        |  66
-rw-r--r--  internal/mkcw/makefs.go           |  38
-rw-r--r--  internal/mkcw/types/attest.go     |  47
-rw-r--r--  internal/mkcw/types/workload.go   |  34
-rw-r--r--  internal/mkcw/workload.go         | 223
-rw-r--r--  internal/mkcw/workload_test.go    |  62
-rw-r--r--  internal/parse/parse.go           |  79
-rw-r--r--  internal/source/add.go            | 133
-rw-r--r--  internal/source/create.go         |  70
-rw-r--r--  internal/source/pull.go           | 110
-rw-r--r--  internal/source/push.go           |  69
-rw-r--r--  internal/source/source.go         | 121
-rw-r--r--  internal/tmpdir/tmpdir.go         |  26
-rw-r--r--  internal/tmpdir/tmpdir_test.go    |  58
-rw-r--r--  internal/types.go                 |  18
-rw-r--r--  internal/util/util.go             |  99
-rw-r--r--  internal/volumes/volumes.go       | 637
29 files changed, 3376 insertions(+), 0 deletions(-)
diff --git a/internal/config/convert.go b/internal/config/convert.go
new file mode 100644
index 0000000..7287c67
--- /dev/null
+++ b/internal/config/convert.go
@@ -0,0 +1,121 @@
+package config
+
+import (
+ "github.com/containers/image/v5/manifest"
+ dockerclient "github.com/fsouza/go-dockerclient"
+)
+
+// Schema2ConfigFromGoDockerclientConfig converts a go-dockerclient Config
+// structure to a manifest Schema2Config.
+func Schema2ConfigFromGoDockerclientConfig(config *dockerclient.Config) *manifest.Schema2Config {
+ overrideExposedPorts := make(map[manifest.Schema2Port]struct{})
+ for port := range config.ExposedPorts {
+ overrideExposedPorts[manifest.Schema2Port(port)] = struct{}{}
+ }
+ var overrideHealthCheck *manifest.Schema2HealthConfig
+ if config.Healthcheck != nil {
+ overrideHealthCheck = &manifest.Schema2HealthConfig{
+ Test: config.Healthcheck.Test,
+ StartPeriod: config.Healthcheck.StartPeriod,
+ Interval: config.Healthcheck.Interval,
+ Timeout: config.Healthcheck.Timeout,
+ Retries: config.Healthcheck.Retries,
+ }
+ }
+ labels := make(map[string]string)
+ for k, v := range config.Labels {
+ labels[k] = v
+ }
+ volumes := make(map[string]struct{})
+ for v := range config.Volumes {
+ volumes[v] = struct{}{}
+ }
+ s2config := &manifest.Schema2Config{
+ Hostname: config.Hostname,
+ Domainname: config.Domainname,
+ User: config.User,
+ AttachStdin: config.AttachStdin,
+ AttachStdout: config.AttachStdout,
+ AttachStderr: config.AttachStderr,
+ ExposedPorts: overrideExposedPorts,
+ Tty: config.Tty,
+ OpenStdin: config.OpenStdin,
+ StdinOnce: config.StdinOnce,
+ Env: append([]string{}, config.Env...),
+ Cmd: append([]string{}, config.Cmd...),
+ Healthcheck: overrideHealthCheck,
+ ArgsEscaped: config.ArgsEscaped,
+ Image: config.Image,
+ Volumes: volumes,
+ WorkingDir: config.WorkingDir,
+ Entrypoint: append([]string{}, config.Entrypoint...),
+ NetworkDisabled: config.NetworkDisabled,
+ MacAddress: config.MacAddress,
+ OnBuild: append([]string{}, config.OnBuild...),
+ Labels: labels,
+ StopSignal: config.StopSignal,
+ Shell: config.Shell,
+ }
+ if config.StopTimeout != 0 {
+ s2config.StopTimeout = &config.StopTimeout
+ }
+ return s2config
+}
+
+// GoDockerclientConfigFromSchema2Config converts a manifest Schema2Config
+// to a go-dockerclient config structure.
+func GoDockerclientConfigFromSchema2Config(s2config *manifest.Schema2Config) *dockerclient.Config {
+ overrideExposedPorts := make(map[dockerclient.Port]struct{})
+ for port := range s2config.ExposedPorts {
+ overrideExposedPorts[dockerclient.Port(port)] = struct{}{}
+ }
+ var healthCheck *dockerclient.HealthConfig
+ if s2config.Healthcheck != nil {
+ healthCheck = &dockerclient.HealthConfig{
+ Test: s2config.Healthcheck.Test,
+ StartPeriod: s2config.Healthcheck.StartPeriod,
+ Interval: s2config.Healthcheck.Interval,
+ Timeout: s2config.Healthcheck.Timeout,
+ Retries: s2config.Healthcheck.Retries,
+ }
+ }
+ labels := make(map[string]string)
+ for k, v := range s2config.Labels {
+ labels[k] = v
+ }
+ volumes := make(map[string]struct{})
+ for v := range s2config.Volumes {
+ volumes[v] = struct{}{}
+ }
+ config := &dockerclient.Config{
+ Hostname: s2config.Hostname,
+ Domainname: s2config.Domainname,
+ User: s2config.User,
+ AttachStdin: s2config.AttachStdin,
+ AttachStdout: s2config.AttachStdout,
+ AttachStderr: s2config.AttachStderr,
+ PortSpecs: nil,
+ ExposedPorts: overrideExposedPorts,
+ Tty: s2config.Tty,
+ OpenStdin: s2config.OpenStdin,
+ StdinOnce: s2config.StdinOnce,
+ Env: append([]string{}, s2config.Env...),
+ Cmd: append([]string{}, s2config.Cmd...),
+ Healthcheck: healthCheck,
+ ArgsEscaped: s2config.ArgsEscaped,
+ Image: s2config.Image,
+ Volumes: volumes,
+ WorkingDir: s2config.WorkingDir,
+ Entrypoint: append([]string{}, s2config.Entrypoint...),
+ NetworkDisabled: s2config.NetworkDisabled,
+ MacAddress: s2config.MacAddress,
+ OnBuild: append([]string{}, s2config.OnBuild...),
+ Labels: labels,
+ StopSignal: s2config.StopSignal,
+ Shell: s2config.Shell,
+ }
+ if s2config.StopTimeout != nil {
+ config.StopTimeout = *s2config.StopTimeout
+ }
+ return config
+}
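+
+// Illustrative sketch (not part of the upstream file): the two converters
+// are intended to be inverses over the fields the two types share, so a
+// round trip preserves those fields.
+//
+//	dc := &dockerclient.Config{User: "builder", Env: []string{"A=B"}}
+//	s2 := Schema2ConfigFromGoDockerclientConfig(dc)
+//	back := GoDockerclientConfigFromSchema2Config(s2)
+//	// back.User == "builder", back.Env == []string{"A=B"}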
diff --git a/internal/config/convert_test.go b/internal/config/convert_test.go
new file mode 100644
index 0000000..589bced
--- /dev/null
+++ b/internal/config/convert_test.go
@@ -0,0 +1,166 @@
+package config
+
+import (
+ "reflect"
+ "strconv"
+ "testing"
+
+ "github.com/containers/buildah/util"
+ "github.com/containers/image/v5/manifest"
+ dockerclient "github.com/fsouza/go-dockerclient"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// fillAllFields recursively fills in 1 or "1" for every field in the passed-in
+// structure, and ensures that slices and maps have at least one value in them.
+func fillAllFields[pStruct any](t *testing.T, st pStruct) {
+ v := reflect.ValueOf(st)
+ if v.Kind() == reflect.Pointer {
+ v = reflect.Indirect(v)
+ }
+ fillAllValueFields(t, v)
+}
+
+func fillAllValueFields(t *testing.T, v reflect.Value) {
+ fields := reflect.VisibleFields(v.Type())
+ for _, field := range fields {
+ if field.Anonymous {
+ // all right, fine, keep your secrets
+ continue
+ }
+ f := v.FieldByName(field.Name)
+ var keyType, elemType reflect.Type
+ if field.Type.Kind() == reflect.Map {
+ keyType = field.Type.Key()
+ }
+ switch field.Type.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Pointer, reflect.Slice:
+ elemType = field.Type.Elem()
+ }
+ fillValue(t, f, field.Name, field.Type.Kind(), keyType, elemType)
+ }
+}
+
+func fillValue(t *testing.T, value reflect.Value, name string, kind reflect.Kind, keyType, elemType reflect.Type) {
+ switch kind {
+ case reflect.Invalid,
+ reflect.Array, reflect.Chan, reflect.Func, reflect.Interface, reflect.UnsafePointer,
+ reflect.Float32, reflect.Float64,
+ reflect.Complex64, reflect.Complex128:
+ // NotEqual(kind, kind) can never pass; this deliberately fails the
+ // test to flag field kinds these helpers don't yet know how to fill
+ require.NotEqualf(t, kind, kind, "unhandled %s field %s: tests require updating", kind, name)
+ case reflect.Bool:
+ value.SetBool(true)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ value.SetInt(1)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ value.SetUint(1)
+ case reflect.Map:
+ if value.IsNil() {
+ value.Set(reflect.MakeMap(value.Type()))
+ }
+ keyPtr := reflect.New(keyType)
+ key := reflect.Indirect(keyPtr)
+ fillValue(t, key, name, keyType.Kind(), nil, nil)
+ elemPtr := reflect.New(elemType)
+ elem := reflect.Indirect(elemPtr)
+ fillValue(t, elem, name, elemType.Kind(), nil, nil)
+ value.SetMapIndex(key, reflect.Indirect(elem))
+ case reflect.Slice:
+ vPtr := reflect.New(elemType)
+ v := reflect.Indirect(vPtr)
+ fillValue(t, v, name, elemType.Kind(), nil, nil)
+ value.Set(reflect.Append(reflect.MakeSlice(value.Type(), 0, 1), v))
+ case reflect.String:
+ value.SetString("1")
+ case reflect.Struct:
+ fillAllValueFields(t, value)
+ case reflect.Pointer:
+ p := reflect.New(elemType)
+ fillValue(t, reflect.Indirect(p), name, elemType.Kind(), nil, nil)
+ value.Set(p)
+ }
+}
+
+// checkAllFields recursively checks that every field not listed in allowZeroed
+// is not set to its zero value, that every slice is not empty, and that every
+// map has at least one entry. It makes an additional exception for structs
+// which have no defined fields.
+func checkAllFields[pStruct any](t *testing.T, st pStruct, allowZeroed []string) {
+ v := reflect.ValueOf(st)
+ if v.Kind() == reflect.Pointer {
+ v = reflect.Indirect(v)
+ }
+ checkAllValueFields(t, v, "", allowZeroed)
+}
+
+func checkAllValueFields(t *testing.T, v reflect.Value, name string, allowedToBeZero []string) {
+ fields := reflect.VisibleFields(v.Type())
+ for _, field := range fields {
+ if field.Anonymous {
+ // all right, fine, keep your secrets
+ continue
+ }
+ fieldName := field.Name
+ if name != "" {
+ fieldName = name + "." + field.Name
+ }
+ if util.StringInSlice(fieldName, allowedToBeZero) {
+ continue
+ }
+ f := v.FieldByName(field.Name)
+ var elemType reflect.Type
+ switch field.Type.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Pointer, reflect.Slice:
+ elemType = field.Type.Elem()
+ }
+ checkValue(t, f, fieldName, field.Type.Kind(), elemType, allowedToBeZero)
+ }
+}
+
+func checkValue(t *testing.T, value reflect.Value, name string, kind reflect.Kind, elemType reflect.Type, allowedToBeZero []string) {
+ if kind != reflect.Invalid {
+ switch kind {
+ case reflect.Map:
+ assert.Falsef(t, value.IsZero(), "map field %s not set when it was not already expected to be left unpopulated by conversion", name)
+ keys := value.MapKeys()
+ for i := 0; i < len(keys); i++ {
+ v := value.MapIndex(keys[i])
+ checkValue(t, v, name+"{"+keys[i].String()+"}", elemType.Kind(), nil, allowedToBeZero)
+ }
+ case reflect.Slice:
+ assert.Falsef(t, value.IsZero(), "slice field %s not set when it was not already expected to be left unpopulated by conversion", name)
+ for i := 0; i < value.Len(); i++ {
+ v := value.Index(i)
+ checkValue(t, v, name+"["+strconv.Itoa(i)+"]", elemType.Kind(), nil, allowedToBeZero)
+ }
+ case reflect.Struct:
+ if fields := reflect.VisibleFields(value.Type()); len(fields) != 0 {
+ // structs which are defined with no fields are okay
+ assert.Falsef(t, value.IsZero(), "slice field %s not set when it was not already expected to be left unpopulated by conversion", name)
+ }
+ checkAllValueFields(t, value, name, allowedToBeZero)
+ case reflect.Pointer:
+ assert.Falsef(t, value.IsZero(), "pointer field %s not set when it was not already expected to be left unpopulated by conversion", name)
+ checkValue(t, reflect.Indirect(value), name, elemType.Kind(), nil, allowedToBeZero)
+ }
+ }
+}
+
+func TestGoDockerclientConfigFromSchema2Config(t *testing.T) {
+ var input manifest.Schema2Config
+ fillAllFields(t, &input)
+ output := GoDockerclientConfigFromSchema2Config(&input)
+ // make exceptions for fields in "output" which have no corresponding field in "input"
+ notInSchema2Config := []string{"CPUSet", "CPUShares", "DNS", "Memory", "KernelMemory", "MemorySwap", "MemoryReservation", "Mounts", "PortSpecs", "PublishService", "SecurityOpts", "VolumeDriver", "VolumesFrom"}
+ checkAllFields(t, output, notInSchema2Config)
+}
+
+func TestSchema2ConfigFromGoDockerclientConfig(t *testing.T) {
+ var input dockerclient.Config
+ fillAllFields(t, &input)
+ output := Schema2ConfigFromGoDockerclientConfig(&input)
+ // make exceptions for fields in "output" which have no corresponding field in "input"
+ notInDockerConfig := []string{}
+ checkAllFields(t, output, notInDockerConfig)
+}
diff --git a/internal/config/executor.go b/internal/config/executor.go
new file mode 100644
index 0000000..19b1429
--- /dev/null
+++ b/internal/config/executor.go
@@ -0,0 +1,45 @@
+package config
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ dockerclient "github.com/fsouza/go-dockerclient"
+ "github.com/openshift/imagebuilder"
+)
+
+// configOnlyExecutor implements the Executor interface that an
+// imagebuilder.Builder expects to be able to call to do some heavy lifting,
+// but it just refuses to do the work of ADD, COPY, or RUN. It also doesn't
+// care if the working directory exists in a container, because it's really
+// only concerned with letting the Builder's RunConfig get updated by changes
+// from a Dockerfile. Try anything more than that and it'll return an error.
+type configOnlyExecutor struct{}
+
+func (g *configOnlyExecutor) Preserve(path string) error {
+ return errors.New("ADD/COPY/RUN not supported as changes")
+}
+
+func (g *configOnlyExecutor) EnsureContainerPath(path string) error {
+ return nil
+}
+
+func (g *configOnlyExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
+ return nil
+}
+
+func (g *configOnlyExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
+ if len(copies) == 0 {
+ return nil
+ }
+ return errors.New("ADD/COPY not supported as changes")
+}
+
+func (g *configOnlyExecutor) Run(run imagebuilder.Run, config dockerclient.Config) error {
+ return errors.New("RUN not supported as changes")
+}
+
+func (g *configOnlyExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
+ return fmt.Errorf("did not understand change instruction %q", step.Original)
+}
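+
+// Illustrative sketch (assumed usage, not part of the upstream file): the
+// executor is handed to an imagebuilder.Builder so that metadata-only
+// instructions update the builder's RunConfig while ADD/COPY/RUN are
+// rejected.
+//
+//	builder := imagebuilder.NewBuilder(nil)
+//	err := builder.Run(step, &configOnlyExecutor{}, true)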
diff --git a/internal/config/executor_test.go b/internal/config/executor_test.go
new file mode 100644
index 0000000..1fbc263
--- /dev/null
+++ b/internal/config/executor_test.go
@@ -0,0 +1,5 @@
+package config
+
+import "github.com/openshift/imagebuilder"
+
+var _ imagebuilder.Executor = &configOnlyExecutor{}
diff --git a/internal/config/override.go b/internal/config/override.go
new file mode 100644
index 0000000..a1dfebf
--- /dev/null
+++ b/internal/config/override.go
@@ -0,0 +1,181 @@
+package config
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/buildah/docker"
+ "github.com/containers/image/v5/manifest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/openshift/imagebuilder"
+)
+
+// firstStringElseSecondString takes two strings, and returns the first
+// string if it isn't empty, else the second string
+func firstStringElseSecondString(first, second string) string {
+ if first != "" {
+ return first
+ }
+ return second
+}
+
+// firstSliceElseSecondSlice takes two string slices, and returns the first
+// slice of strings if it has contents, else the second slice
+func firstSliceElseSecondSlice(first, second []string) []string {
+ if len(first) > 0 {
+ return append([]string{}, first...)
+ }
+ return append([]string{}, second...)
+}
+
+// firstSlicePairElseSecondSlicePair takes two pairs of string slices, and
+// returns the first pair of slices if either has contents, else the second
+// pair
+func firstSlicePairElseSecondSlicePair(firstA, firstB, secondA, secondB []string) ([]string, []string) {
+ if len(firstA) > 0 || len(firstB) > 0 {
+ return append([]string{}, firstA...), append([]string{}, firstB...)
+ }
+ return append([]string{}, secondA...), append([]string{}, secondB...)
+}
+
+// mergeEnv combines variables from a and b into a single environment slice.
+// If a and b both provide values for the same variable, the value from b is
+// preferred.
+func mergeEnv(a, b []string) []string {
+ index := make(map[string]int)
+ results := make([]string, 0, len(a)+len(b))
+ for _, kv := range append(append([]string{}, a...), b...) {
+ k, _, specifiesValue := strings.Cut(kv, "=")
+ if !specifiesValue {
+ if value, ok := os.LookupEnv(kv); ok {
+ kv = kv + "=" + value
+ } else {
+ kv = kv + "="
+ }
+ }
+ if i, seen := index[k]; seen {
+ results[i] = kv
+ } else {
+ index[k] = len(results)
+ results = append(results, kv)
+ }
+ }
+ return results
+}
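+
+// Illustrative sketch (not part of the upstream file): later values win,
+// and a bare variable name is resolved from the process environment.
+//
+//	mergeEnv([]string{"A=1", "B=2"}, []string{"B=3"})
+//	// returns []string{"A=1", "B=3"}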
+
+// Override takes a buildah docker config and an OCI ImageConfig, and applies a
+// mixture of a slice of Dockerfile-style instructions and fields from a config
+// blob to them both
+func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error {
+ if len(overrideChanges) > 0 {
+ if overrideConfig == nil {
+ overrideConfig = &manifest.Schema2Config{}
+ }
+ // Parse the set of changes as we would a Dockerfile.
+ changes := strings.Join(overrideChanges, "\n")
+ parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(changes))
+ if err != nil {
+ return fmt.Errorf("parsing change set %+v: %w", changes, err)
+ }
+ // Create a dummy builder object to process configuration-related
+ // instructions.
+ subBuilder := imagebuilder.NewBuilder(nil)
+ // Convert the incoming data into an initial RunConfig.
+ subBuilder.RunConfig = *GoDockerclientConfigFromSchema2Config(overrideConfig)
+ // Process the change instructions one by one.
+ for _, node := range parsed.Children {
+ var step imagebuilder.Step
+ if err := step.Resolve(node); err != nil {
+ return fmt.Errorf("resolving change %q: %w", node.Original, err)
+ }
+ if err := subBuilder.Run(&step, &configOnlyExecutor{}, true); err != nil {
+ return fmt.Errorf("processing change %q: %w", node.Original, err)
+ }
+ }
+ // Pull settings out of the dummy builder's RunConfig.
+ overrideConfig = Schema2ConfigFromGoDockerclientConfig(&subBuilder.RunConfig)
+ }
+ if overrideConfig != nil {
+ // Apply changes from a possibly-provided possibly-changed config struct.
+ dconfig.Hostname = firstStringElseSecondString(overrideConfig.Hostname, dconfig.Hostname)
+ dconfig.Domainname = firstStringElseSecondString(overrideConfig.Domainname, dconfig.Domainname)
+ dconfig.User = firstStringElseSecondString(overrideConfig.User, dconfig.User)
+ oconfig.User = firstStringElseSecondString(overrideConfig.User, oconfig.User)
+ dconfig.AttachStdin = overrideConfig.AttachStdin
+ dconfig.AttachStdout = overrideConfig.AttachStdout
+ dconfig.AttachStderr = overrideConfig.AttachStderr
+ if len(overrideConfig.ExposedPorts) > 0 {
+ dexposedPorts := make(map[docker.Port]struct{})
+ oexposedPorts := make(map[string]struct{})
+ for port := range dconfig.ExposedPorts {
+ dexposedPorts[port] = struct{}{}
+ }
+ for port := range overrideConfig.ExposedPorts {
+ dexposedPorts[docker.Port(port)] = struct{}{}
+ }
+ for port := range oconfig.ExposedPorts {
+ oexposedPorts[port] = struct{}{}
+ }
+ for port := range overrideConfig.ExposedPorts {
+ oexposedPorts[string(port)] = struct{}{}
+ }
+ dconfig.ExposedPorts = dexposedPorts
+ oconfig.ExposedPorts = oexposedPorts
+ }
+ dconfig.Tty = overrideConfig.Tty
+ dconfig.OpenStdin = overrideConfig.OpenStdin
+ dconfig.StdinOnce = overrideConfig.StdinOnce
+ if len(overrideConfig.Env) > 0 {
+ dconfig.Env = mergeEnv(dconfig.Env, overrideConfig.Env)
+ oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env)
+ }
+ dconfig.Entrypoint, dconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, dconfig.Entrypoint, dconfig.Cmd)
+ oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd)
+ if overrideConfig.Healthcheck != nil {
+ dconfig.Healthcheck = &docker.HealthConfig{
+ Test: append([]string{}, overrideConfig.Healthcheck.Test...),
+ Interval: overrideConfig.Healthcheck.Interval,
+ Timeout: overrideConfig.Healthcheck.Timeout,
+ StartPeriod: overrideConfig.Healthcheck.StartPeriod,
+ Retries: overrideConfig.Healthcheck.Retries,
+ }
+ }
+ dconfig.ArgsEscaped = overrideConfig.ArgsEscaped
+ dconfig.Image = firstStringElseSecondString(overrideConfig.Image, dconfig.Image)
+ if len(overrideConfig.Volumes) > 0 {
+ if dconfig.Volumes == nil {
+ dconfig.Volumes = make(map[string]struct{})
+ }
+ if oconfig.Volumes == nil {
+ oconfig.Volumes = make(map[string]struct{})
+ }
+ for volume := range overrideConfig.Volumes {
+ dconfig.Volumes[volume] = struct{}{}
+ oconfig.Volumes[volume] = struct{}{}
+ }
+ }
+ dconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, dconfig.WorkingDir)
+ oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir)
+ dconfig.NetworkDisabled = overrideConfig.NetworkDisabled
+ dconfig.MacAddress = overrideConfig.MacAddress
+ dconfig.OnBuild = overrideConfig.OnBuild
+ if len(overrideConfig.Labels) > 0 {
+ if dconfig.Labels == nil {
+ dconfig.Labels = make(map[string]string)
+ }
+ if oconfig.Labels == nil {
+ oconfig.Labels = make(map[string]string)
+ }
+ for k, v := range overrideConfig.Labels {
+ dconfig.Labels[k] = v
+ oconfig.Labels[k] = v
+ }
+ }
+ dconfig.StopSignal = overrideConfig.StopSignal
+ oconfig.StopSignal = overrideConfig.StopSignal
+ dconfig.StopTimeout = overrideConfig.StopTimeout
+ dconfig.Shell = firstSliceElseSecondSlice(overrideConfig.Shell, dconfig.Shell)
+ }
+ return nil
+}
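+
+// Illustrative sketch (not part of the upstream file): applying
+// Dockerfile-style changes to both config structures at once.
+//
+//	changes := []string{`ENV PORT=8080`, `ENTRYPOINT ["/entrypoint"]`}
+//	err := Override(&dconfig, &oconfig, changes, nil)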
diff --git a/internal/mkcw/archive.go b/internal/mkcw/archive.go
new file mode 100644
index 0000000..a0677e4
--- /dev/null
+++ b/internal/mkcw/archive.go
@@ -0,0 +1,464 @@
+package mkcw
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/containers/luksy"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/go-units"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+const minimumImageSize = 10 * 1024 * 1024
+
+// ArchiveOptions includes optional settings for generating an archive.
+type ArchiveOptions struct {
+ // If supplied, we'll register the workload with this server.
+ // Practically necessary if DiskEncryptionPassphrase is not set, in
+ // which case we'll generate one and throw it away after.
+ AttestationURL string
+
+ // Used to measure the environment. If left unset (0), defaults will be applied.
+ CPUs int
+ Memory int
+
+ // Can be manually set. If left unset ("", false, nil), reasonable values will be used.
+ TempDir string
+ TeeType TeeType
+ IgnoreAttestationErrors bool
+ ImageSize int64
+ WorkloadID string
+ Slop string
+ DiskEncryptionPassphrase string
+ FirmwareLibrary string
+ Logger *logrus.Logger
+}
+
+type chainRetrievalError struct {
+ stderr string
+ err error
+}
+
+func (c chainRetrievalError) Error() string {
+ if trimmed := strings.TrimSpace(c.stderr); trimmed != "" {
+ return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v: %v", strings.TrimSpace(c.stderr), c.err)
+ }
+ return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v", c.err)
+}
+
+// Archive generates a WorkloadConfig for a specified directory and produces a
+// tar archive of a container image's rootfs with the expected contents.
+// The input directory will have a ".krun_config.json" file added to it while
+// this function is running, but it will be removed on completion.
+func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) {
+ const (
+ teeDefaultCPUs = 2
+ teeDefaultMemory = 512
+ teeDefaultFilesystem = "ext4"
+ teeDefaultTeeType = SNP
+ )
+
+ if path == "" {
+ return nil, WorkloadConfig{}, fmt.Errorf("required path not specified")
+ }
+ logger := options.Logger
+ if logger == nil {
+ logger = logrus.StandardLogger()
+ }
+
+ teeType := options.TeeType
+ if teeType == "" {
+ teeType = teeDefaultTeeType
+ }
+ cpus := options.CPUs
+ if cpus == 0 {
+ cpus = teeDefaultCPUs
+ }
+ memory := options.Memory
+ if memory == 0 {
+ memory = teeDefaultMemory
+ }
+ filesystem := teeDefaultFilesystem
+ workloadID := options.WorkloadID
+ if workloadID == "" {
+ digestInput := path + filesystem + time.Now().String()
+ workloadID = digest.Canonical.FromString(digestInput).Encoded()
+ }
+ workloadConfig := WorkloadConfig{
+ Type: teeType,
+ WorkloadID: workloadID,
+ CPUs: cpus,
+ Memory: memory,
+ AttestationURL: options.AttestationURL,
+ }
+
+ // Do things which are specific to the type of TEE we're building for.
+ var chainBytes []byte
+ var chainBytesFile string
+ var chainInfo fs.FileInfo
+ switch teeType {
+ default:
+ return nil, WorkloadConfig{}, fmt.Errorf("don't know how to generate TeeData for TEE type %q", teeType)
+ case SEV, SEV_NO_ES:
+ // If we need a certificate chain, get it.
+ chain, err := os.CreateTemp(options.TempDir, "chain")
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ chain.Close()
+ defer func() {
+ if err := os.Remove(chain.Name()); err != nil {
+ logger.Warnf("error removing temporary file %q: %v", chain.Name(), err)
+ }
+ }()
+ logrus.Debugf("sevctl export -f %s", chain.Name())
+ cmd := exec.Command("sevctl", "export", "-f", chain.Name())
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout, cmd.Stderr = &stdout, &stderr
+ if err := cmd.Run(); err != nil {
+ if !options.IgnoreAttestationErrors {
+ return nil, WorkloadConfig{}, chainRetrievalError{stderr.String(), err}
+ }
+ logger.Warn(chainRetrievalError{stderr.String(), err}.Error())
+ }
+ if chainBytes, err = os.ReadFile(chain.Name()); err != nil {
+ chainBytes = []byte{}
+ }
+ var teeData SevWorkloadData
+ if len(chainBytes) > 0 {
+ chainBytesFile = "sev.chain"
+ chainInfo, err = os.Stat(chain.Name())
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ teeData.VendorChain = "/" + chainBytesFile
+ }
+ encodedTeeData, err := json.Marshal(teeData)
+ if err != nil {
+ return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err)
+ }
+ workloadConfig.TeeData = string(encodedTeeData)
+ case SNP:
+ teeData := SnpWorkloadData{
+ Generation: "milan",
+ }
+ encodedTeeData, err := json.Marshal(teeData)
+ if err != nil {
+ return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err)
+ }
+ workloadConfig.TeeData = string(encodedTeeData)
+ }
+
+ // Write part of the config blob where the krun init process will be
+ // looking for it. The oci2cw tool used `buildah inspect` output, but
+ // init is just looking for fields that have the right names in any
+ // object, and the image's config will have that, so let's try encoding
+ // it directly.
+ krunConfigPath := filepath.Join(path, ".krun_config.json")
+ krunConfigBytes, err := json.Marshal(ociConfig)
+ if err != nil {
+ return nil, WorkloadConfig{}, fmt.Errorf("creating .krun_config from image configuration: %w", err)
+ }
+ if err := ioutils.AtomicWriteFile(krunConfigPath, krunConfigBytes, 0o600); err != nil {
+ return nil, WorkloadConfig{}, fmt.Errorf("saving krun config: %w", err)
+ }
+ defer func() {
+ if err := os.Remove(krunConfigPath); err != nil {
+ logger.Warnf("removing krun configuration file: %v", err)
+ }
+ }()
+
+ // Encode the workload config, in case it fails for any reason.
+ cleanedUpWorkloadConfig := workloadConfig
+ switch cleanedUpWorkloadConfig.Type {
+ default:
+ return nil, WorkloadConfig{}, fmt.Errorf("don't know how to canonicalize TEE type %q", cleanedUpWorkloadConfig.Type)
+ case SEV, SEV_NO_ES:
+ cleanedUpWorkloadConfig.Type = SEV
+ case SNP:
+ cleanedUpWorkloadConfig.Type = SNP
+ }
+ workloadConfigBytes, err := json.Marshal(cleanedUpWorkloadConfig)
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+
+ // Make sure we have the passphrase to use for encrypting the disk image.
+ diskEncryptionPassphrase := options.DiskEncryptionPassphrase
+ if diskEncryptionPassphrase == "" {
+ diskEncryptionPassphrase, err = GenerateDiskEncryptionPassphrase()
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ }
+
+ // If we weren't told how big the image should be, get a rough estimate
+ // of the input data size, then add a hedge to it.
+ imageSize := slop(options.ImageSize, options.Slop)
+ if imageSize == 0 {
+ var sourceSize int64
+ if err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) {
+ // tolerate the error, but don't touch the possibly-nil DirEntry
+ return nil
+ }
+ return err
+ }
+ info, err := d.Info()
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) {
+ return nil
+ }
+ return err
+ }
+ sourceSize += info.Size()
+ return nil
+ }); err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ imageSize = slop(sourceSize, options.Slop)
+ }
+ if imageSize%4096 != 0 {
+ imageSize += (4096 - (imageSize % 4096))
+ }
+ if imageSize < minimumImageSize {
+ imageSize = minimumImageSize
+ }
+
+ // Create a file to use as the unencrypted version of the disk image.
+ plain, err := os.CreateTemp(options.TempDir, "plain.img")
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ removePlain := true
+ defer func() {
+ if removePlain {
+ if err := os.Remove(plain.Name()); err != nil {
+ logger.Warnf("removing temporary file %q: %v", plain.Name(), err)
+ }
+ }
+ }()
+
+ // Lengthen the plaintext disk image file.
+ if err := plain.Truncate(imageSize); err != nil {
+ plain.Close()
+ return nil, WorkloadConfig{}, err
+ }
+ plainInfo, err := plain.Stat()
+ plain.Close()
+ if err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+
+ // Format the disk image with the filesystem contents.
+ if _, stderr, err := MakeFS(path, plain.Name(), filesystem); err != nil {
+ if strings.TrimSpace(stderr) != "" {
+ return nil, WorkloadConfig{}, fmt.Errorf("%s: %w", strings.TrimSpace(stderr), err)
+ }
+ return nil, WorkloadConfig{}, err
+ }
+
+ // If we're registering the workload, we can do that now.
+ if workloadConfig.AttestationURL != "" {
+ if err := SendRegistrationRequest(workloadConfig, diskEncryptionPassphrase, options.FirmwareLibrary, options.IgnoreAttestationErrors, logger); err != nil {
+ return nil, WorkloadConfig{}, err
+ }
+ }
+
+ // Try to encrypt on the fly.
+ pipeReader, pipeWriter := io.Pipe()
+ removePlain = false
+ go func() {
+ var err error
+ defer func() {
+ if err := os.Remove(plain.Name()); err != nil {
+ logger.Warnf("removing temporary file %q: %v", plain.Name(), err)
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ } else {
+ pipeWriter.Close()
+ }
+ }()
+ plain, err := os.Open(plain.Name())
+ if err != nil {
+ // the *os.File is nil on error, and the error from os.Open
+ // already names the file, so log the error alone
+ logrus.Errorf("opening unencrypted disk image: %v", err)
+ return
+ }
+ defer plain.Close()
+ tw := tar.NewWriter(pipeWriter)
+ defer tw.Flush()
+
+ // Write /entrypoint
+ var decompressedEntrypoint bytes.Buffer
+ decompressor, err := gzip.NewReader(bytes.NewReader(entrypointCompressedBytes))
+ if err != nil {
+ logrus.Errorf("decompressing copy of entrypoint: %v", err)
+ return
+ }
+ defer decompressor.Close()
+ if _, err = io.Copy(&decompressedEntrypoint, decompressor); err != nil {
+ logrus.Errorf("decompressing copy of entrypoint: %v", err)
+ return
+ }
+ entrypointHeader, err := tar.FileInfoHeader(plainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for entrypoint: %v", err)
+ return
+ }
+ entrypointHeader.Name = "entrypoint"
+ entrypointHeader.Mode = 0o755
+ entrypointHeader.Uname, entrypointHeader.Gname = "", ""
+ entrypointHeader.Uid, entrypointHeader.Gid = 0, 0
+ entrypointHeader.Size = int64(decompressedEntrypoint.Len())
+ if err = tw.WriteHeader(entrypointHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", entrypointHeader.Name, err)
+ return
+ }
+ if _, err = io.Copy(tw, &decompressedEntrypoint); err != nil {
+ logrus.Errorf("writing %q: %v", entrypointHeader.Name, err)
+ return
+ }
+
+ // Write /sev.chain
+ if chainInfo != nil {
+ chainHeader, err := tar.FileInfoHeader(chainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for %q: %v", chainInfo.Name(), err)
+ return
+ }
+ chainHeader.Name = chainBytesFile
+ chainHeader.Mode = 0o600
+ chainHeader.Uname, chainHeader.Gname = "", ""
+ chainHeader.Uid, chainHeader.Gid = 0, 0
+ chainHeader.Size = int64(len(chainBytes))
+ if err = tw.WriteHeader(chainHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", chainHeader.Name, err)
+ return
+ }
+ if _, err = tw.Write(chainBytes); err != nil {
+ logrus.Errorf("writing %q: %v", chainHeader.Name, err)
+ return
+ }
+ }
+
+ // Write /krun-sev.json.
+ workloadConfigHeader, err := tar.FileInfoHeader(plainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
+ return
+ }
+ workloadConfigHeader.Name = "krun-sev.json"
+ workloadConfigHeader.Mode = 0o600
+ workloadConfigHeader.Uname, workloadConfigHeader.Gname = "", ""
+ workloadConfigHeader.Uid, workloadConfigHeader.Gid = 0, 0
+ workloadConfigHeader.Size = int64(len(workloadConfigBytes))
+ if err = tw.WriteHeader(workloadConfigHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", workloadConfigHeader.Name, err)
+ return
+ }
+ if _, err = tw.Write(workloadConfigBytes); err != nil {
+ logrus.Errorf("writing %q: %v", workloadConfigHeader.Name, err)
+ return
+ }
+
+ // Write /tmp.
+ tmpHeader, err := tar.FileInfoHeader(plainInfo, "")
+ if err != nil {
+ logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
+ return
+ }
+ tmpHeader.Name = "tmp/"
+ tmpHeader.Typeflag = tar.TypeDir
+ tmpHeader.Mode = 0o1777
+ tmpHeader.Uname, tmpHeader.Gname = "", ""
+ tmpHeader.Uid, tmpHeader.Gid = 0, 0
+ tmpHeader.Size = 0
+ if err = tw.WriteHeader(tmpHeader); err != nil {
+ logrus.Errorf("writing header for %q: %v", tmpHeader.Name, err)
+ return
+ }
+
+ // Now figure out the footer that we'll append to the encrypted disk.
+ var footer bytes.Buffer
+ lengthBuffer := make([]byte, 8)
+ footer.Write(workloadConfigBytes)
+ footer.WriteString("KRUN")
+ binary.LittleEndian.PutUint64(lengthBuffer, uint64(len(workloadConfigBytes)))
+ footer.Write(lengthBuffer)
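+ // Reading from the end of the image: the last 8 bytes give the config
+ // JSON's length as a little-endian uint64, preceded by the magic string
+ // "KRUN", preceded by the workload config JSON itself, so a reader can
+ // locate the config without scanning the whole file.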
+
+ // Start encrypting and write /disk.img.
+ header, encrypt, blockSize, err := luksy.EncryptV1([]string{diskEncryptionPassphrase}, "")
+ if err != nil {
+ logrus.Errorf("initializing encryption for disk.img: %v", err)
+ return
+ }
+ paddingBoundary := int64(4096)
+ paddingNeeded := (paddingBoundary - ((int64(len(header)) + imageSize + int64(footer.Len())) % paddingBoundary)) % paddingBoundary
+ diskHeader := workloadConfigHeader
+ diskHeader.Name = "disk.img"
+ diskHeader.Mode = 0o600
+ diskHeader.Size = int64(len(header)) + imageSize + paddingNeeded + int64(footer.Len())
+ if err = tw.WriteHeader(diskHeader); err != nil {
+ logrus.Errorf("writing archive header for disk.img: %v", err)
+ return
+ }
+ if _, err = io.Copy(tw, bytes.NewReader(header)); err != nil {
+ logrus.Errorf("writing encryption header for disk.img: %v", err)
+ return
+ }
+ encryptWrapper := luksy.EncryptWriter(encrypt, tw, blockSize)
+ if _, err = io.Copy(encryptWrapper, plain); err != nil {
+ logrus.Errorf("encrypting disk.img: %v", err)
+ return
+ }
+ encryptWrapper.Close()
+ if _, err = tw.Write(make([]byte, paddingNeeded)); err != nil {
+ logrus.Errorf("writing padding for disk.img: %v", err)
+ return
+ }
+ if _, err = io.Copy(tw, &footer); err != nil {
+ logrus.Errorf("writing footer for disk.img: %v", err)
+ return
+ }
+ tw.Close()
+ }()
+
+ return pipeReader, workloadConfig, nil
+}
+
+func slop(size int64, slop string) int64 {
+ if slop == "" {
+ return size * 5 / 4
+ }
+ for _, factor := range strings.Split(slop, "+") {
+ factor = strings.TrimSpace(factor)
+ if factor == "" {
+ continue
+ }
+ if strings.HasSuffix(factor, "%") {
+ percentage := strings.TrimSuffix(factor, "%")
+ percent, err := strconv.ParseInt(percentage, 10, 8)
+ if err != nil {
+ logrus.Warnf("parsing percentage %q: %v", factor, err)
+ } else {
+ size *= (percent + 100)
+ size /= 100
+ }
+ } else {
+ more, err := units.RAMInBytes(factor)
+ if err != nil {
+ logrus.Warnf("parsing %q as a size: %v", factor, err)
+ } else {
+ size += more
+ }
+ }
+ }
+ return size
+}
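+
+// Illustrative sketch (not part of the upstream file): slop factors apply
+// left to right; a percentage scales the running size, a unit suffix adds
+// to it (units.RAMInBytes treats "GB" as GiB).
+//
+//	slop(100, "")         // 125 (default is a 25% hedge)
+//	slop(100, "10%+10GB") // 100*110/100 + 10*1024*1024*1024 = 10737418350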
diff --git a/internal/mkcw/archive_test.go b/internal/mkcw/archive_test.go
new file mode 100644
index 0000000..c2e06fc
--- /dev/null
+++ b/internal/mkcw/archive_test.go
@@ -0,0 +1,181 @@
+package mkcw
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSlop(t *testing.T) {
+ testCases := []struct {
+ input int64
+ slop string
+ output int64
+ }{
+ {100, "", 125},
+ {100, "10%", 110},
+ {100, "100%", 200},
+ {100, "10GB", 10*1024*1024*1024 + 100},
+ {100, "10%+10GB", 10*1024*1024*1024 + 110},
+ {100, "10% + 10GB", 10*1024*1024*1024 + 110},
+ }
+ for _, testCase := range testCases {
+ t.Run(testCase.slop, func(t *testing.T) {
+ assert.Equal(t, testCase.output, slop(testCase.input, testCase.slop))
+ })
+ }
+}
+
+// dummyAttestationHandler replies with a fixed response code to requests to
+// the right path, and caches passphrases indexed by workload ID
+type dummyAttestationHandler struct {
+ t *testing.T
+ status int
+ passphrases map[string]string
+ passphrasesLock sync.Mutex
+}
+
+func (d *dummyAttestationHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ var body bytes.Buffer
+ if req.Body != nil {
+ if _, err := io.Copy(&body, req.Body); err != nil {
+ d.t.Logf("reading request body: %v", err)
+ return
+ }
+ req.Body.Close()
+ }
+ if req.URL != nil && req.URL.Path == "/kbs/v0/register_workload" {
+ var registrationRequest RegistrationRequest
+ // if we can't decode the client request, bail
+ if err := json.Unmarshal(body.Bytes(), &registrationRequest); err != nil {
+ rw.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ // cache the passphrase
+ d.passphrasesLock.Lock()
+ if d.passphrases == nil {
+ d.passphrases = make(map[string]string)
+ }
+ d.passphrases[registrationRequest.WorkloadID] = registrationRequest.Passphrase
+ d.passphrasesLock.Unlock()
+ // return the predetermined status
+ status := d.status
+ if status == 0 {
+ status = http.StatusOK
+ }
+ rw.WriteHeader(status)
+ return
+ }
+ // no such handler
+ rw.WriteHeader(http.StatusInternalServerError)
+}
+
+func TestArchive(t *testing.T) {
+ ociConfig := &v1.Image{
+ Config: v1.ImageConfig{
+ User: "root",
+ Env: []string{"PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/usr/sbin:/sbin:/usr/sbin:/sbin"},
+ Cmd: []string{"/bin/bash"},
+ WorkingDir: "/root",
+ Labels: map[string]string{
+ "label_a": "b",
+ "label_c": "d",
+ },
+ },
+ }
+ for _, status := range []int{http.StatusOK, http.StatusInternalServerError} {
+ for _, ignoreChainRetrievalErrors := range []bool{false, true} {
+ for _, ignoreAttestationErrors := range []bool{false, true} {
+ t.Run(fmt.Sprintf("status=%d,ignoreChainRetrievalErrors=%v,ignoreAttestationErrors=%v", status, ignoreChainRetrievalErrors, ignoreAttestationErrors), func(t *testing.T) {
+ // listen on a system-assigned port
+ listener, err := net.Listen("tcp", ":0")
+ require.NoError(t, err)
+ // keep track of our listener address
+ addr := listener.Addr()
+ // serve requests on that listener
+ handler := &dummyAttestationHandler{t: t, status: status}
+ server := http.Server{
+ Handler: handler,
+ }
+ go func() {
+ if err := server.Serve(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ t.Logf("serve: %v", err)
+ }
+ }()
+ // clean up at the end of this test
+ t.Cleanup(func() { assert.NoError(t, server.Close()) })
+ // generate the container rootfs using a temporary empty directory
+ archiveOptions := ArchiveOptions{
+ CPUs: 4,
+ Memory: 256,
+ TempDir: t.TempDir(),
+ AttestationURL: "http://" + addr.String(),
+ IgnoreAttestationErrors: ignoreAttestationErrors,
+ }
+ inputPath := t.TempDir()
+ rc, workloadConfig, err := Archive(inputPath, ociConfig, archiveOptions)
+ // bail now if we got an error we didn't expect
+ if err != nil {
+ if errors.As(err, &chainRetrievalError{}) {
+ if !ignoreChainRetrievalErrors {
+ return
+ }
+ }
+ if errors.As(err, &attestationError{}) {
+ if !ignoreAttestationErrors {
+ require.NoError(t, err)
+ }
+ }
+ return
+ }
+ if err == nil {
+ defer rc.Close()
+ }
+ // read each archive entry's contents into a map
+ contents := make(map[string][]byte)
+ tr := tar.NewReader(rc)
+ hdr, err := tr.Next()
+ for hdr != nil {
+ contents[hdr.Name], err = io.ReadAll(tr)
+ require.NoError(t, err)
+ hdr, err = tr.Next()
+ }
+ if err != nil {
+ require.ErrorIs(t, err, io.EOF)
+ }
+ // check that krun-sev.json is a JSON-encoded copy of the workload config
+ var writtenWorkloadConfig WorkloadConfig
+ err = json.Unmarshal(contents["krun-sev.json"], &writtenWorkloadConfig)
+ require.NoError(t, err)
+ assert.Equal(t, workloadConfig, writtenWorkloadConfig)
+ // save the disk image to a file
+ encryptedFile := filepath.Join(t.TempDir(), "encrypted.img")
+ err = os.WriteFile(encryptedFile, contents["disk.img"], 0o600)
+ require.NoError(t, err)
+ // check that we have a configuration footer in there
+ _, err = ReadWorkloadConfigFromImage(encryptedFile)
+ require.NoError(t, err)
+ // check that the attestation server got the encryption passphrase
+ handler.passphrasesLock.Lock()
+ passphrase := handler.passphrases[workloadConfig.WorkloadID]
+ handler.passphrasesLock.Unlock()
+ err = CheckLUKSPassphrase(encryptedFile, passphrase)
+ require.NoError(t, err)
+ })
+ }
+ }
+ }
+}
diff --git a/internal/mkcw/attest.go b/internal/mkcw/attest.go
new file mode 100644
index 0000000..91362d3
--- /dev/null
+++ b/internal/mkcw/attest.go
@@ -0,0 +1,250 @@
+package mkcw
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/buildah/internal/mkcw/types"
+ "github.com/sirupsen/logrus"
+)
+
+type (
+ RegistrationRequest = types.RegistrationRequest
+ TeeConfig = types.TeeConfig
+ TeeConfigFlags = types.TeeConfigFlags
+ TeeConfigMinFW = types.TeeConfigMinFW
+)
+
+type measurementError struct {
+ err error
+}
+
+func (m measurementError) Error() string {
+ return fmt.Sprintf("generating measurement for attestation: %v", m.err)
+}
+
+type attestationError struct {
+ err error
+}
+
+func (a attestationError) Error() string {
+ return fmt.Sprintf("registering workload: %v", a.err)
+}
+
+type httpError struct {
+ statusCode int
+}
+
+func (h httpError) Error() string {
+ if statusText := http.StatusText(h.statusCode); statusText != "" {
+ return fmt.Sprintf("received server status %d (%q)", h.statusCode, statusText)
+ }
+ return fmt.Sprintf("received server status %d", h.statusCode)
+}
+
+// SendRegistrationRequest registers a workload with the specified decryption
+// passphrase with the service whose location is part of the WorkloadConfig.
+func SendRegistrationRequest(workloadConfig WorkloadConfig, diskEncryptionPassphrase, firmwareLibrary string, ignoreAttestationErrors bool, logger *logrus.Logger) error {
+ if workloadConfig.AttestationURL == "" {
+ return errors.New("attestation URL not provided")
+ }
+
+ // Measure the execution environment.
+ measurement, err := GenerateMeasurement(workloadConfig, firmwareLibrary)
+ if err != nil {
+ if !ignoreAttestationErrors {
+ return &measurementError{err}
+ }
+ logger.Warnf("generating measurement for attestation: %v", err)
+ }
+
+ // Build the workload registration (attestation) request body.
+ var teeConfigBytes []byte
+ switch workloadConfig.Type {
+ case SEV, SEV_NO_ES, SNP:
+ var cbits types.TeeConfigFlagBits
+ switch workloadConfig.Type {
+ case SEV:
+ cbits = types.SEV_CONFIG_NO_DEBUG |
+ types.SEV_CONFIG_NO_KEY_SHARING |
+ types.SEV_CONFIG_ENCRYPTED_STATE |
+ types.SEV_CONFIG_NO_SEND |
+ types.SEV_CONFIG_DOMAIN |
+ types.SEV_CONFIG_SEV
+ case SEV_NO_ES:
+ cbits = types.SEV_CONFIG_NO_DEBUG |
+ types.SEV_CONFIG_NO_KEY_SHARING |
+ types.SEV_CONFIG_NO_SEND |
+ types.SEV_CONFIG_DOMAIN |
+ types.SEV_CONFIG_SEV
+ case SNP:
+ cbits = types.SNP_CONFIG_SMT |
+ types.SNP_CONFIG_MANDATORY |
+ types.SNP_CONFIG_MIGRATE_MA |
+ types.SNP_CONFIG_DEBUG
+ default:
+ panic("internal error") // shouldn't happen
+ }
+ teeConfig := TeeConfig{
+ Flags: TeeConfigFlags{
+ Bits: cbits,
+ },
+ MinFW: TeeConfigMinFW{
+ Major: 0,
+ Minor: 0,
+ },
+ }
+ teeConfigBytes, err = json.Marshal(teeConfig)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("don't know how to generate tee_config for %q TEEs", workloadConfig.Type)
+ }
+
+ registrationRequest := RegistrationRequest{
+ WorkloadID: workloadConfig.WorkloadID,
+ LaunchMeasurement: measurement,
+ TeeConfig: string(teeConfigBytes),
+ Passphrase: diskEncryptionPassphrase,
+ }
+ registrationRequestBytes, err := json.Marshal(registrationRequest)
+ if err != nil {
+ return err
+ }
+
+ // Register the workload.
+ parsedURL, err := url.Parse(workloadConfig.AttestationURL)
+ if err != nil {
+ return err
+ }
+ parsedURL.Path = path.Join(parsedURL.Path, "/kbs/v0/register_workload")
+ url := parsedURL.String()
+ requestContentType := "application/json"
+ requestBody := bytes.NewReader(registrationRequestBytes)
+ defer http.DefaultClient.CloseIdleConnections()
+ resp, err := http.Post(url, requestContentType, requestBody)
+ if resp != nil {
+ if resp.Body != nil {
+ resp.Body.Close()
+ }
+ switch resp.StatusCode {
+ default:
+ if !ignoreAttestationErrors {
+ return &attestationError{&httpError{resp.StatusCode}}
+ }
+ logger.Warn(attestationError{&httpError{resp.StatusCode}}.Error())
+ case http.StatusOK, http.StatusAccepted:
+ // great!
+ }
+ }
+ if err != nil {
+ if !ignoreAttestationErrors {
+ return &attestationError{err}
+ }
+ logger.Warn(attestationError{err}.Error())
+ }
+ return nil
+}
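+
+// Illustrative sketch (not part of the upstream file): the registration
+// request body is JSON along these lines, with tee_config itself being a
+// JSON-encoded string (here, the six SEV flag bits OR together to 63).
+//
+//	{
+//	  "workload_id": "...",
+//	  "launch_measurement": "...",
+//	  "passphrase": "...",
+//	  "tee_config": "{\"flags\":{\"bits\":63},\"minfw\":{\"major\":0,\"minor\":0}}"
+//	}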
+
+// GenerateMeasurement generates the runtime measurement using the CPU count,
+// memory size, and the firmware shared library, whatever it's called, wherever
+// it is.
+// If firmwareLibrary is a path, it will be the only one checked.
+// If firmwareLibrary is a filename, it will be checked for in a hard-coded set
+// of directories.
+// If firmwareLibrary is empty, both the filename and the directory it is in
+// will be taken from a hard-coded set of candidates.
+func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string) (string, error) {
+ cpuString := fmt.Sprintf("%d", workloadConfig.CPUs)
+ memoryString := fmt.Sprintf("%d", workloadConfig.Memory)
+ var prefix string
+ switch workloadConfig.Type {
+ case SEV:
+ prefix = "SEV-ES"
+ case SEV_NO_ES:
+ prefix = "SEV"
+ case SNP:
+ prefix = "SNP"
+ default:
+ return "", fmt.Errorf("don't know which measurement to use for TEE type %q", workloadConfig.Type)
+ }
+
+ sharedLibraryDirs := []string{
+ "/usr/local/lib64",
+ "/usr/local/lib",
+ "/lib64",
+ "/lib",
+ "/usr/lib64",
+ "/usr/lib",
+ }
+ if llp, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
+ sharedLibraryDirs = append(sharedLibraryDirs, strings.Split(llp, ":")...)
+ }
+ libkrunfwNames := []string{
+ "libkrunfw-sev.so.4",
+ "libkrunfw-sev.so.3",
+ "libkrunfw-sev.so",
+ }
+ var pathsToCheck []string
+ if firmwareLibrary == "" {
+ for _, sharedLibraryDir := range sharedLibraryDirs {
+ if sharedLibraryDir == "" {
+ continue
+ }
+ for _, libkrunfw := range libkrunfwNames {
+ candidate := filepath.Join(sharedLibraryDir, libkrunfw)
+ pathsToCheck = append(pathsToCheck, candidate)
+ }
+ }
+ } else {
+ if filepath.IsAbs(firmwareLibrary) {
+ pathsToCheck = append(pathsToCheck, firmwareLibrary)
+ } else {
+ for _, sharedLibraryDir := range sharedLibraryDirs {
+ if sharedLibraryDir == "" {
+ continue
+ }
+ candidate := filepath.Join(sharedLibraryDir, firmwareLibrary)
+ pathsToCheck = append(pathsToCheck, candidate)
+ }
+ }
+ }
+ for _, candidate := range pathsToCheck {
+ if _, err := os.Lstat(candidate); err == nil {
+ var stdout, stderr bytes.Buffer
+ logrus.Debugf("krunfw_measurement -c %s -m %s %s", cpuString, memoryString, candidate)
+ cmd := exec.Command("krunfw_measurement", "-c", cpuString, "-m", memoryString, candidate)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ if stderr.Len() > 0 {
+ err = fmt.Errorf("krunfw_measurement: %s: %w", strings.TrimSpace(stderr.String()), err)
+ }
+ return "", err
+ }
+ scanner := bufio.NewScanner(&stdout)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, prefix+":") {
+ return strings.TrimSpace(strings.TrimPrefix(line, prefix+":")), nil
+ }
+ }
+ return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":")
+ }
+ }
+ return "", fmt.Errorf("generating measurement: none of %v found: %w", pathsToCheck, os.ErrNotExist)
+}
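+
+// Illustrative sketch (assumed tool output, not part of the upstream file):
+// krunfw_measurement is expected to print one line per measurement type,
+// and the line whose prefix matches the workload's TEE type is selected:
+//
+//	SEV: <hex measurement>
+//	SEV-ES: <hex measurement>
+//	SNP: <hex measurement>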
diff --git a/internal/mkcw/embed/entrypoint.gz b/internal/mkcw/embed/entrypoint.gz
new file mode 100755
index 0000000..0e00351
--- /dev/null
+++ b/internal/mkcw/embed/entrypoint.gz
Binary files differ
diff --git a/internal/mkcw/embed/entrypoint.s b/internal/mkcw/embed/entrypoint.s
new file mode 100644
index 0000000..0e4429c
--- /dev/null
+++ b/internal/mkcw/embed/entrypoint.s
@@ -0,0 +1,16 @@
+ .section .rodata.1,"aMS",@progbits,1
+msg:
+ .string "This image is designed to be run as a confidential workload using libkrun.\n"
+ .section .text._start,"ax",@progbits
+ .globl _start
+ .type _start,@function
+_start:
+ movq $1, %rax # write
+ movq $2, %rdi # fd=stderr_fileno
+ movq $msg, %rsi # message
+ movq $75, %rdx # length
+ syscall
+ movq $60, %rax # exit
+ movq $1, %rdi # status=1
+ syscall
+ .section .note.GNU-stack,"",@progbits
diff --git a/internal/mkcw/entrypoint.go b/internal/mkcw/entrypoint.go
new file mode 100644
index 0000000..d720321
--- /dev/null
+++ b/internal/mkcw/entrypoint.go
@@ -0,0 +1,6 @@
+package mkcw
+
+import _ "embed"
+
+//go:embed "embed/entrypoint.gz"
+var entrypointCompressedBytes []byte
diff --git a/internal/mkcw/luks.go b/internal/mkcw/luks.go
new file mode 100644
index 0000000..0d795e6
--- /dev/null
+++ b/internal/mkcw/luks.go
@@ -0,0 +1,51 @@
+package mkcw
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "os"
+
+ "github.com/containers/luksy"
+)
+
+// CheckLUKSPassphrase checks that the specified LUKS-encrypted file can be
+// decrypted using the specified passphrase.
+func CheckLUKSPassphrase(path, decryptionPassphrase string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ v1header, v2headerA, v2headerB, v2json, err := luksy.ReadHeaders(f, luksy.ReadHeaderOptions{})
+ if err != nil {
+ return err
+ }
+ if v1header != nil {
+ _, _, _, _, err = v1header.Decrypt(decryptionPassphrase, f)
+ return err
+ }
+ if v2headerA == nil && v2headerB == nil {
+ return fmt.Errorf("no LUKS headers read from %q", path)
+ }
+ if v2headerA != nil {
+ if _, _, _, _, err = v2headerA.Decrypt(decryptionPassphrase, f, *v2json); err != nil {
+ return err
+ }
+ }
+ if v2headerB != nil {
+ if _, _, _, _, err = v2headerB.Decrypt(decryptionPassphrase, f, *v2json); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GenerateDiskEncryptionPassphrase generates a random disk encryption passphrase.
+func GenerateDiskEncryptionPassphrase() (string, error) {
+ randomizedBytes := make([]byte, 32)
+ if _, err := rand.Read(randomizedBytes); err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(randomizedBytes), nil
+}
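+
+// Illustrative sketch (not part of the upstream file): the passphrase is
+// 32 random bytes hex-encoded, i.e. a 64-character string.
+//
+//	passphrase, err := GenerateDiskEncryptionPassphrase()
+//	// on success, len(passphrase) == 64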
diff --git a/internal/mkcw/luks_test.go b/internal/mkcw/luks_test.go
new file mode 100644
index 0000000..3df723f
--- /dev/null
+++ b/internal/mkcw/luks_test.go
@@ -0,0 +1,66 @@
+package mkcw
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/luksy"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCheckLUKSPassphrase(t *testing.T) {
+ passphrase, err := GenerateDiskEncryptionPassphrase()
+ require.NoError(t, err)
+ secondPassphrase, err := GenerateDiskEncryptionPassphrase()
+ require.NoError(t, err)
+
+ t.Run("v1", func(t *testing.T) {
+ header, encrypter, blockSize, err := luksy.EncryptV1([]string{secondPassphrase, passphrase}, "")
+ require.NoError(t, err)
+ f, err := os.Create(filepath.Join(t.TempDir(), "v1"))
+ require.NoError(t, err)
+ n, err := f.Write(header)
+ require.NoError(t, err)
+ require.Equal(t, len(header), n)
+ wrapper := luksy.EncryptWriter(encrypter, f, blockSize)
+ _, err = wrapper.Write(make([]byte, blockSize*10))
+ require.NoError(t, err)
+ wrapper.Close()
+ f.Close()
+
+ err = CheckLUKSPassphrase(f.Name(), passphrase)
+ require.NoError(t, err)
+ err = CheckLUKSPassphrase(f.Name(), secondPassphrase)
+ require.NoError(t, err)
+ err = CheckLUKSPassphrase(f.Name(), "nope, this is not a correct passphrase")
+ require.Error(t, err)
+ })
+
+ t.Run("v2", func(t *testing.T) {
+ for _, sectorSize := range []int{512, 1024, 2048, 4096} {
+ t.Run(fmt.Sprintf("sectorSize=%d", sectorSize), func(t *testing.T) {
+ header, encrypter, blockSize, err := luksy.EncryptV2([]string{secondPassphrase, passphrase}, "", sectorSize)
+ require.NoError(t, err)
+ f, err := os.Create(filepath.Join(t.TempDir(), "v2"))
+ require.NoError(t, err)
+ n, err := f.Write(header)
+ require.NoError(t, err)
+ require.Equal(t, len(header), n)
+ wrapper := luksy.EncryptWriter(encrypter, f, blockSize)
+ _, err = wrapper.Write(make([]byte, blockSize*10))
+ require.NoError(t, err)
+ wrapper.Close()
+ f.Close()
+
+ err = CheckLUKSPassphrase(f.Name(), passphrase)
+ require.NoError(t, err)
+ err = CheckLUKSPassphrase(f.Name(), secondPassphrase)
+ require.NoError(t, err)
+ err = CheckLUKSPassphrase(f.Name(), "nope, this is not one of the correct passphrases")
+ require.Error(t, err)
+ })
+ }
+ })
+}
diff --git a/internal/mkcw/makefs.go b/internal/mkcw/makefs.go
new file mode 100644
index 0000000..308f2a9
--- /dev/null
+++ b/internal/mkcw/makefs.go
@@ -0,0 +1,38 @@
+package mkcw
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+)
+
+// MakeFS formats the imageFile as a filesystem of the specified type,
+// populating it with the contents of the directory at sourcePath.
+// Recognized filesystem types are "ext2", "ext3", "ext4", and "btrfs".
+// Note that krun's init is currently hard-wired to assume "ext4".
+// Returns the stdout, stderr, and any error returned by the mkfs command.
+func MakeFS(sourcePath, imageFile, filesystem string) (string, string, error) {
+ var stdout, stderr strings.Builder
+ // N.B. mkfs.xfs can accept a protofile via its -p option, but the
+ // protofile format doesn't allow us to supply timestamp information or
+ // specify that files are hard linked
+ switch filesystem {
+ case "ext2", "ext3", "ext4":
+ logrus.Debugf("mkfs -t %s --rootdir %q %q", filesystem, sourcePath, imageFile)
+ cmd := exec.Command("mkfs", "-t", filesystem, "-d", sourcePath, imageFile)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ err := cmd.Run()
+ return stdout.String(), stderr.String(), err
+ case "btrfs":
+ logrus.Debugf("mkfs -t %s --rootdir %q %q", filesystem, sourcePath, imageFile)
+ cmd := exec.Command("mkfs", "-t", filesystem, "--rootdir", sourcePath, imageFile)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ err := cmd.Run()
+ return stdout.String(), stderr.String(), err
+ }
+ return "", "", fmt.Errorf("don't know how to make a %q filesystem with contents", filesystem)
+}
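+
+// Illustrative sketch (not part of the upstream file): formatting a disk
+// image from a rootfs directory with the filesystem krun's init expects.
+//
+//	stdout, stderr, err := MakeFS("/tmp/rootfs", "/tmp/disk.img", "ext4")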
diff --git a/internal/mkcw/types/attest.go b/internal/mkcw/types/attest.go
new file mode 100644
index 0000000..276c7f0
--- /dev/null
+++ b/internal/mkcw/types/attest.go
@@ -0,0 +1,47 @@
+package types
+
+// RegistrationRequest is the body of the request which we use for registering
+// this confidential workload with the attestation server.
+// https://github.com/virtee/reference-kbs/blob/10b2a4c0f8caf78a077210b172863bbae54f66aa/src/main.rs#L83
+type RegistrationRequest struct {
+ WorkloadID string `json:"workload_id"`
+ LaunchMeasurement string `json:"launch_measurement"`
+ Passphrase string `json:"passphrase"`
+ TeeConfig string `json:"tee_config"` // JSON-encoded teeConfig? or specific to the type of TEE?
+}
+
+// TeeConfig contains information about a trusted execution environment.
+type TeeConfig struct {
+ Flags TeeConfigFlags `json:"flags"` // runtime requirement bits
+ MinFW TeeConfigMinFW `json:"minfw"` // minimum platform firmware version
+}
+
+// TeeConfigFlags is a bit field containing policy flags specific to the environment.
+// https://github.com/virtee/sev/blob/d3e40917fd8531c69f47c2498e9667fe8a5303aa/src/launch/sev.rs#L172
+// https://github.com/virtee/sev/blob/d3e40917fd8531c69f47c2498e9667fe8a5303aa/src/launch/snp.rs#L114
+type TeeConfigFlags struct {
+ Bits TeeConfigFlagBits `json:"bits"`
+}
+
+// TeeConfigFlagBits are bits representing run-time expectations.
+type TeeConfigFlagBits int
+
+const (
+ SEV_CONFIG_NO_DEBUG TeeConfigFlagBits = 0b00000001 //revive:disable-line:var-naming no debugging of guests
+ SEV_CONFIG_NO_KEY_SHARING TeeConfigFlagBits = 0b00000010 //revive:disable-line:var-naming no sharing keys between guests
+ SEV_CONFIG_ENCRYPTED_STATE TeeConfigFlagBits = 0b00000100 //revive:disable-line:var-naming requires SEV-ES
+ SEV_CONFIG_NO_SEND TeeConfigFlagBits = 0b00001000 //revive:disable-line:var-naming no transferring the guest to another platform
+ SEV_CONFIG_DOMAIN TeeConfigFlagBits = 0b00010000 //revive:disable-line:var-naming no transferring the guest out of the domain (?)
+ SEV_CONFIG_SEV TeeConfigFlagBits = 0b00100000 //revive:disable-line:var-naming no transferring the guest to non-SEV platforms
+ SNP_CONFIG_SMT TeeConfigFlagBits = 0b00000001 //revive:disable-line:var-naming SMT is enabled on the host machine
+ SNP_CONFIG_MANDATORY TeeConfigFlagBits = 0b00000010 //revive:disable-line:var-naming reserved bit which should always be set
+ SNP_CONFIG_MIGRATE_MA TeeConfigFlagBits = 0b00000100 //revive:disable-line:var-naming allowed to use a migration agent
+ SNP_CONFIG_DEBUG TeeConfigFlagBits = 0b00001000 //revive:disable-line:var-naming allow debugging
+)
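+
+// Since these are bit flags, values appropriate to a given TEE type are
+// meant to be OR-ed together, e.g. (a sketch):
+//
+//	flags := TeeConfigFlags{Bits: SNP_CONFIG_SMT | SNP_CONFIG_MANDATORY}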
+
+// TeeConfigMinFW corresponds to a minimum version of the kernel+initrd
+// combination that should be booted.
+type TeeConfigMinFW struct {
+ Major int `json:"major"`
+ Minor int `json:"minor"`
+}
diff --git a/internal/mkcw/types/workload.go b/internal/mkcw/types/workload.go
new file mode 100644
index 0000000..9036485
--- /dev/null
+++ b/internal/mkcw/types/workload.go
@@ -0,0 +1,34 @@
+package types
+
+import "github.com/containers/buildah/define"
+
+// WorkloadConfig is the data type which is encoded and stored in /krun-sev.json in a container
+// image, and included directly in the disk image.
+// https://github.com/containers/libkrun/blob/57c59dc5359bdeeb8260b3493e9f63d3708f9ab9/src/vmm/src/resources.rs#L57
+type WorkloadConfig struct {
+ Type define.TeeType `json:"tee"`
+ TeeData string `json:"tee_data"` // Type == SEV: JSON-encoded SevWorkloadData, SNP: JSON-encoded SnpWorkloadData, others?
+ WorkloadID string `json:"workload_id"`
+ CPUs int `json:"cpus"`
+ Memory int `json:"ram_mib"`
+ AttestationURL string `json:"attestation_url"`
+}
+
+// SevWorkloadData contains the path to the SEV certificate chain and optionally,
+// the attestation server's public key(?)
+// https://github.com/containers/libkrun/blob/d31747aa92cf83df2abaeb87e2a83311c135d003/src/vmm/src/linux/tee/amdsev.rs#L222
+type SevWorkloadData struct {
+ VendorChain string `json:"vendor_chain"`
+ AttestationServerPubkey string `json:"attestation_server_pubkey"`
+}
+
+// SnpWorkloadData contains the required CPU generation name.
+// https://github.com/virtee/oci2cw/blob/1502d5be33c2fa82d49aaa95781bbab2aa932781/examples/tee-config-snp.json
+type SnpWorkloadData struct {
+ Generation string `json:"gen"` // "milan" (naples=1, rome=2, milan=3, genoa/bergamo/siena=4, turin=5)
+}
+
+const (
+ // SEV_NO_ES is a known trusted execution environment type: AMD-SEV (secure encrypted virtualization without encrypted state, requires epyc 1000 "naples")
+ SEV_NO_ES define.TeeType = "sev_no_es" //revive:disable-line:var-naming
+)
diff --git a/internal/mkcw/workload.go b/internal/mkcw/workload.go
new file mode 100644
index 0000000..4109ce9
--- /dev/null
+++ b/internal/mkcw/workload.go
@@ -0,0 +1,223 @@
+package mkcw
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal/mkcw/types"
+)
+
+type (
+ // WorkloadConfig is the data type which is encoded and stored in an image.
+ WorkloadConfig = types.WorkloadConfig
+ // SevWorkloadData is the type of data in WorkloadConfig.TeeData when the type is SEV.
+ SevWorkloadData = types.SevWorkloadData
+ // SnpWorkloadData is the type of data in WorkloadConfig.TeeData when the type is SNP.
+ SnpWorkloadData = types.SnpWorkloadData
+ // TeeType is one of the known types of trusted execution environments for which we
+ // can generate suitable image contents.
+ TeeType = define.TeeType
+)
+
+const (
+ maxWorkloadConfigSize = 1024 * 1024
+ preferredPaddingBoundary = 4096
+ // SEV is a known trusted execution environment type: AMD-SEV
+ SEV = define.SEV
+ // SEV_NO_ES is a known trusted execution environment type: AMD-SEV without encrypted state
+ SEV_NO_ES = types.SEV_NO_ES //revive:disable-line:var-naming
+ // SNP is a known trusted execution environment type: AMD-SNP
+ SNP = define.SNP
+ // krun looks for its configuration JSON directly in a disk image if the last twelve bytes
+ // of the disk image are this magic value followed by a little-endian 64-bit
+ // length-of-the-configuration
+ krunMagic = "KRUN"
+)
+
+// ReadWorkloadConfigFromImage reads the workload configuration from the
+// specified disk image file.
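+//
+// The configuration is stored as a trailer at the end of the image; a sketch
+// of the layout, reading from the end of the file:
+//
+//	[ ...disk contents... | JSON workload config | "KRUN" | 8-byte little-endian config length ]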
+func ReadWorkloadConfigFromImage(path string) (WorkloadConfig, error) {
+ // Read the last 12 bytes, which should be "KRUN" followed by a 64-bit
+ // little-endian length. The (length) bytes immediately preceding
+ // these hold the JSON-encoded workloadConfig.
+ var wc WorkloadConfig
+ f, err := os.Open(path)
+ if err != nil {
+ return wc, err
+ }
+ defer f.Close()
+
+ // Read those last 12 bytes.
+ finalTwelve := make([]byte, 12)
+ if _, err = f.Seek(-12, io.SeekEnd); err != nil {
+ return wc, fmt.Errorf("checking for workload config signature: %w", err)
+ }
+ if n, err := f.Read(finalTwelve); err != nil || n != len(finalTwelve) {
+ if err != nil && !errors.Is(err, io.EOF) {
+ return wc, fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err)
+ }
+ if n != len(finalTwelve) {
+ return wc, fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", path, n)
+ }
+ }
+ if magic := string(finalTwelve[0:4]); magic != "KRUN" {
+ return wc, fmt.Errorf("expected magic string KRUN in %q, found %q)", path, magic)
+ }
+ length := binary.LittleEndian.Uint64(finalTwelve[4:])
+ if length > maxWorkloadConfigSize {
+ return wc, fmt.Errorf("workload config in %q is %d bytes long, which seems unreasonable (max allowed %d)", path, length, maxWorkloadConfigSize)
+ }
+
+ // Read and decode the config.
+ configBytes := make([]byte, length)
+ if _, err = f.Seek(-(int64(length) + 12), io.SeekEnd); err != nil {
+ return wc, fmt.Errorf("looking for workload config from disk image: %w", err)
+ }
+ if n, err := f.Read(configBytes); err != nil || n != len(configBytes) {
+ if err != nil {
+ return wc, fmt.Errorf("reading workload config from disk image: %w", err)
+ }
+ return wc, fmt.Errorf("short read (expected %d bytes near the end of %q, got %d)", len(configBytes), path, n)
+ }
+ err = json.Unmarshal(configBytes, &wc)
+ if err != nil {
+ err = fmt.Errorf("unmarshaling configuration %q: %w", string(configBytes), err)
+ }
+ return wc, err
+}
+
+// WriteWorkloadConfigToImage writes the workload configuration to the
+// specified disk image file, overwriting a previous configuration if it's
+// asked to and it finds one.
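+//
+// A minimal usage sketch (the WorkloadConfig values are illustrative):
+//
+//	wc := WorkloadConfig{Type: SEV, CPUs: 2, Memory: 512}
+//	encoded, _ := json.Marshal(&wc)
+//	err := WriteWorkloadConfigToImage(imageFile, encoded, true)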
+func WriteWorkloadConfigToImage(imageFile *os.File, workloadConfigBytes []byte, overwrite bool) error {
+ // Read those last 12 bytes to check if there's a configuration there already, which we should overwrite.
+ var overwriteOffset int64
+ if overwrite {
+ finalTwelve := make([]byte, 12)
+ if _, err := imageFile.Seek(-12, io.SeekEnd); err != nil {
+ return fmt.Errorf("checking for workload config signature: %w", err)
+ }
+ if n, err := imageFile.Read(finalTwelve); err != nil || n != len(finalTwelve) {
+ if err != nil && !errors.Is(err, io.EOF) {
+ return fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err)
+ }
+ if n != len(finalTwelve) {
+ return fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", imageFile.Name(), n)
+ }
+ }
+ if magic := string(finalTwelve[0:4]); magic == "KRUN" {
+ length := binary.LittleEndian.Uint64(finalTwelve[4:])
+ if length < maxWorkloadConfigSize {
+ overwriteOffset = int64(length + 12)
+ }
+ }
+ }
+ // If we found a configuration in the file, try to figure out how much padding was used.
+ paddingSize := int64(preferredPaddingBoundary)
+ if overwriteOffset != 0 {
+ st, err := imageFile.Stat()
+ if err != nil {
+ return err
+ }
+ for _, possiblePaddingLength := range []int64{0x100000, 0x10000, 0x1000, 0x200, 0x100} {
+ if overwriteOffset > possiblePaddingLength {
+ continue
+ }
+ if st.Size()%possiblePaddingLength != 0 {
+ continue
+ }
+ if _, err := imageFile.Seek(-possiblePaddingLength, io.SeekEnd); err != nil {
+ return fmt.Errorf("checking size of padding at end of file: %w", err)
+ }
+ buf := make([]byte, possiblePaddingLength)
+ n, err := imageFile.Read(buf)
+ if err != nil {
+ return fmt.Errorf("reading possible padding at end of file: %w", err)
+ }
+ if n != len(buf) {
+ return fmt.Errorf("short read checking size of padding at end of file: %d != %d", n, len(buf))
+ }
+ if bytes.Equal(buf[:possiblePaddingLength-overwriteOffset], make([]byte, possiblePaddingLength-overwriteOffset)) {
+ // everything up to the configuration was zero bytes, so it was padding
+ overwriteOffset = possiblePaddingLength
+ paddingSize = possiblePaddingLength
+ break
+ }
+ }
+ }
+
+ // Append the krun configuration to a new buffer.
+ var formatted bytes.Buffer
+ nWritten, err := formatted.Write(workloadConfigBytes)
+ if err != nil {
+ return fmt.Errorf("building workload config: %w", err)
+ }
+ if nWritten != len(workloadConfigBytes) {
+ return fmt.Errorf("short write appending configuration to buffer: %d != %d", nWritten, len(workloadConfigBytes))
+ }
+ // Append the magic string to the buffer.
+ nWritten, err = formatted.WriteString(krunMagic)
+ if err != nil {
+ return fmt.Errorf("building workload config signature: %w", err)
+ }
+ if nWritten != len(krunMagic) {
+ return fmt.Errorf("short write appending krun magic to buffer: %d != %d", nWritten, len(krunMagic))
+ }
+ // Append the 64-bit little-endian length of the workload configuration to the buffer.
+ workloadConfigLengthBytes := make([]byte, 8)
+ binary.LittleEndian.PutUint64(workloadConfigLengthBytes, uint64(len(workloadConfigBytes)))
+ nWritten, err = formatted.Write(workloadConfigLengthBytes)
+ if err != nil {
+ return fmt.Errorf("building workload config signature size: %w", err)
+ }
+ if nWritten != len(workloadConfigLengthBytes) {
+ return fmt.Errorf("short write appending configuration length to buffer: %d != %d", nWritten, len(workloadConfigLengthBytes))
+ }
+
+ // Build a copy of that data, with padding preceding it.
+ var padded bytes.Buffer
+ if int64(formatted.Len())%paddingSize != 0 {
+ extra := paddingSize - (int64(formatted.Len()) % paddingSize)
+ nWritten, err := padded.Write(make([]byte, extra))
+ if err != nil {
+ return fmt.Errorf("buffering padding: %w", err)
+ }
+ if int64(nWritten) != extra {
+ return fmt.Errorf("short write buffering padding for disk image: %d != %d", nWritten, extra)
+ }
+ }
+ extra := int64(formatted.Len())
+ nWritten, err = padded.Write(formatted.Bytes())
+ if err != nil {
+ return fmt.Errorf("buffering workload config: %w", err)
+ }
+ if int64(nWritten) != extra {
+ return fmt.Errorf("short write buffering workload config: %d != %d", nWritten, extra)
+ }
+
+ // Write the buffer to the file, starting with padding.
+ if _, err = imageFile.Seek(-overwriteOffset, io.SeekEnd); err != nil {
+ return fmt.Errorf("preparing to write workload config: %w", err)
+ }
+ nWritten, err = imageFile.Write(padded.Bytes())
+ if err != nil {
+ return fmt.Errorf("writing workload config: %w", err)
+ }
+ if nWritten != padded.Len() {
+ return fmt.Errorf("short write writing configuration to disk image: %d != %d", nWritten, padded.Len())
+ }
+ offset, err := imageFile.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return fmt.Errorf("preparing mark end of disk image: %w", err)
+ }
+ if err = imageFile.Truncate(offset); err != nil {
+ return fmt.Errorf("marking end of disk image: %w", err)
+ }
+ return nil
+}
diff --git a/internal/mkcw/workload_test.go b/internal/mkcw/workload_test.go
new file mode 100644
index 0000000..2de766f
--- /dev/null
+++ b/internal/mkcw/workload_test.go
@@ -0,0 +1,62 @@
+package mkcw
+
+import (
+ "crypto/rand"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestReadWriteWorkloadConfig(t *testing.T) {
+ // Create a temporary file to stand in for a disk image.
+ temp := filepath.Join(t.TempDir(), "disk.img")
+ f, err := os.OpenFile(temp, os.O_CREATE|os.O_RDWR, 0o600)
+ require.NoError(t, err)
+ err = f.Truncate(0x1000000)
+ require.NoError(t, err)
+ defer f.Close()
+
+ // Generate a random "encoded workload config".
+ workloadConfig := make([]byte, 0x100)
+ n, err := rand.Read(workloadConfig)
+ require.NoError(t, err)
+ require.Equal(t, len(workloadConfig), n)
+
+ // Read the size of our temporary file.
+ st, err := f.Stat()
+ require.NoError(t, err)
+ originalSize := st.Size()
+
+ // Should get an error, since there's no workloadConfig in there to read.
+ _, err = ReadWorkloadConfigFromImage(f.Name())
+ require.Error(t, err)
+
+ // File should grow, even though we looked for an old config to overwrite.
+ err = WriteWorkloadConfigToImage(f, workloadConfig, true)
+ require.NoError(t, err)
+ st, err = f.Stat()
+ require.NoError(t, err)
+ require.Greater(t, st.Size(), originalSize)
+ originalSize = st.Size()
+
+ // File shouldn't grow, even when we overwrite the config with a slightly larger one.
+ err = WriteWorkloadConfigToImage(f, append([]byte("slightly longer"), workloadConfig...), true)
+ require.NoError(t, err)
+ st, err = f.Stat()
+ require.NoError(t, err)
+ require.Equal(t, originalSize, st.Size())
+ originalSize = st.Size()
+
+ // File should grow if we're not trying to replace an old config with a new one.
+ err = WriteWorkloadConfigToImage(f, []byte("{\"comment\":\"quite a bit shorter\"}"), false)
+ require.NoError(t, err)
+ st, err = f.Stat()
+ require.NoError(t, err)
+ require.Greater(t, st.Size(), originalSize)
+
+ // Should read successfully.
+ _, err = ReadWorkloadConfigFromImage(f.Name())
+ require.NoError(t, err)
+}
diff --git a/internal/parse/parse.go b/internal/parse/parse.go
new file mode 100644
index 0000000..89ff7d3
--- /dev/null
+++ b/internal/parse/parse.go
@@ -0,0 +1,79 @@
+package parse
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/common/pkg/parse"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// ValidateVolumeMountHostDir validates the host path of buildah --volume
+func ValidateVolumeMountHostDir(hostDir string) error {
+ if !filepath.IsAbs(hostDir) {
+ return fmt.Errorf("invalid host path, must be an absolute path %q", hostDir)
+ }
+ if _, err := os.Stat(hostDir); err != nil {
+ return err
+ }
+ return nil
+}
+
+// RevertEscapedColon converts "\:" to ":"
+func RevertEscapedColon(source string) string {
+ return strings.ReplaceAll(source, "\\:", ":")
+}
+
+// SplitStringWithColonEscape splits string into slice by colon. Backslash-escaped colon (i.e. "\:") will not be regarded as separator
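+//
+// For example (a sketch): the string `/a\:b:/c` splits into ["/a:b", "/c"],
+// while `/a:/b:ro` splits into ["/a", "/b", "ro"].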
+func SplitStringWithColonEscape(str string) []string {
+ result := make([]string, 0, 3)
+ sb := &strings.Builder{}
+ for idx, r := range str {
+ if r == ':' {
+ // the colon is backslash-escaped
+ if idx > 0 && str[idx-1] == '\\' {
+ sb.WriteRune(r)
+ } else {
+ // os.Stat will fail if path contains escaped colon
+ result = append(result, RevertEscapedColon(sb.String()))
+ sb.Reset()
+ }
+ } else {
+ sb.WriteRune(r)
+ }
+ }
+ if sb.Len() > 0 {
+ result = append(result, RevertEscapedColon(sb.String()))
+ }
+ return result
+}
+
+// Volume parses the input of --volume
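+//
+// For example (a sketch): "/mnt/data:/data:ro" produces an rbind mount with
+// Source "/mnt/data", Destination "/data", and Options ["ro"]:
+//
+//	mount, err := Volume("/mnt/data:/data:ro")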
+func Volume(volume string) (specs.Mount, error) {
+ mount := specs.Mount{}
+ arr := SplitStringWithColonEscape(volume)
+ if len(arr) < 2 {
+ return mount, fmt.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
+ }
+ if err := ValidateVolumeMountHostDir(arr[0]); err != nil {
+ return mount, err
+ }
+ if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil {
+ return mount, err
+ }
+ mountOptions := ""
+ if len(arr) > 2 {
+ mountOptions = arr[2]
+ if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil {
+ return mount, err
+ }
+ }
+ mountOpts := strings.Split(mountOptions, ",")
+ mount.Source = arr[0]
+ mount.Destination = arr[1]
+ mount.Type = "rbind"
+ mount.Options = mountOpts
+ return mount, nil
+}
diff --git a/internal/source/add.go b/internal/source/add.go
new file mode 100644
index 0000000..8363c62
--- /dev/null
+++ b/internal/source/add.go
@@ -0,0 +1,133 @@
+package source
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/archive"
+ specV1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// AddOptions include data to alter certain knobs when adding a source artifact
+// to a source image.
+type AddOptions struct {
+ // Annotations for the source artifact.
+ Annotations []string
+}
+
+// annotations parses the specified annotations and transforms them into a map.
+// A given annotation can be specified only once.
+func (o *AddOptions) annotations() (map[string]string, error) {
+ annotations := make(map[string]string)
+
+ for _, unparsed := range o.Annotations {
+ parsed := strings.SplitN(unparsed, "=", 2)
+ if len(parsed) != 2 {
+ return nil, fmt.Errorf("invalid annotation %q (expected format is \"key=value\")", unparsed)
+ }
+ if _, exists := annotations[parsed[0]]; exists {
+ return nil, fmt.Errorf("annotation %q specified more than once", parsed[0])
+ }
+ annotations[parsed[0]] = parsed[1]
+ }
+
+ return annotations, nil
+}
+
+// Add adds the specified source artifact at `artifactPath` to the source image
+// at `sourcePath`. Note that the artifact will be added as a gzip-compressed
+// tarball. Add attempts to auto-tar and auto-compress only if necessary.
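+//
+// A usage sketch (paths and the annotation are illustrative):
+//
+//	opts := AddOptions{Annotations: []string{"org.example.component=docs"}}
+//	err := Add(context.TODO(), "/path/to/sourceImage", "/path/to/artifact", opts)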
+func Add(ctx context.Context, sourcePath string, artifactPath string, options AddOptions) error {
+ // Let's first make sure `sourcePath` exists and that we can access it.
+ if _, err := os.Stat(sourcePath); err != nil {
+ return err
+ }
+
+ annotations, err := options.annotations()
+ if err != nil {
+ return err
+ }
+
+ ociDest, err := openOrCreateSourceImage(ctx, sourcePath)
+ if err != nil {
+ return err
+ }
+ defer ociDest.Close()
+
+ tarStream, err := archive.TarWithOptions(artifactPath, &archive.TarOptions{Compression: archive.Gzip})
+ if err != nil {
+ return fmt.Errorf("creating compressed tar stream: %w", err)
+ }
+
+ info := types.BlobInfo{
+ Size: -1, // "unknown": we'll get that information *after* adding
+ }
+ addedBlob, err := ociDest.PutBlob(ctx, tarStream, info, nil, false)
+ if err != nil {
+ return fmt.Errorf("adding source artifact: %w", err)
+ }
+
+ // Add the new layers to the source image's manifest.
+ manifest, oldManifestDigest, _, err := readManifestFromOCIPath(ctx, sourcePath)
+ if err != nil {
+ return err
+ }
+ manifest.Layers = append(manifest.Layers,
+ specV1.Descriptor{
+ MediaType: specV1.MediaTypeImageLayerGzip,
+ Digest: addedBlob.Digest,
+ Size: addedBlob.Size,
+ Annotations: annotations,
+ },
+ )
+ manifestDigest, manifestSize, err := writeManifest(ctx, manifest, ociDest)
+ if err != nil {
+ return err
+ }
+
+ // Now, as we've written the updated manifest, we can delete the
+ // previous one. `types.ImageDestination` doesn't expose a high-level
+ // API to manage multi-manifest destination, so we need to do it
+ // manually. Not an issue, since paths are predictable for an OCI
+ // layout.
+ if err := removeBlob(oldManifestDigest, sourcePath); err != nil {
+ return fmt.Errorf("removing old manifest: %w", err)
+ }
+
+ manifestDescriptor := specV1.Descriptor{
+ MediaType: specV1.MediaTypeImageManifest,
+ Digest: *manifestDigest,
+ Size: manifestSize,
+ }
+ if err := updateIndexWithNewManifestDescriptor(&manifestDescriptor, sourcePath); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func updateIndexWithNewManifestDescriptor(manifest *specV1.Descriptor, sourcePath string) error {
+ index := specV1.Index{}
+ indexPath := filepath.Join(sourcePath, "index.json")
+
+ rawData, err := os.ReadFile(indexPath)
+ if err != nil {
+ return err
+ }
+ if err := json.Unmarshal(rawData, &index); err != nil {
+ return err
+ }
+
+ index.Manifests = []specV1.Descriptor{*manifest}
+ rawData, err = json.Marshal(&index)
+ if err != nil {
+ return err
+ }
+
+ return os.WriteFile(indexPath, rawData, 0644)
+}
diff --git a/internal/source/create.go b/internal/source/create.go
new file mode 100644
index 0000000..c335cd0
--- /dev/null
+++ b/internal/source/create.go
@@ -0,0 +1,70 @@
+package source
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ spec "github.com/opencontainers/image-spec/specs-go"
+ specV1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// CreateOptions includes data to alter certain knobs when creating a source
+// image.
+type CreateOptions struct {
+ // Author is the author of the source image.
+ Author string
+ // TimeStamp controls whether a "created" timestamp is set or not.
+ TimeStamp bool
+}
+
+// createdTime returns `time.Now()` if the options are configured to include a
+// time stamp.
+func (o *CreateOptions) createdTime() *time.Time {
+ if !o.TimeStamp {
+ return nil
+ }
+ now := time.Now()
+ return &now
+}
+
+// Create creates an empty source image at the specified `sourcePath`. Note
+// that `sourcePath` must not exist.
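+//
+// A usage sketch (values are illustrative):
+//
+//	opts := CreateOptions{Author: "Jane Doe", TimeStamp: true}
+//	err := Create(context.TODO(), "/path/to/newSourceImage", opts)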
+func Create(ctx context.Context, sourcePath string, options CreateOptions) error {
+ if _, err := os.Stat(sourcePath); err == nil {
+ return fmt.Errorf("creating source image: %q already exists", sourcePath)
+ }
+
+ ociDest, err := openOrCreateSourceImage(ctx, sourcePath)
+ if err != nil {
+ return err
+ }
+ defer ociDest.Close()
+
+ // Create and add a config.
+ config := ImageConfig{
+ Author: options.Author,
+ Created: options.createdTime(),
+ }
+ configBlob, err := addConfig(ctx, &config, ociDest)
+ if err != nil {
+ return err
+ }
+
+ // Create and write the manifest.
+ manifest := specV1.Manifest{
+ Versioned: spec.Versioned{SchemaVersion: 2},
+ MediaType: specV1.MediaTypeImageManifest,
+ Config: specV1.Descriptor{
+ MediaType: MediaTypeSourceImageConfig,
+ Digest: configBlob.Digest,
+ Size: configBlob.Size,
+ },
+ }
+ if _, _, err := writeManifest(ctx, &manifest, ociDest); err != nil {
+ return err
+ }
+
+ return ociDest.Commit(ctx, nil)
+}
diff --git a/internal/source/pull.go b/internal/source/pull.go
new file mode 100644
index 0000000..f8743dd
--- /dev/null
+++ b/internal/source/pull.go
@@ -0,0 +1,110 @@
+package source
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/pkg/shortnames"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+)
+
+// PullOptions includes data to alter certain knobs when pulling a source
+// image.
+type PullOptions struct {
+ // Require HTTPS and verify certificates when accessing the registry.
+ TLSVerify bool
+ // username[:password] to use when connecting to the registry.
+ Credentials string
+ // Quiet the progress bars when pulling.
+ Quiet bool
+}
+
+// Pull `imageInput` from a container registry to `sourcePath`.
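+//
+// A usage sketch (the registry reference is illustrative); note that short
+// names are rejected, so the reference must be fully qualified:
+//
+//	opts := PullOptions{TLSVerify: true}
+//	err := Pull(context.TODO(), "registry.example.com/project/source:latest", "/path/to/sourceImage", opts)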
+func Pull(ctx context.Context, imageInput string, sourcePath string, options PullOptions) error {
+ if _, err := os.Stat(sourcePath); err == nil {
+ return fmt.Errorf("%q already exists", sourcePath)
+ }
+
+ srcRef, err := stringToImageReference(imageInput)
+ if err != nil {
+ return err
+ }
+ destRef, err := layout.ParseReference(sourcePath)
+ if err != nil {
+ return err
+ }
+
+ sysCtx := &types.SystemContext{
+ DockerInsecureSkipTLSVerify: types.NewOptionalBool(!options.TLSVerify),
+ }
+ if options.Credentials != "" {
+ authConf, err := parse.AuthConfig(options.Credentials)
+ if err != nil {
+ return err
+ }
+ sysCtx.DockerAuthConfig = authConf
+ }
+
+ if err := validateSourceImageReference(ctx, srcRef, sysCtx); err != nil {
+ return err
+ }
+
+ policy, err := signature.DefaultPolicy(sysCtx)
+ if err != nil {
+ return fmt.Errorf("obtaining default signature policy: %w", err)
+ }
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return fmt.Errorf("creating new signature policy context: %w", err)
+ }
+
+ copyOpts := copy.Options{
+ SourceCtx: sysCtx,
+ }
+ if !options.Quiet {
+ copyOpts.ReportWriter = os.Stderr
+ }
+ if _, err := copy.Image(ctx, policyContext, destRef, srcRef, &copyOpts); err != nil {
+ return fmt.Errorf("pulling source image: %w", err)
+ }
+
+ return nil
+}
+
+func stringToImageReference(imageInput string) (types.ImageReference, error) {
+ if shortnames.IsShortName(imageInput) {
+ return nil, fmt.Errorf("pulling source images by short name (%q) is not supported, please use a fully-qualified name", imageInput)
+ }
+
+ ref, err := alltransports.ParseImageName("docker://" + imageInput)
+ if err != nil {
+ return nil, fmt.Errorf("parsing image name: %w", err)
+ }
+
+ return ref, nil
+}
+
+func validateSourceImageReference(ctx context.Context, ref types.ImageReference, sysCtx *types.SystemContext) error {
+ src, err := ref.NewImageSource(ctx, sysCtx)
+ if err != nil {
+ return fmt.Errorf("creating image source from reference: %w", err)
+ }
+ defer src.Close()
+
+ ociManifest, _, _, err := readManifestFromImageSource(ctx, src)
+ if err != nil {
+ return err
+ }
+
+ if ociManifest.Config.MediaType != MediaTypeSourceImageConfig {
+ return fmt.Errorf("invalid media type of image config %q (expected: %q)", ociManifest.Config.MediaType, MediaTypeSourceImageConfig)
+ }
+
+ return nil
+}
diff --git a/internal/source/push.go b/internal/source/push.go
new file mode 100644
index 0000000..799912c
--- /dev/null
+++ b/internal/source/push.go
@@ -0,0 +1,69 @@
+package source
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/types"
+)
+
+// PushOptions includes data to alter certain knobs when pushing a source
+// image.
+type PushOptions struct {
+ // Require HTTPS and verify certificates when accessing the registry.
+ TLSVerify bool
+ // username[:password] to use when connecting to the registry.
+ Credentials string
+ // Quiet the progress bars when pushing.
+ Quiet bool
+}
+
+// Push the source image at `sourcePath` to `imageInput` at a container
+// registry.
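+//
+// A usage sketch (the registry reference is illustrative):
+//
+//	opts := PushOptions{TLSVerify: true}
+//	err := Push(context.TODO(), "/path/to/sourceImage", "registry.example.com/project/source:latest", opts)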
+func Push(ctx context.Context, sourcePath string, imageInput string, options PushOptions) error {
+ srcRef, err := layout.ParseReference(sourcePath)
+ if err != nil {
+ return err
+ }
+ destRef, err := stringToImageReference(imageInput)
+ if err != nil {
+ return err
+ }
+
+ sysCtx := &types.SystemContext{
+ DockerInsecureSkipTLSVerify: types.NewOptionalBool(!options.TLSVerify),
+ }
+ if options.Credentials != "" {
+ authConf, err := parse.AuthConfig(options.Credentials)
+ if err != nil {
+ return err
+ }
+ sysCtx.DockerAuthConfig = authConf
+ }
+
+ policy, err := signature.DefaultPolicy(sysCtx)
+ if err != nil {
+ return fmt.Errorf("obtaining default signature policy: %w", err)
+ }
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return fmt.Errorf("creating new signature policy context: %w", err)
+ }
+
+ copyOpts := &copy.Options{
+ DestinationCtx: sysCtx,
+ }
+ if !options.Quiet {
+ copyOpts.ReportWriter = os.Stderr
+ }
+ if _, err := copy.Image(ctx, policyContext, destRef, srcRef, copyOpts); err != nil {
+ return fmt.Errorf("pushing source image: %w", err)
+ }
+
+ return nil
+}
diff --git a/internal/source/source.go b/internal/source/source.go
new file mode 100644
index 0000000..b44a903
--- /dev/null
+++ b/internal/source/source.go
@@ -0,0 +1,121 @@
+package source
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ specV1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// MediaTypeSourceImageConfig specifies the media type of a source-image config.
+const MediaTypeSourceImageConfig = "application/vnd.oci.source.image.config.v1+json"
+
+// ImageConfig specifies the config of a source image.
+type ImageConfig struct {
+ // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
+ Created *time.Time `json:"created,omitempty"`
+
+ // Author is the author of the source image.
+ Author string `json:"author,omitempty"`
+}
+
+// writeManifest writes the specified OCI `manifest` to the source image at
+// `ociDest`.
+func writeManifest(ctx context.Context, manifest *specV1.Manifest, ociDest types.ImageDestination) (*digest.Digest, int64, error) {
+ rawData, err := json.Marshal(&manifest)
+ if err != nil {
+ return nil, -1, fmt.Errorf("marshalling manifest: %w", err)
+ }
+
+ if err := ociDest.PutManifest(ctx, rawData, nil); err != nil {
+ return nil, -1, fmt.Errorf("writing manifest: %w", err)
+ }
+
+ manifestDigest := digest.FromBytes(rawData)
+ return &manifestDigest, int64(len(rawData)), nil
+}
+
+// readManifestFromImageSource reads the manifest from the specified image
+// source. Note that the manifest is expected to be an OCI v1 manifest.
+func readManifestFromImageSource(ctx context.Context, src types.ImageSource) (*specV1.Manifest, *digest.Digest, int64, error) {
+ rawData, mimeType, err := src.GetManifest(ctx, nil)
+ if err != nil {
+ return nil, nil, -1, err
+ }
+ if mimeType != specV1.MediaTypeImageManifest {
+ return nil, nil, -1, fmt.Errorf("image %q is of type %q (expected: %q)", strings.TrimPrefix(src.Reference().StringWithinTransport(), "//"), mimeType, specV1.MediaTypeImageManifest)
+ }
+
+ manifest := specV1.Manifest{}
+ if err := json.Unmarshal(rawData, &manifest); err != nil {
+ return nil, nil, -1, fmt.Errorf("reading manifest: %w", err)
+ }
+
+ manifestDigest := digest.FromBytes(rawData)
+ return &manifest, &manifestDigest, int64(len(rawData)), nil
+}
+
+// readManifestFromOCIPath returns the manifest of the specified source image
+// at `sourcePath` along with its digest. The digest can later on be used to
+// locate the manifest on the file system.
+func readManifestFromOCIPath(ctx context.Context, sourcePath string) (*specV1.Manifest, *digest.Digest, int64, error) {
+ ociRef, err := layout.ParseReference(sourcePath)
+ if err != nil {
+ return nil, nil, -1, err
+ }
+
+ ociSource, err := ociRef.NewImageSource(ctx, &types.SystemContext{})
+ if err != nil {
+ return nil, nil, -1, err
+ }
+ defer ociSource.Close()
+
+ return readManifestFromImageSource(ctx, ociSource)
+}
+
+// openOrCreateSourceImage returns an OCI types.ImageDestination for the
+// specified `sourcePath`. Note that if the path doesn't exist, it'll be
+// created along with the OCI directory layout.
+func openOrCreateSourceImage(ctx context.Context, sourcePath string) (types.ImageDestination, error) {
+ ociRef, err := layout.ParseReference(sourcePath)
+ if err != nil {
+ return nil, err
+ }
+
+ // This will implicitly create an OCI directory layout at `path`.
+ return ociRef.NewImageDestination(ctx, &types.SystemContext{})
+}
+
+// addConfig adds `config` to `ociDest` and returns the corresponding blob
+// info.
+func addConfig(ctx context.Context, config *ImageConfig, ociDest types.ImageDestination) (*types.BlobInfo, error) {
+ rawData, err := json.Marshal(config)
+ if err != nil {
+ return nil, fmt.Errorf("marshalling config: %w", err)
+ }
+
+ info := types.BlobInfo{
+ Size: -1, // "unknown": we'll get that information *after* adding
+ }
+ addedBlob, err := ociDest.PutBlob(ctx, bytes.NewReader(rawData), info, nil, true)
+ if err != nil {
+ return nil, fmt.Errorf("adding config: %w", err)
+ }
+
+ return &addedBlob, nil
+}
+
+// removeBlob removes the specified `blob` from the source image at `sourcePath`.
+func removeBlob(blob *digest.Digest, sourcePath string) error {
+ blobPath := filepath.Join(filepath.Join(sourcePath, "blobs/sha256"), blob.Encoded())
+ return os.Remove(blobPath)
+}
diff --git a/internal/tmpdir/tmpdir.go b/internal/tmpdir/tmpdir.go
new file mode 100644
index 0000000..ff966b2
--- /dev/null
+++ b/internal/tmpdir/tmpdir.go
@@ -0,0 +1,26 @@
+package tmpdir
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/sirupsen/logrus"
+)
+
+// GetTempDir returns the path of the preferred temporary directory on the host.
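+//
+// The lookup order is: the TMPDIR environment variable (resolved to an
+// absolute path), the image_copy_tmp_dir setting from containers.conf, and
+// finally "/var/tmp".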
+func GetTempDir() string {
+ if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
+ abs, err := filepath.Abs(tmpdir)
+ if err == nil {
+ return abs
+ }
+ logrus.Warnf("ignoring TMPDIR from environment, evaluating it: %v", err)
+ }
+ if containerConfig, err := config.Default(); err == nil {
+ if tmpdir, err := containerConfig.ImageCopyTmpDir(); err == nil {
+ return tmpdir
+ }
+ }
+ return "/var/tmp"
+}
diff --git a/internal/tmpdir/tmpdir_test.go b/internal/tmpdir/tmpdir_test.go
new file mode 100644
index 0000000..ea7d673
--- /dev/null
+++ b/internal/tmpdir/tmpdir_test.go
@@ -0,0 +1,58 @@
+package tmpdir
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/common/pkg/config"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetTempDir(t *testing.T) {
+ // test default
+ err := os.Unsetenv("TMPDIR")
+ require.NoError(t, err)
+ err = os.Setenv("CONTAINERS_CONF", "/dev/null")
+ require.NoError(t, err)
+ tmpdir := GetTempDir()
+ assert.Equal(t, "/var/tmp", tmpdir)
+
+ // test TMPDIR Environment
+ err = os.Setenv("TMPDIR", "/tmp/bogus")
+ require.NoError(t, err)
+ tmpdir = GetTempDir()
+ assert.Equal(t, tmpdir, "/tmp/bogus")
+ err = os.Unsetenv("TMPDIR")
+ require.NoError(t, err)
+
+ // relative TMPDIR should be automatically converted to absolute
+ err = os.Setenv("TMPDIR", ".")
+ require.NoError(t, err)
+ tmpdir = GetTempDir()
+ assert.True(t, filepath.IsAbs(tmpdir), "path from GetTempDir should always be absolute")
+ err = os.Unsetenv("TMPDIR")
+ require.NoError(t, err)
+
+ f, err := os.CreateTemp("", "containers.conf-")
+ require.NoError(t, err)
+ // close and remove the temporary file when the test ends
+ defer f.Close()
+ defer os.Remove(f.Name())
+ data := []byte("[engine]\nimage_copy_tmp_dir=\"/mnt\"\n")
+ _, err = f.Write(data)
+ require.NoError(t, err)
+
+ err = os.Setenv("CONTAINERS_CONF", f.Name())
+ require.NoError(t, err)
+ // force config reset of default containers.conf
+ options := config.Options{
+ SetDefault: true,
+ }
+ _, err = config.New(&options)
+ require.NoError(t, err)
+ tmpdir = GetTempDir()
+ assert.Equal(t, "/mnt", tmpdir)
+
+}
diff --git a/internal/types.go b/internal/types.go
new file mode 100644
index 0000000..ee87eca
--- /dev/null
+++ b/internal/types.go
@@ -0,0 +1,18 @@
+package internal
+
+const (
+ // Temp directory which stores external artifacts which are downloaded for a build.
+ // Example: tar files from external sources.
+ BuildahExternalArtifactsDir = "buildah-external-artifacts"
+)
+
+// Types in internal packages are subject to change between releases; avoid using them outside of buildah.
+
+// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor
+// StageExecutor has ability to mount stages/images in current context and
+// automatically clean them up.
+type StageMountDetails struct {
+ DidExecute bool // tells whether the stage being mounted was freshly executed or came from an older cache
+ IsStage bool // tells whether the mountpoint returned from the stage executor is a stage or an image
+ MountPoint string // mountpoint of the stage or image
+}
diff --git a/internal/util/util.go b/internal/util/util.go
new file mode 100644
index 0000000..01f4b10
--- /dev/null
+++ b/internal/util/util.go
@@ -0,0 +1,99 @@
+package util
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/common/libimage"
+ lplatform "github.com/containers/common/libimage/platform"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/unshare"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// LookupImage returns the *libimage.Image corresponding to the given image name or ID.
+func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (*libimage.Image, error) {
+ systemContext := ctx
+ if systemContext == nil {
+ systemContext = &types.SystemContext{}
+ }
+ runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+ if err != nil {
+ return nil, err
+ }
+ localImage, _, err := runtime.LookupImage(image, nil)
+ if err != nil {
+ return nil, err
+ }
+ return localImage, nil
+}
+
+// NormalizePlatform validates and translate the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+//
+// Wrapper around libimage.NormalizePlatform to return and consume
+// v1.Platform instead of independent os, arch and variant.
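+//
+// For example (a sketch):
+//
+//	p := NormalizePlatform(v1.Platform{OS: "linux", Architecture: "x86_64"})
+//	// p.Architecture is now "amd64"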
+func NormalizePlatform(platform v1.Platform) v1.Platform {
+ os, arch, variant := lplatform.Normalize(platform.OS, platform.Architecture, platform.Variant)
+ return v1.Platform{
+ OS: os,
+ Architecture: arch,
+ Variant: variant,
+ }
+}
+
+// ExportFromReader reads bytes from given reader and exports to external tar, directory or stdout.
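+//
+// A usage sketch for writing the stream to a tar file (tarStream and the
+// path are illustrative):
+//
+//	opts := define.BuildOutputOption{Path: "/tmp/rootfs.tar", IsDir: false, IsStdout: false}
+//	err := ExportFromReader(tarStream, opts)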
+func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
+ var err error
+ if !filepath.IsAbs(opts.Path) {
+ opts.Path, err = filepath.Abs(opts.Path)
+ if err != nil {
+ return err
+ }
+ }
+ if opts.IsDir {
+ // In order to keep this feature as close to buildkit as possible, we
+ // preserve ownership when invoked as root, since the caller already has
+ // access to the artifacts. For rootless users, however, ownership has to
+ // be changed so that exported artifacts remain accessible to
+ // unprivileged users.
+ // See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
+ noLChown := false
+ if unshare.IsRootless() {
+ noLChown = true
+ }
+
+ err = os.MkdirAll(opts.Path, 0700)
+ if err != nil {
+ return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err)
+ }
+
+ err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown})
+ if err != nil {
+ return fmt.Errorf("failed while performing untar at %q: %w", opts.Path, err)
+ }
+ } else {
+ outFile := os.Stdout
+ if !opts.IsStdout {
+ outFile, err = os.Create(opts.Path)
+ if err != nil {
+ return fmt.Errorf("failed while creating destination tar at %q: %w", opts.Path, err)
+ }
+ defer outFile.Close()
+ }
+ _, err = io.Copy(outFile, input)
+ if err != nil {
+ return fmt.Errorf("failed while performing copy to %q: %w", opts.Path, err)
+ }
+ }
+ return nil
+}
diff --git a/internal/volumes/volumes.go b/internal/volumes/volumes.go
new file mode 100644
index 0000000..a79b8df
--- /dev/null
+++ b/internal/volumes/volumes.go
@@ -0,0 +1,637 @@
+package volumes
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "errors"
+
+ "github.com/containers/buildah/define"
+ "github.com/containers/buildah/internal"
+ internalParse "github.com/containers/buildah/internal/parse"
+ "github.com/containers/buildah/internal/tmpdir"
+ internalUtil "github.com/containers/buildah/internal/util"
+ "github.com/containers/common/pkg/parse"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/containers/storage/pkg/unshare"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ selinux "github.com/opencontainers/selinux/go-selinux"
+)
+
+const (
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs = "tmpfs"
+ // TypeCache is the type for mounting a common persistent cache from host
+ TypeCache = "cache"
+ // mount=type=cache must create a persistent directory on the host so it's
+ // available for all consecutive builds. The lifecycle of that directory is
+ // inherited from how the host machine treats its temporary directory.
+ buildahCacheDir = "buildah-cache"
+ // mount=type=cache allows users to lock a cache store while it's being used by another build
+ BuildahCacheLockfile = "buildah-cache-lockfile"
+ // All the lockfiles are stored in a separate directory inside the cache parent.
+ // Example: `/var/tmp/buildah-cache-<uid>/<target>/buildah-cache-lockfile`
+ BuildahCacheLockfileDir = "buildah-cache-lockfiles"
+)
+
+var (
+ errBadMntOption = errors.New("invalid mount option")
+ errBadOptionArg = errors.New("must provide an argument for option")
+ errBadVolDest = errors.New("must set volume destination")
+ errBadVolSrc = errors.New("must set volume source")
+ errDuplicateDest = errors.New("duplicate mount destination")
+)
+
+// CacheParent returns a cache parent for --mount=type=cache
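+//
+// For example, for a rootless user with UID 1000 and TMPDIR unset, this is
+// typically "/var/tmp/buildah-cache-1000" (a sketch; the exact parent comes
+// from tmpdir.GetTempDir).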
+func CacheParent() string {
+ return filepath.Join(tmpdir.GetTempDir(), buildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID()))
+}
+
+// GetBindMount parses a single bind mount entry from the --mount flag.
+// It returns the parsed mount and the name of the image that was mounted,
+// if any (otherwise the returned string is empty). The caller is expected
+// to unmount any image that was mounted.
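+//
+// For example, the flag --mount type=bind,source=./src,target=/src,ro
+// arrives here as args ["type=bind", "source=./src", "target=/src", "ro"]
+// (a sketch; store, contextDir, and workDir come from the build):
+//
+//	mount, image, err := GetBindMount(nil, args, contextDir, store, "", nil, workDir)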
+func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) {
+ newMount := specs.Mount{
+ Type: define.TypeBind,
+ }
+
+ setRelabel := false
+ mountReadability := false
+ setDest := false
+ bindNonRecursive := false
+ fromImage := ""
+
+ for _, val := range args {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
+ case "bind-nonrecursive":
+ newMount.Options = append(newMount.Options, "bind")
+ bindNonRecursive = true
+ case "ro", "nosuid", "nodev", "noexec":
+ // TODO: detect duplication of these options.
+ // (Is this necessary?)
+ newMount.Options = append(newMount.Options, kv[0])
+ mountReadability = true
+ case "rw", "readwrite":
+ newMount.Options = append(newMount.Options, "rw")
+ mountReadability = true
+ case "readonly":
+ // Alias for "ro"
+ newMount.Options = append(newMount.Options, "ro")
+ mountReadability = true
+ case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
+ newMount.Options = append(newMount.Options, kv[0])
+ case "from":
+ if len(kv) == 1 {
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ fromImage = kv[1]
+ case "bind-propagation":
+ if len(kv) == 1 {
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Options = append(newMount.Options, kv[1])
+ case "src", "source":
+ if len(kv) == 1 {
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Source = kv[1]
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ targetPath := kv[1]
+ if !path.IsAbs(targetPath) {
+ targetPath = filepath.Join(workDir, targetPath)
+ }
+ if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
+ return newMount, "", err
+ }
+ newMount.Destination = targetPath
+ setDest = true
+ case "relabel":
+ if setRelabel {
+ return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
+ }
+ setRelabel = true
+ if len(kv) != 2 {
+ return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
+ }
+ switch kv[1] {
+ case "private":
+ newMount.Options = append(newMount.Options, "Z")
+ case "shared":
+ newMount.Options = append(newMount.Options, "z")
+ default:
+ return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
+ }
+ case "consistency":
+ // Option for OS X only, has no meaning on other platforms
+ // and can thus be safely ignored.
+ // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
+ default:
+ return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
+ }
+ }
+
+ // if no readability option was specified, default to read-only
+ if !mountReadability {
+ newMount.Options = append(newMount.Options, "ro")
+ }
+
+ // This variable ensures that we return the image name only if we actually mounted the image ourselves.
+ isImageMounted := false
+ if fromImage != "" {
+ mountPoint := ""
+ if additionalMountPoints != nil {
+ if val, ok := additionalMountPoints[fromImage]; ok {
+ mountPoint = val.MountPoint
+ }
+ }
+ // if mountPoint of image was not found in additionalMap
+ // or additionalMap was nil, try mounting image
+ if mountPoint == "" {
+ image, err := internalUtil.LookupImage(ctx, store, fromImage)
+ if err != nil {
+ return newMount, "", err
+ }
+
+ mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel)
+ if err != nil {
+ return newMount, "", err
+ }
+ isImageMounted = true
+ }
+ contextDir = mountPoint
+ }
+
+ // buildkit parity: default bind option must be `rbind`
+ // unless specified
+ if !bindNonRecursive {
+ newMount.Options = append(newMount.Options, "rbind")
+ }
+
+ if !setDest {
+ return newMount, fromImage, errBadVolDest
+ }
+
+ // buildkit parity: support absolute path for sources from current build context
+ if contextDir != "" {
+ // path should be /contextDir/specified path
+ newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source))
+ } else {
+ // it looks like it's coming from `buildah run --mount=type=bind`, so allow an absolute path;
+ // error out if no source is set
+ if newMount.Source == "" {
+ return newMount, "", errBadVolSrc
+ }
+ if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
+ return newMount, "", err
+ }
+ }
+
+ opts, err := parse.ValidateVolumeOpts(newMount.Options)
+ if err != nil {
+ return newMount, fromImage, err
+ }
+ newMount.Options = opts
+
+ if !isImageMounted {
+ // we don't want any cleanups if the image was not mounted explicitly,
+ // so don't return anything
+ fromImage = ""
+ }
+
+ return newMount, fromImage, nil
+}
+
+// GetCacheMount parses a single cache mount entry from the --mount flag.
+//
+// If this function succeeds and returns a non-nil *lockfile.LockFile, the
+// caller must unlock it once the corresponding RUN step has finished.
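+//
+// For example (a sketch; store and workDir come from the build):
+//
+//	args := []string{"type=cache", "target=/root/.cache/go-build", "id=gocache", "sharing=locked"}
+//	mount, lock, err := GetCacheMount(args, store, "", nil, workDir)
+//	// ... run the step, then ...
+//	if lock != nil {
+//		lock.Unlock()
+//	}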
+func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) {
+ var err error
+ var mode uint64
+ var buildahLockFilesDir string
+ var (
+ setDest bool
+ setShared bool
+ setReadOnly bool
+ foundSElinuxLabel bool
+ )
+ fromStage := ""
+ newMount := specs.Mount{
+ Type: define.TypeBind,
+ }
+ // if id is set, a new subdirectory named `id` will be created under the per-user cache parent (see CacheParent)
+ id := ""
+ // buildkit parity: cache directory defaults to 755
+ mode = 0o755
+ // buildkit parity: cache directory defaults to uid 0 if not specified
+ uid := 0
+ // buildkit parity: cache directory defaults to gid 0 if not specified
+ gid := 0
+ // sharing mode
+ sharing := "shared"
+
+ for _, val := range args {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
+ case "nosuid", "nodev", "noexec":
+ // TODO: detect duplication of these options.
+ // (Is this necessary?)
+ newMount.Options = append(newMount.Options, kv[0])
+ case "rw", "readwrite":
+ newMount.Options = append(newMount.Options, "rw")
+ case "readonly", "ro":
+ // Alias for "ro"
+ newMount.Options = append(newMount.Options, "ro")
+ setReadOnly = true
+ case "Z", "z":
+ newMount.Options = append(newMount.Options, kv[0])
+ foundSElinuxLabel = true
+ case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U":
+ newMount.Options = append(newMount.Options, kv[0])
+ setShared = true
+ case "sharing":
+ sharing = kv[1]
+ case "bind-propagation":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Options = append(newMount.Options, kv[1])
+ case "id":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ id = kv[1]
+ case "from":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ fromStage = kv[1]
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ targetPath := kv[1]
+ if !path.IsAbs(targetPath) {
+ targetPath = filepath.Join(workDir, targetPath)
+ }
+ if err := parse.ValidateVolumeCtrDir(targetPath); err != nil {
+ return newMount, nil, err
+ }
+ newMount.Destination = targetPath
+ setDest = true
+ case "src", "source":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Source = kv[1]
+ case "mode":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ mode, err = strconv.ParseUint(kv[1], 8, 32)
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err)
+ }
+ case "uid":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ uid, err = strconv.Atoi(kv[1])
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err)
+ }
+ case "gid":
+ if len(kv) == 1 {
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ gid, err = strconv.Atoi(kv[1])
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err)
+ }
+ default:
+ return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
+ }
+ }
+
+ // If SELinux is enabled and no SELinux option was configured,
+ // default to "z", i.e. the shared content label.
+ if !foundSElinuxLabel && (selinux.EnforceMode() != selinux.Disabled) && fromStage == "" {
+ newMount.Options = append(newMount.Options, "z")
+ }
+
+ if !setDest {
+ return newMount, nil, errBadVolDest
+ }
+
+ if fromStage != "" {
+ // do not create cache on host
+ // instead use read-only mounted stage as cache
+ mountPoint := ""
+ if additionalMountPoints != nil {
+ if val, ok := additionalMountPoints[fromStage]; ok {
+ if val.IsStage {
+ mountPoint = val.MountPoint
+ }
+ }
+ }
+ // the cache mount does not support using an image, so if no stage
+ // was found, return an error
+ if mountPoint == "" {
+ return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage)
+ }
+ // path should be /contextDir/specified path
+ newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
+ } else {
+ // we need to create cache on host if no image is being used
+
+ // since type is cache and cache can be reused by consecutive builds
+ // create a common cache directory, which persists on hosts within temp lifecycle
+ // add subdirectory if specified
+
+ // cache parent directory: creates separate cache parent for each user.
+ cacheParent := CacheParent()
+ // create cache on host if not present
+ err = os.MkdirAll(cacheParent, os.FileMode(0755))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err)
+ }
+
+ if id != "" {
+ newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
+ buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id))
+ } else {
+ newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
+ buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination))
+ }
+ idPair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ // buildkit parity: change uid and gid if specified, otherwise keep 0
+ err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
+ }
+
+ // create a subdirectory inside `cacheParent` just to store lockfiles
+ buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir)
+ err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
+ }
+ }
+
+ var targetLock *lockfile.LockFile
+ succeeded := false
+ defer func() {
+ if !succeeded && targetLock != nil {
+ targetLock.Unlock()
+ }
+ }()
+ switch sharing {
+ case "locked":
+ // lock parent cache
+ lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
+ }
+ // Will be unlocked after the RUN step is executed.
+ lockfile.Lock()
+ targetLock = lockfile
+ case "shared":
+ // do nothing since default is `shared`
+ break
+ default:
+ // error out for unknown values
+ return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, errBadMntOption)
+ }
+
+ // buildkit parity: default sharing should be shared
+ // unless specified
+ if !setShared {
+ newMount.Options = append(newMount.Options, "shared")
+ }
+
+ // buildkit parity: the cache must be writable unless `ro` or `readonly` is configured explicitly
+ if !setReadOnly {
+ newMount.Options = append(newMount.Options, "rw")
+ }
+
+ newMount.Options = append(newMount.Options, "bind")
+
+ opts, err := parse.ValidateVolumeOpts(newMount.Options)
+ if err != nil {
+ return newMount, nil, err
+ }
+ newMount.Options = opts
+
+ succeeded = true
+ return newMount, targetLock, nil
+}
+
+func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
+ finalVolumeMounts := make(map[string]specs.Mount)
+
+ for _, volume := range volumes {
+ volumeMount, err := internalParse.Volume(volume)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := finalVolumeMounts[volumeMount.Destination]; ok {
+ return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest)
+ }
+ finalVolumeMounts[volumeMount.Destination] = volumeMount
+ }
+ return finalVolumeMounts, nil
+}
+
+// UnlockLockArray is a helper for cleaning up after GetVolumes and the like.
+func UnlockLockArray(locks []*lockfile.LockFile) {
+ for _, lock := range locks {
+ lock.Unlock()
+ }
+}
+
+// GetVolumes gets the volumes from --volume and --mount.
+//
+// If this function succeeds, the caller must eventually unlock the returned
+// *lockfile.LockFiles, if any, once the corresponding RUN step has finished.
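+//
+// A usage sketch (flag values are illustrative):
+//
+//	volumes := []string{"/mnt/data:/data:ro"}
+//	mounts := []string{"type=tmpfs,target=/dev/shm"}
+//	finalMounts, mountedImages, locks, err := GetVolumes(nil, store, volumes, mounts, contextDir, workDir)
+//	// after the RUN step: UnlockLockArray(locks)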
+func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) {
+ unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ UnlockLockArray(targetLocks)
+ }
+ }()
+ volumeMounts, err := getVolumeMounts(volumes)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ for dest, mount := range volumeMounts {
+ if _, ok := unifiedMounts[dest]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest)
+ }
+ unifiedMounts[dest] = mount
+ }
+
+ finalMounts := make([]specs.Mount, 0, len(unifiedMounts))
+ for _, mount := range unifiedMounts {
+ finalMounts = append(finalMounts, mount)
+ }
+ succeeded = true
+ return finalMounts, mountedImages, targetLocks, nil
+}
+
+// getMounts takes user-provided input from the --mount flag and creates OCI
+// spec mounts.
+// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
+// buildah run --mount type=tmpfs,target=/dev/shm ...
+//
+// If this function succeeds, the caller must eventually unlock the returned
+// *lockfile.LockFiles, if any, once the corresponding RUN step has finished.
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) {
+ // If `type` is not set default to "bind"
+ mountType := define.TypeBind
+ finalMounts := make(map[string]specs.Mount)
+ mountedImages := make([]string, 0)
+ targetLocks := make([]*lockfile.LockFile, 0)
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ UnlockLockArray(targetLocks)
+ }
+ }()
+
+ errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
+
+ // TODO(vrothberg): the manual parsing can be replaced with a regular expression
+ // to allow a more robust parsing of the mount format and to give
+ // precise errors regarding supported format versus supported options.
+ for _, mount := range mounts {
+ tokens := strings.Split(mount, ",")
+ if len(tokens) < 2 {
+ return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+ }
+ for _, field := range tokens {
+ if strings.HasPrefix(field, "type=") {
+ kv := strings.Split(field, "=")
+ if len(kv) != 2 {
+ return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+ }
+ mountType = kv[1]
+ }
+ }
+ switch mountType {
+ case define.TypeBind:
+ mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ mountedImages = append(mountedImages, image)
+ case TypeCache:
+ mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if tl != nil {
+ targetLocks = append(targetLocks, tl)
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ case TypeTmpfs:
+ mount, err := GetTmpfsMount(tokens)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ default:
+ return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType)
+ }
+ }
+
+ succeeded = true
+ return finalMounts, mountedImages, targetLocks, nil
+}
+
+// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
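+//
+// For example (a sketch):
+//
+//	mount, err := GetTmpfsMount([]string{"type=tmpfs", "target=/dev/shm", "tmpfs-size=64m", "tmpfs-mode=1777"})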
+func GetTmpfsMount(args []string) (specs.Mount, error) {
+ newMount := specs.Mount{
+ Type: TypeTmpfs,
+ Source: TypeTmpfs,
+ }
+
+ setDest := false
+
+ for _, val := range args {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
+ case "ro", "nosuid", "nodev", "noexec":
+ newMount.Options = append(newMount.Options, kv[0])
+ case "readonly":
+ // Alias for "ro"
+ newMount.Options = append(newMount.Options, "ro")
+ case "tmpcopyup":
+ // The path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
+ newMount.Options = append(newMount.Options, kv[0])
+ case "tmpfs-mode":
+ if len(kv) == 1 {
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
+ case "tmpfs-size":
+ if len(kv) == 1 {
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
+ case "src", "source":
+ return newMount, errors.New("source is not supported with tmpfs mounts")
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
+ }
+ if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+ return newMount, err
+ }
+ newMount.Destination = kv[1]
+ setDest = true
+ default:
+ return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
+ }
+ }
+
+ if !setDest {
+ return newMount, errBadVolDest
+ }
+
+ return newMount, nil
+}