summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 16:28:49 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-16 16:28:49 +0000
commit3639cda7289cb00a5de837f84dc1b54243bd72e8 (patch)
tree2bb2220e791c493e0f4163ff8a819876bc211e41
parentReleasing progress-linux version 1.52.0+ds1-1~progress7.99u1. (diff)
downloadgolang-github-containers-storage-3639cda7289cb00a5de837f84dc1b54243bd72e8.tar.xz
golang-github-containers-storage-3639cda7289cb00a5de837f84dc1b54243bd72e8.zip
Merging upstream version 1.53.0+ds1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.cirrus.yml2
-rw-r--r--Makefile2
-rw-r--r--VERSION2
-rw-r--r--cmd/containers-storage/create.go2
-rw-r--r--cmd/containers-storage/image.go34
-rw-r--r--docs/containers-storage-get-image-dir.md17
-rw-r--r--docs/containers-storage-get-image-run-dir.md17
-rw-r--r--docs/containers-storage.conf.5.md6
-rw-r--r--drivers/driver.go22
-rw-r--r--drivers/overlay/composefs.go72
-rw-r--r--drivers/overlay/overlay.go245
-rw-r--r--drivers/overlay/overlay_nocgo.go13
-rw-r--r--drivers/vfs/driver.go56
-rw-r--r--go.mod16
-rw-r--r--go.sum32
-rw-r--r--idset_test.go2
-rw-r--r--layers.go126
-rw-r--r--pkg/archive/archive.go35
-rw-r--r--pkg/archive/archive_unix.go25
-rw-r--r--pkg/chunked/cache_linux.go26
-rw-r--r--pkg/chunked/cache_linux_test.go6
-rw-r--r--pkg/chunked/compression_linux.go4
-rw-r--r--pkg/chunked/dump/dump.go24
-rw-r--r--pkg/chunked/storage_linux.go274
-rw-r--r--pkg/chunked/toc/toc.go19
-rw-r--r--pkg/fsverity/fsverity_linux.go45
-rw-r--r--pkg/fsverity/fsverity_unsupported.go21
-rw-r--r--pkg/homedir/homedir_others.go4
-rw-r--r--pkg/homedir/homedir_windows.go29
-rw-r--r--pkg/idtools/idtools_unix.go2
-rw-r--r--storage.conf9
-rw-r--r--store.go172
-rw-r--r--tests/image-dirs.bats39
-rw-r--r--userns.go4
34 files changed, 991 insertions, 413 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 09f4d9e..13bc20e 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
- IMAGE_SUFFIX: "c20231208t193858z-f39f38d13"
+ IMAGE_SUFFIX: "c20240102t155643z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
diff --git a/Makefile b/Makefile
index 77189d4..8461c09 100644
--- a/Makefile
+++ b/Makefile
@@ -41,7 +41,7 @@ containers-storage: ## build using gc on the host
$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
codespell:
- codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w
+ codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w
binary local-binary: containers-storage
diff --git a/VERSION b/VERSION
index a63cb35..3f48301 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.52.0
+1.53.0
diff --git a/cmd/containers-storage/create.go b/cmd/containers-storage/create.go
index 682d579..706e6cd 100644
--- a/cmd/containers-storage/create.go
+++ b/cmd/containers-storage/create.go
@@ -230,7 +230,7 @@ func createContainer(flags *mflag.FlagSet, action string, m storage.Store, args
}
fmt.Printf("%s\n", container.ID)
for _, name := range container.Names {
- fmt.Printf("\t%s", name)
+ fmt.Printf("\t%s\n", name)
}
return 0, nil
}
diff --git a/cmd/containers-storage/image.go b/cmd/containers-storage/image.go
index 6851e0c..014c146 100644
--- a/cmd/containers-storage/image.go
+++ b/cmd/containers-storage/image.go
@@ -131,6 +131,24 @@ func getImageBigDataDigest(flags *mflag.FlagSet, action string, m storage.Store,
return 0, nil
}
+func getImageDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
+ path, err := m.ImageDirectory(args[0])
+ if err != nil {
+ return 1, err
+ }
+ fmt.Printf("%s\n", path)
+ return 0, nil
+}
+
+func getImageRunDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
+ path, err := m.ImageRunDirectory(args[0])
+ if err != nil {
+ return 1, err
+ }
+ fmt.Printf("%s\n", path)
+ return 0, nil
+}
+
func setImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
image, err := m.Image(args[0])
if err != nil {
@@ -216,5 +234,21 @@ func init() {
addFlags: func(flags *mflag.FlagSet, cmd *command) {
flags.StringVar(&paramImageDataFile, []string{"-file", "f"}, paramImageDataFile, "Read data from file")
},
+ },
+ command{
+ names: []string{"get-image-dir", "getimagedir"},
+ optionsHelp: "[options [...]] imageNameOrID",
+ usage: "Find the image's associated data directory",
+ action: getImageDir,
+ minArgs: 1,
+ maxArgs: 1,
+ },
+ command{
+ names: []string{"get-image-run-dir", "getimagerundir"},
+ optionsHelp: "[options [...]] imageNameOrID",
+ usage: "Find the image's associated runtime directory",
+ action: getImageRunDir,
+ minArgs: 1,
+ maxArgs: 1,
})
}
diff --git a/docs/containers-storage-get-image-dir.md b/docs/containers-storage-get-image-dir.md
new file mode 100644
index 0000000..3b5e10b
--- /dev/null
+++ b/docs/containers-storage-get-image-dir.md
@@ -0,0 +1,17 @@
+## containers-storage-get-image-dir 1 "January 2024"
+
+## NAME
+containers-storage get-image-dir - Find lookaside directory for an image
+
+## SYNOPSIS
+**containers-storage** **get-image-dir** [*options* [...]] *imageNameOrID*
+
+## DESCRIPTION
+Prints the location of a directory which the caller can use to store lookaside
+information which should be cleaned up when the image is deleted.
+
+## EXAMPLE
+**containers-storage get-image-dir my-image**
+
+## SEE ALSO
+containers-storage-get-image-run-dir(1)
diff --git a/docs/containers-storage-get-image-run-dir.md b/docs/containers-storage-get-image-run-dir.md
new file mode 100644
index 0000000..03196cb
--- /dev/null
+++ b/docs/containers-storage-get-image-run-dir.md
@@ -0,0 +1,17 @@
+## containers-storage-get-image-run-dir 1 "January 2024"
+
+## NAME
+containers-storage get-image-run-dir - Find runtime lookaside directory for an image
+
+## SYNOPSIS
+**containers-storage** **get-image-run-dir** [*options* [...]] *imageNameOrID*
+
+## DESCRIPTION
+Prints the location of a directory which the caller can use to store lookaside
+information which should be cleaned up when the host is rebooted.
+
+## EXAMPLE
+**containers-storage get-image-run-dir my-image**
+
+## SEE ALSO
+containers-storage-get-image-dir(1)
diff --git a/docs/containers-storage.conf.5.md b/docs/containers-storage.conf.5.md
index e5f1019..53113ec 100644
--- a/docs/containers-storage.conf.5.md
+++ b/docs/containers-storage.conf.5.md
@@ -84,7 +84,7 @@ The `storage.options` table supports the following options:
**additionalimagestores**=[]
Paths to additional container image stores. Usually these are read/only and stored on remote network shares.
-**pull_options** = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
+**pull_options** = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""}
Allows specification of how storage is populated when pulling images. This
option can speed the pulling process of images compressed with format zstd:chunked. Containers/storage looks
@@ -95,7 +95,7 @@ container registry. These options can deduplicate pulling of content, disk
storage of content and can allow the kernel to use less memory when running
containers.
-containers/storage supports three keys
+containers/storage supports four keys
* enable_partial_images="true" | "false"
Tells containers/storage to look for files previously pulled in storage
rather then always pulling them from the container registry.
@@ -107,7 +107,7 @@ containers/storage supports three keys
previously pulled content which can be used when attempting to avoid
pulling content from the container registry
* convert_images = "false" | "true"
- If set to true, containers/storage will convert images to the a format compatible with
+ If set to true, containers/storage will convert images to a format compatible with
partial pulls in order to take advantage of local deduplication and hardlinking. It is an
expensive operation so it is not enabled by default.
diff --git a/drivers/driver.go b/drivers/driver.go
index f71ee69..aa99fde 100644
--- a/drivers/driver.go
+++ b/drivers/driver.go
@@ -196,6 +196,8 @@ type DriverWithDifferOutput struct {
BigData map[string][]byte
TarSplit []byte
TOCDigest digest.Digest
+ // RootDirMode is the mode of the root directory of the layer, if specified.
+ RootDirMode *os.FileMode
// Artifacts is a collection of additional artifacts
// generated by the differ that the storage driver can use.
Artifacts map[string]interface{}
@@ -212,10 +214,26 @@ const (
DifferOutputFormatFlat
)
+type DifferFsVerity int
+
+const (
+ // DifferFsVerityDisabled means no fs-verity is used
+ DifferFsVerityDisabled = iota
+
+ // DifferFsVerityEnabled means fs-verity is used when supported
+ DifferFsVerityEnabled
+
+ // DifferFsVerityRequired means fs-verity is required
+ DifferFsVerityRequired
+)
+
// DifferOptions overrides how the differ work
type DifferOptions struct {
// Format defines the destination directory layout format
Format DifferOutputFormat
+
+ // UseFsVerity defines whether fs-verity is used
+ UseFsVerity DifferFsVerity
}
// Differ defines the interface for using a custom differ.
@@ -231,8 +249,8 @@ type DriverWithDiffer interface {
// ApplyDiffWithDiffer applies the changes using the callback function.
// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
- // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
- ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
+ // ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
+ ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
// DifferTarget gets the location where files are stored for the layer.
diff --git a/drivers/overlay/composefs.go b/drivers/overlay/composefs.go
index ed9287d..baa9d7b 100644
--- a/drivers/overlay/composefs.go
+++ b/drivers/overlay/composefs.go
@@ -7,15 +7,13 @@ import (
"encoding/binary"
"errors"
"fmt"
- "io/fs"
"os"
"os/exec"
"path/filepath"
"sync"
- "syscall"
- "unsafe"
"github.com/containers/storage/pkg/chunked/dump"
+ "github.com/containers/storage/pkg/fsverity"
"github.com/containers/storage/pkg/loopback"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -34,72 +32,6 @@ func getComposeFsHelper() (string, error) {
return composeFsHelperPath, composeFsHelperErr
}
-func enableVerity(description string, fd int) error {
- enableArg := unix.FsverityEnableArg{
- Version: 1,
- Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
- Block_size: 4096,
- }
-
- _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
- if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
- return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
- }
- return nil
-}
-
-type verityDigest struct {
- Fsv unix.FsverityDigest
- Buf [64]byte
-}
-
-func measureVerity(description string, fd int) (string, error) {
- var digest verityDigest
- digest.Fsv.Size = 64
- _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest)))
- if e1 != 0 {
- return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1)
- }
- return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil
-}
-
-func enableVerityRecursive(root string) (map[string]string, error) {
- digests := make(map[string]string)
- walkFn := func(path string, d fs.DirEntry, err error) error {
- if err != nil {
- return err
- }
- if !d.Type().IsRegular() {
- return nil
- }
-
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- defer f.Close()
-
- if err := enableVerity(path, int(f.Fd())); err != nil {
- return err
- }
-
- verity, err := measureVerity(path, int(f.Fd()))
- if err != nil {
- return err
- }
-
- relPath, err := filepath.Rel(root, path)
- if err != nil {
- return err
- }
-
- digests[relPath] = verity
- return nil
- }
- err := filepath.WalkDir(root, walkFn)
- return digests, err
-}
-
func getComposefsBlob(dataDir string) string {
return filepath.Join(dataDir, "composefs.blob")
}
@@ -151,7 +83,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
return err
}
- if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
+ if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
logrus.Warningf("%s", err)
}
diff --git a/drivers/overlay/overlay.go b/drivers/overlay/overlay.go
index 8cc33e1..f007aa9 100644
--- a/drivers/overlay/overlay.go
+++ b/drivers/overlay/overlay.go
@@ -82,7 +82,8 @@ const (
lowerFile = "lower"
maxDepth = 500
- tocArtifact = "toc"
+ tocArtifact = "toc"
+ fsVerityDigestsArtifact = "fs-verity-digests"
// idLength represents the number of random characters
// which can be used to create the unique link identifier
@@ -295,7 +296,7 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
// a bunch of network file systems...
case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs,
graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS,
- graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
+ graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP,
graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS,
graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS,
@@ -309,16 +310,6 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
- // If custom --imagestore is selected never
- // ditch the original graphRoot, instead add it as
- // additionalImageStore so its images can still be
- // read and used.
- if options.ImageStore != "" {
- graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore)
- options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore)
- // complete base name with driver name included
- options.ImageStore = filepath.Join(options.ImageStore, "overlay")
- }
opts, err := parseOptions(options.DriverOptions)
if err != nil {
return nil, err
@@ -862,22 +853,15 @@ func (d *Driver) Status() [][2]string {
// Metadata returns meta data about the overlay driver such as
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
- dir, imagestore, _ := d.dir2(id)
+ dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return nil, err
}
- workDirBase := dir
- if imagestore != "" {
- if _, err := os.Stat(dir); err != nil {
- return nil, err
- }
- workDirBase = imagestore
- }
metadata := map[string]string{
- "WorkDir": path.Join(workDirBase, "work"),
- "MergedDir": path.Join(workDirBase, "merged"),
- "UpperDir": path.Join(workDirBase, "diff"),
+ "WorkDir": path.Join(dir, "work"),
+ "MergedDir": path.Join(dir, "merged"),
+ "UpperDir": path.Join(dir, "diff"),
}
lowerDirs, err := d.getLowerDirs(id)
@@ -895,7 +879,7 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
// is being shutdown. For now, we just have to unmount the bind mounted
// we had created.
func (d *Driver) Cleanup() error {
- _ = os.RemoveAll(d.getStagingDir())
+ _ = os.RemoveAll(filepath.Join(d.home, stagingDir))
return mount.Unmount(d.home)
}
@@ -991,8 +975,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return d.create(id, parent, opts, true)
}
-func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
- dir, imageStore, _ := d.dir2(id)
+func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
+ dir, homedir, _ := d.dir2(id, readOnly)
+
+ disableQuota := readOnly
uidMaps := d.uidMaps
gidMaps := d.gidMaps
@@ -1003,7 +989,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
}
// Make the link directory if it does not exist
- if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
+ if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil {
return err
}
@@ -1020,20 +1006,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
return err
}
- workDirBase := dir
- if imageStore != "" {
- workDirBase = imageStore
- if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil {
- return err
- }
- }
if parent != "" {
- parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
- // If parentBase path is additional image store, select the image contained in parentBase.
- // See https://github.com/containers/podman/issues/19748
- if parentImageStore != "" && !inAdditionalStore {
- parentBase = parentImageStore
- }
+ parentBase := d.dir(parent)
st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
return err
@@ -1054,11 +1028,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil {
return err
}
- if imageStore != "" {
- if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil {
- return err
- }
- }
defer func() {
// Clean up on failure
@@ -1066,11 +1035,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
if err2 := os.RemoveAll(dir); err2 != nil {
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
}
- if imageStore != "" {
- if err2 := os.RemoveAll(workDirBase); err2 != nil {
- logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2)
- }
- }
}
}()
@@ -1093,11 +1057,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
return err
}
- if imageStore != "" {
- if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil {
- return err
- }
- }
}
perms := defaultPerms
@@ -1106,12 +1065,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
}
if parent != "" {
- parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
- // If parentBase path is additional image store, select the image contained in parentBase.
- // See https://github.com/containers/podman/issues/19748
- if parentImageStore != "" && !inAdditionalStore {
- parentBase = parentImageStore
- }
+ parentBase := d.dir(parent)
st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
return err
@@ -1119,17 +1073,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
perms = os.FileMode(st.Mode())
}
- if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
return err
}
lid := generateID(idLength)
linkBase := path.Join("..", id, "diff")
- if imageStore != "" {
- linkBase = path.Join(imageStore, "diff")
- }
- if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil {
+ if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil {
return err
}
@@ -1138,10 +1089,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
return err
}
- if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil {
return err
}
- if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
return err
}
@@ -1223,26 +1174,39 @@ func (d *Driver) getLower(parent string) (string, error) {
}
func (d *Driver) dir(id string) string {
- p, _, _ := d.dir2(id)
+ p, _, _ := d.dir2(id, false)
return p
}
-func (d *Driver) dir2(id string) (string, string, bool) {
- newpath := path.Join(d.home, id)
- imageStore := ""
+func (d *Driver) getAllImageStores() []string {
+ additionalImageStores := d.AdditionalImageStores()
if d.imageStore != "" {
- imageStore = path.Join(d.imageStore, id)
+ additionalImageStores = append([]string{d.imageStore}, additionalImageStores...)
}
+ return additionalImageStores
+}
+
+func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
+ var homedir string
+
+ if useImageStore && d.imageStore != "" {
+ homedir = path.Join(d.imageStore, d.name)
+ } else {
+ homedir = d.home
+ }
+
+ newpath := path.Join(homedir, id)
+
if _, err := os.Stat(newpath); err != nil {
- for _, p := range d.AdditionalImageStores() {
+ for _, p := range d.getAllImageStores() {
l := path.Join(p, d.name, id)
_, err = os.Stat(l)
if err == nil {
- return l, imageStore, true
+ return l, homedir, true
}
}
}
- return newpath, imageStore, false
+ return newpath, homedir, false
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
@@ -1452,14 +1416,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
- dir, imageStore, inAdditionalStore := d.dir2(id)
+ dir, _, inAdditionalStore := d.dir2(id, false)
if _, err := os.Stat(dir); err != nil {
return "", err
}
- workDirBase := dir
- if imageStore != "" {
- workDirBase = imageStore
- }
+
readWrite := !inAdditionalStore
if !d.SupportsShifting() || options.DisableShifting {
@@ -1564,7 +1525,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}()
composeFsLayers := []string{}
- composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers")
+ composeFsLayersDir := filepath.Join(dir, "composefs-layers")
maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
composefsBlob := d.getComposefsData(lowerID)
_, err = os.Stat(composefsBlob)
@@ -1598,7 +1559,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return dest, nil
}
- diffDir := path.Join(workDirBase, "diff")
+ diffDir := path.Join(dir, "diff")
if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil {
return "", err
@@ -1616,7 +1577,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
lower := ""
newpath := path.Join(d.home, l)
if st, err := os.Stat(newpath); err != nil {
- for _, p := range d.AdditionalImageStores() {
+ for _, p := range d.getAllImageStores() {
lower = path.Join(p, d.name, l)
if st2, err2 := os.Stat(lower); err2 == nil {
if !permsKnown {
@@ -1684,21 +1645,27 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
optsList = append(optsList, "metacopy=on", "redirect_dir=on")
}
- if len(absLowers) == 0 {
- absLowers = append(absLowers, path.Join(dir, "empty"))
- }
-
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
+ if len(absLowers) == 0 {
+ absLowers = append(absLowers, path.Join(dir, "empty"))
+ }
+
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
- return "", err
+ if !inAdditionalStore {
+ return "", err
+ }
+ // if it is in an additional store, do not fail if the directory already exists
+ if _, err2 := os.Stat(diffDir); err2 != nil {
+ return "", err
+ }
}
- mergedDir := path.Join(workDirBase, "merged")
+ mergedDir := path.Join(dir, "merged")
// Create the driver merged dir
if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return "", err
@@ -1716,7 +1683,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}()
- workdir := path.Join(workDirBase, "work")
+ workdir := path.Join(dir, "work")
if d.options.mountProgram == "" && unshare.IsRootless() {
optsList = append(optsList, "userxattr")
@@ -1866,7 +1833,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// Put unmounts the mount path created for the give id.
func (d *Driver) Put(id string) error {
- dir := d.dir(id)
+ dir, _, inAdditionalStore := d.dir2(id, false)
if _, err := os.Stat(dir); err != nil {
return err
}
@@ -1927,11 +1894,27 @@ func (d *Driver) Put(id string) error {
}
}
- if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
- logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err)
- return fmt.Errorf("removing mount point %q: %w", mountpoint, err)
- }
+ if !inAdditionalStore {
+ uid, gid := int(0), int(0)
+ fi, err := os.Stat(mountpoint)
+ if err != nil {
+ return err
+ }
+ if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+ uid, gid = int(stat.Uid), int(stat.Gid)
+ }
+ tmpMountpoint := path.Join(dir, "merged.1")
+ if err := idtools.MkdirAs(tmpMountpoint, 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) {
+ return err
+ }
+ // rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains
+ // its atomic semantic. In this way the "merged" directory is never removed.
+ if err := unix.Rename(tmpMountpoint, mountpoint); err != nil {
+ logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err)
+ return fmt.Errorf("replacing mount point %q: %w", mountpoint, err)
+ }
+ }
return nil
}
@@ -2019,8 +2002,9 @@ func (g *overlayFileGetter) Close() error {
return nil
}
-func (d *Driver) getStagingDir() string {
- return filepath.Join(d.home, stagingDir)
+func (d *Driver) getStagingDir(id string) string {
+ _, homedir, _ := d.dir2(id, d.imageStore != "")
+ return filepath.Join(homedir, stagingDir)
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
@@ -2077,15 +2061,22 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
var applyDir string
if id == "" {
- err := os.MkdirAll(d.getStagingDir(), 0o700)
+ stagingDir := d.getStagingDir(id)
+ err := os.MkdirAll(stagingDir, 0o700)
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
- applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
+ applyDir, err = os.MkdirTemp(stagingDir, "")
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
-
+ perms := defaultPerms
+ if d.options.forceMask != nil {
+ perms = *d.options.forceMask
+ }
+ if err := os.Chmod(applyDir, perms); err != nil {
+ return graphdriver.DriverWithDifferOutput{}, err
+ }
} else {
var err error
applyDir, err = d.getDiffPath(id)
@@ -2101,6 +2092,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
}
if d.usingComposefs {
differOptions.Format = graphdriver.DifferOutputFormatFlat
+ differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled
}
out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
@@ -2116,27 +2108,38 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
}
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
-func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
- if filepath.Dir(stagingDirectory) != d.getStagingDir() {
+func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
+ stagingDirectory := diffOutput.Target
+ if filepath.Dir(stagingDirectory) != d.getStagingDir(id) {
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}
+ diffPath, err := d.getDiffPath(id)
+ if err != nil {
+ return err
+ }
- if d.usingComposefs {
- // FIXME: move this logic into the differ so we don't have to open
- // the file twice.
- verityDigests, err := enableVerityRecursive(stagingDirectory)
- if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
- logrus.Warningf("%s", err)
+ // If the current layer doesn't set the mode for the parent, override it with the parent layer's mode.
+ if d.options.forceMask == nil && diffOutput.RootDirMode == nil && parent != "" {
+ parentDiffPath, err := d.getDiffPath(parent)
+ if err != nil {
+ return err
}
+ parentSt, err := os.Stat(parentDiffPath)
+ if err != nil {
+ return err
+ }
+ if err := os.Chmod(stagingDirectory, parentSt.Mode()); err != nil {
+ return err
+ }
+ }
+
+ if d.usingComposefs {
toc := diffOutput.Artifacts[tocArtifact]
+ verityDigests := diffOutput.Artifacts[fsVerityDigestsArtifact].(map[string]string)
if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil {
return err
}
}
- diffPath, err := d.getDiffPath(id)
- if err != nil {
- return err
- }
if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) {
return err
}
@@ -2193,12 +2196,8 @@ func (d *Driver) getComposefsData(id string) string {
}
func (d *Driver) getDiffPath(id string) (string, error) {
- dir, imagestore, _ := d.dir2(id)
- base := dir
- if imagestore != "" {
- base = imagestore
- }
- return redirectDiffIfAdditionalLayer(path.Join(base, "diff"))
+ dir := d.dir(id)
+ return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
}
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
@@ -2289,12 +2288,8 @@ func (d *Driver) AdditionalImageStores() []string {
// by toContainer to those specified by toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
var err error
- dir, imagestore, _ := d.dir2(id)
- base := dir
- if imagestore != "" {
- base = imagestore
- }
- diffDir := filepath.Join(base, "diff")
+ dir := d.dir(id)
+ diffDir := filepath.Join(dir, "diff")
rootUID, rootGID := 0, 0
if toHost != nil {
diff --git a/drivers/overlay/overlay_nocgo.go b/drivers/overlay/overlay_nocgo.go
index 2a7a307..d4f540c 100644
--- a/drivers/overlay/overlay_nocgo.go
+++ b/drivers/overlay/overlay_nocgo.go
@@ -4,6 +4,7 @@
package overlay
import (
+ "fmt"
"path"
"github.com/containers/storage/pkg/directory"
@@ -15,3 +16,15 @@ import (
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
return directory.Usage(path.Join(d.dir(id), "diff"))
}
+
+func getComposeFsHelper() (string, error) {
+ return "", fmt.Errorf("composefs not supported on this build")
+}
+
+func mountComposefsBlob(dataDir, mountPoint string) error {
+ return fmt.Errorf("composefs not supported on this build")
+}
+
+func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
+ return fmt.Errorf("composefs not supported on this build")
+}
diff --git a/drivers/vfs/driver.go b/drivers/vfs/driver.go
index 599cf09..9b55225 100644
--- a/drivers/vfs/driver.go
+++ b/drivers/vfs/driver.go
@@ -31,8 +31,9 @@ func init() {
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
d := &Driver{
name: "vfs",
- homes: []string{home},
+ home: home,
idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ imageStore: options.ImageStore,
}
rootIDs := d.idMappings.RootPair()
@@ -47,7 +48,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
key = strings.ToLower(key)
switch key {
case "vfs.imagestore", ".imagestore":
- d.homes = append(d.homes, strings.Split(val, ",")...)
+ d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...)
continue
case "vfs.mountopt":
return nil, fmt.Errorf("vfs driver does not support mount options")
@@ -62,12 +63,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
return nil, fmt.Errorf("vfs driver does not support %s options", key)
}
}
- // If --imagestore is provided, lets add writable graphRoot
- // to vfs's additional image store, as it is done for
- // `overlay` driver.
- if options.ImageStore != "" {
- d.homes = append(d.homes, options.ImageStore)
- }
+
d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)
@@ -80,11 +76,13 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
name string
- homes []string
+ home string
+ additionalHomes []string
idMappings *idtools.IDMappings
ignoreChownErrors bool
naiveDiff graphdriver.DiffDriver
updater graphdriver.LayerIDMapUpdater
+ imageStore string
}
func (d *Driver) String() string {
@@ -158,7 +156,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
idMappings = opts.IDMappings
}
- dir := d.dir(id)
+ dir := d.dir2(id, ro)
rootIDs := idMappings.RootPair()
if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
return err
@@ -204,18 +202,32 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
return nil
}
-func (d *Driver) dir(id string) string {
- for i, home := range d.homes {
- if i > 0 {
- home = filepath.Join(home, d.String())
+func (d *Driver) dir2(id string, useImageStore bool) string {
+ var homedir string
+
+ if useImageStore && d.imageStore != "" {
+ homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id))
+ } else {
+ homedir = filepath.Join(d.home, "dir", filepath.Base(id))
+ }
+ if _, err := os.Stat(homedir); err != nil {
+ additionalHomes := d.additionalHomes[:]
+ if d.imageStore != "" {
+ additionalHomes = append(additionalHomes, d.imageStore)
}
- candidate := filepath.Join(home, "dir", filepath.Base(id))
- fi, err := os.Stat(candidate)
- if err == nil && fi.IsDir() {
- return candidate
+ for _, home := range additionalHomes {
+ candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id))
+ fi, err := os.Stat(candidate)
+ if err == nil && fi.IsDir() {
+ return candidate
+ }
}
}
- return filepath.Join(d.homes[0], "dir", filepath.Base(id))
+ return homedir
+}
+
+func (d *Driver) dir(id string) string {
+ return d.dir2(id, false)
}
// Remove deletes the content from the directory for a given id.
@@ -263,7 +275,7 @@ func (d *Driver) Exists(id string) bool {
// List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) {
- entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir"))
+ entries, err := os.ReadDir(filepath.Join(d.home, "dir"))
if err != nil {
return nil, err
}
@@ -285,8 +297,8 @@ func (d *Driver) ListLayers() ([]string, error) {
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
- if len(d.homes) > 1 {
- return d.homes[1:]
+ if len(d.additionalHomes) > 0 {
+ return d.additionalHomes
}
return nil
}
diff --git a/go.mod b/go.mod
index 005b20c..805d498 100644
--- a/go.mod
+++ b/go.mod
@@ -5,35 +5,35 @@ module github.com/containers/storage
require (
github.com/BurntSushi/toml v1.3.2
github.com/Microsoft/go-winio v0.6.1
- github.com/Microsoft/hcsshim v0.12.0-rc.2
+ github.com/Microsoft/hcsshim v0.12.0-rc.3
github.com/containerd/stargz-snapshotter/estargz v0.15.1
github.com/cyphar/filepath-securejoin v0.2.4
github.com/docker/go-units v0.5.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.17.4
+ github.com/klauspost/compress v1.17.7
github.com/klauspost/pgzip v1.2.6
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs/v3 v3.0.1
github.com/moby/sys/mountinfo v0.7.1
+ github.com/moby/sys/user v0.1.0
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.1.11
- github.com/opencontainers/runtime-spec v1.1.0
+ github.com/opencontainers/runtime-spec v1.2.0
github.com/opencontainers/selinux v1.11.0
github.com/sirupsen/logrus v1.9.3
- github.com/stretchr/testify v1.8.4
+ github.com/stretchr/testify v1.9.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/tchap/go-patricia/v2 v2.3.1
github.com/ulikunitz/xz v0.5.11
github.com/vbatts/tar-split v0.11.5
- golang.org/x/sys v0.16.0
+ golang.org/x/sys v0.17.0
gotest.tools v2.2.0+incompatible
)
require (
github.com/containerd/cgroups/v3 v3.0.2 // indirect
- github.com/containerd/containerd v1.7.11 // indirect
+ github.com/containerd/errdefs v0.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
@@ -48,7 +48,7 @@ require (
golang.org/x/mod v0.13.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/tools v0.14.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index eb6eb54..8e2d31d 100644
--- a/go.sum
+++ b/go.sum
@@ -4,15 +4,15 @@ github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.12.0-rc.2 h1:gfKebjq3Mq17Ys+4cjE8vc2h6tZVeqCGb9a7vBVqpAk=
-github.com/Microsoft/hcsshim v0.12.0-rc.2/go.mod h1:G2TZhBED5frlh/hsuxV5CDh/ylkSFknPAMPpQg9owQw=
+github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak=
+github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
-github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw=
-github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
@@ -66,8 +66,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
@@ -76,6 +76,8 @@ github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPn
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -83,10 +85,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/runc v1.1.11 h1:9LjxyVlE0BPMRP2wuQDRlHV4941Jp9rc3F0+YKimopA=
-github.com/opencontainers/runc v1.1.11/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
-github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
-github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -104,8 +104,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
@@ -143,8 +143,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
@@ -161,8 +161,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
diff --git a/idset_test.go b/idset_test.go
index e6a789d..45c6c96 100644
--- a/idset_test.go
+++ b/idset_test.go
@@ -685,7 +685,7 @@ func TestIntervalIsZero(t *testing.T) {
}
}
-// assertIntervalSame aserts `got` equals to `want` considering zero check. If the wanted interval
+// assertIntervalSame asserts `got` equals to `want` considering zero check. If the wanted interval
// is empty, we only want to assert IsZero() == true, instead of the exact number.
func assertIntervalSame(t *testing.T, got intervalset.Interval, want *interval, name string) {
t.Helper()
diff --git a/layers.go b/layers.go
index 8dfce5e..f132526 100644
--- a/layers.go
+++ b/layers.go
@@ -181,6 +181,13 @@ type DiffOptions struct {
Compression *archive.Compression
}
+// stagedLayerOptions are the options passed to .create to populate a staged
+// layer
+type stagedLayerOptions struct {
+ DiffOutput *drivers.DriverWithDifferOutput
+ DiffOptions *drivers.ApplyDiffWithDifferOpts
+}
+
// roLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
@@ -267,7 +274,7 @@ type rwLayerStore interface {
// underlying drivers do not themselves distinguish between writeable
// and read-only layers. Returns the new layer structure and the size of the
// diff which was applied to its parent to initialize its contents.
- create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error)
+ create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error)
// updateNames modifies names associated with a layer based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
@@ -312,8 +319,8 @@ type rwLayerStore interface {
// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
- // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
- ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
+ // applyDiffFromStagingDirectory uses diffOutput.Target to create the diff.
+ applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
// DifferTarget gets the location where files are stored for the layer.
DifferTarget(id string) (string, error)
@@ -327,10 +334,71 @@ type rwLayerStore interface {
GarbageCollect() error
}
+type multipleLockFile struct {
+ lockfiles []*lockfile.LockFile
+}
+
+func (l multipleLockFile) Lock() {
+ for _, lock := range l.lockfiles {
+ lock.Lock()
+ }
+}
+
+func (l multipleLockFile) RLock() {
+ for _, lock := range l.lockfiles {
+ lock.RLock()
+ }
+}
+
+func (l multipleLockFile) Unlock() {
+ for _, lock := range l.lockfiles {
+ lock.Unlock()
+ }
+}
+
+func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) {
+ // Look up only the first lockfile, since this is the value returned by RecordWrite().
+ return l.lockfiles[0].ModifiedSince(lastWrite)
+}
+
+func (l multipleLockFile) AssertLockedForWriting() {
+ for _, lock := range l.lockfiles {
+ lock.AssertLockedForWriting()
+ }
+}
+
+func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) {
+ return l.lockfiles[0].GetLastWrite()
+}
+
+func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) {
+ var lastWrite *lockfile.LastWrite
+ for _, lock := range l.lockfiles {
+ lw, err := lock.RecordWrite()
+ if err != nil {
+ return lw, err
+ }
+ // Return the first value we get so we know that
+ // all the locks have a write time >= to this one.
+ if lastWrite == nil {
+ lastWrite = &lw
+ }
+ }
+ return *lastWrite, nil
+}
+
+func (l multipleLockFile) IsReadWrite() bool {
+ return l.lockfiles[0].IsReadWrite()
+}
+
+func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile {
+ return &multipleLockFile{lockfiles: l}
+}
+
type layerStore struct {
// The following fields are only set when constructing layerStore, and must never be modified afterwards.
// They are safe to access without any other locking.
- lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
+ lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held.
rundir string
jsonPath [numLayerLocationIndex]string
@@ -1016,22 +1084,37 @@ func (r *layerStore) saveMounts() error {
return r.loadMounts()
}
-func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
+func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
if err := os.MkdirAll(rundir, 0o700); err != nil {
return nil, err
}
if err := os.MkdirAll(layerdir, 0o700); err != nil {
return nil, err
}
+ if imagedir != "" {
+ if err := os.MkdirAll(imagedir, 0o700); err != nil {
+ return nil, err
+ }
+ }
// Note: While the containers.lock file is in rundir for transient stores
// we don't want to do this here, because the non-transient layers in
// layers.json might be used externally as a read-only layer (using e.g.
// additionalimagestores), and that would look for the lockfile in the
// same directory
+ var lockFiles []*lockfile.LockFile
lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
+ lockFiles = append(lockFiles, lockFile)
+ if imagedir != "" {
+ lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock"))
+ if err != nil {
+ return nil, err
+ }
+ lockFiles = append(lockFiles, lockFile)
+ }
+
mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock"))
if err != nil {
return nil, err
@@ -1041,7 +1124,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
volatileDir = rundir
}
rlstore := layerStore{
- lockfile: lockFile,
+ lockfile: newMultipleLockFile(lockFiles...),
mountsLockfile: mountsLockfile,
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
@@ -1078,7 +1161,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
return nil, err
}
rlstore := layerStore{
- lockfile: lockfile,
+ lockfile: newMultipleLockFile(lockfile),
mountsLockfile: nil,
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
@@ -1232,7 +1315,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
}
// Requires startWriting.
-func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) {
+func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
if moreOptions == nil {
moreOptions = &LayerOptions{}
}
@@ -1426,6 +1509,11 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
+ } else if slo != nil {
+ if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil {
+ cleanupFailureContext = "applying staged directory diff"
+ return nil, -1, err
+ }
} else {
// applyDiffWithOptions() would have updated r.bycompressedsum
// and r.byuncompressedsum for us, but if we used a template
@@ -2286,7 +2374,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
uncompressedDigest = layerOptions.UncompressedDigest
- } else {
+ } else if compression != archive.Uncompressed {
uncompressedDigester = digest.Canonical.Digester()
}
@@ -2365,10 +2453,17 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if uncompressedDigester != nil {
uncompressedDigest = uncompressedDigester.Digest()
}
+ if uncompressedDigest == "" && compression == archive.Uncompressed {
+ uncompressedDigest = compressedDigest
+ }
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
layer.CompressedDigest = compressedDigest
- layer.CompressedSize = compressedCounter.Count
+ if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil {
+ layer.CompressedSize = *layerOptions.OriginalSize
+ } else {
+ layer.CompressedSize = compressedCounter.Count
+ }
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID)
layer.UncompressedDigest = uncompressedDigest
layer.UncompressedSize = uncompressedCounter.Count
@@ -2407,7 +2502,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
}
// Requires startWriting.
-func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
+func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return ErrNotSupported
@@ -2426,7 +2521,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
}
}
- err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
+ err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, diffOutput, options)
if err != nil {
return err
}
@@ -2446,6 +2541,10 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
layer.Flags[k] = v
}
}
+ if err = r.saveFor(layer); err != nil {
+ return err
+ }
+
if len(diffOutput.TarSplit) != 0 {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
@@ -2475,9 +2574,6 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return err
}
}
- if err = r.saveFor(layer); err != nil {
- return err
- }
return err
}
diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go
index 51d0a66..70f76d6 100644
--- a/pkg/archive/archive.go
+++ b/pkg/archive/archive.go
@@ -339,12 +339,43 @@ func (compression *Compression) Extension() string {
return ""
}
+// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
+// prevent tar.FileInfoHeader from introspecting it and potentially calling into
+// glibc.
+type nosysFileInfo struct {
+ os.FileInfo
+}
+
+func (fi nosysFileInfo) Sys() interface{} {
+ // A Sys value of type *tar.Header is safe as it is system-independent.
+ // The tar.FileInfoHeader function copies the fields into the returned
+ // header without performing any OS lookups.
+ if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
+ return sys
+ }
+ return nil
+}
+
+// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi.
+var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error
+
+func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
+ if sysStatOverride == nil {
+ return tar.FileInfoHeader(fi, link)
+ }
+ hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
+ if err != nil {
+ return nil, err
+ }
+ return hdr, sysStatOverride(fi, hdr)
+}
+
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
- hdr, err := tar.FileInfoHeader(fi, link)
+ hdr, err := fileInfoHeaderNoLookups(fi, link)
if err != nil {
return nil, err
}
@@ -385,7 +416,7 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
return err
}
for _, key := range xattrs {
- if strings.HasPrefix(key, "user.") {
+ if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") {
value, err := system.Lgetxattr(path, key)
if err != nil {
if errors.Is(err, system.E2BIG) {
diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go
index 88192f2..c681103 100644
--- a/pkg/archive/archive_unix.go
+++ b/pkg/archive/archive_unix.go
@@ -15,6 +15,31 @@ import (
"golang.org/x/sys/unix"
)
+func init() {
+ sysStatOverride = statUnix
+}
+
+// statUnix populates hdr from system-dependent fields of fi without performing
+// any OS lookups.
+// Adapted from Moby.
+func statUnix(fi os.FileInfo, hdr *tar.Header) error {
+ s, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+
+ hdr.Uid = int(s.Uid)
+ hdr.Gid = int(s.Gid)
+
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
+ }
+
+ return nil
+}
+
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
diff --git a/pkg/chunked/cache_linux.go b/pkg/chunked/cache_linux.go
index aa4f57e..1e3ad86 100644
--- a/pkg/chunked/cache_linux.go
+++ b/pkg/chunked/cache_linux.go
@@ -25,7 +25,7 @@ import (
const (
cacheKey = "chunked-manifest-cache"
- cacheVersion = 1
+ cacheVersion = 2
digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
@@ -207,9 +207,9 @@ func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
return string(digester.Digest()), nil
}
-// generateFileLocation generates a file location in the form $OFFSET@$PATH
-func generateFileLocation(path string, offset uint64) []byte {
- return []byte(fmt.Sprintf("%d@%s", offset, path))
+// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH
+func generateFileLocation(path string, offset, len uint64) []byte {
+ return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path))
}
// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
@@ -245,7 +245,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
var tags []string
for _, k := range toc {
if k.Digest != "" {
- location := generateFileLocation(k.Name, 0)
+ location := generateFileLocation(k.Name, 0, uint64(k.Size))
off := uint64(vdata.Len())
l := uint64(len(location))
@@ -276,7 +276,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
digestLen = len(k.Digest)
}
if k.ChunkDigest != "" {
- location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
+ location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize))
off := uint64(vdata.Len())
l := uint64(len(location))
d := generateTag(k.ChunkDigest, off, l)
@@ -490,7 +490,9 @@ func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
if digest == d {
startOff := i*metadata.tagLen + metadata.digestLen
parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
+
off, _ := strconv.ParseInt(parts[0], 10, 64)
+
len, _ := strconv.ParseInt(parts[1], 10, 64)
return digest, uint64(off), uint64(len)
}
@@ -507,12 +509,16 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
defer c.mutex.RUnlock()
for _, layer := range c.layers {
- digest, off, len := findTag(digest, layer.metadata)
+ digest, off, tagLen := findTag(digest, layer.metadata)
if digest != "" {
- position := string(layer.metadata.vdata[off : off+len])
- parts := strings.SplitN(position, "@", 2)
+ position := string(layer.metadata.vdata[off : off+tagLen])
+ parts := strings.SplitN(position, ":", 3)
+ if len(parts) != 3 {
+ continue
+ }
offFile, _ := strconv.ParseInt(parts[0], 10, 64)
- return layer.target, parts[1], offFile, nil
+ // parts[1] is the chunk length, currently unused.
+ return layer.target, parts[2], offFile, nil
}
}
diff --git a/pkg/chunked/cache_linux_test.go b/pkg/chunked/cache_linux_test.go
index 56e9bf0..957bc27 100644
--- a/pkg/chunked/cache_linux_test.go
+++ b/pkg/chunked/cache_linux_test.go
@@ -130,7 +130,7 @@ func TestWriteCache(t *testing.T) {
if digest != r.Digest {
t.Error("wrong file found")
}
- expectedLocation := generateFileLocation(r.Name, 0)
+ expectedLocation := generateFileLocation(r.Name, 0, uint64(r.Size))
location := cache.vdata[off : off+len]
if !bytes.Equal(location, expectedLocation) {
t.Errorf("wrong file found %q instead of %q", location, expectedLocation)
@@ -149,7 +149,7 @@ func TestWriteCache(t *testing.T) {
if digest != fingerprint {
t.Error("wrong file found")
}
- expectedLocation = generateFileLocation(r.Name, 0)
+ expectedLocation = generateFileLocation(r.Name, 0, uint64(r.Size))
location = cache.vdata[off : off+len]
if !bytes.Equal(location, expectedLocation) {
t.Errorf("wrong file found %q instead of %q", location, expectedLocation)
@@ -164,7 +164,7 @@ func TestWriteCache(t *testing.T) {
if digest != r.ChunkDigest {
t.Error("wrong digest found")
}
- expectedLocation := generateFileLocation(r.Name, uint64(r.ChunkOffset))
+ expectedLocation := generateFileLocation(r.Name, uint64(r.ChunkOffset), uint64(r.ChunkSize))
location := cache.vdata[off : off+len]
if !bytes.Equal(location, expectedLocation) {
t.Errorf("wrong file found %q instead of %q", location, expectedLocation)
diff --git a/pkg/chunked/compression_linux.go b/pkg/chunked/compression_linux.go
index 1d8141e..112ca2c 100644
--- a/pkg/chunked/compression_linux.go
+++ b/pkg/chunked/compression_linux.go
@@ -257,8 +257,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
return decodedBlob, decodedTarSplit, int64(footerData.Offset), err
}
-func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
- d, err := digest.Parse(expectedUncompressedChecksum)
+func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
+ d, err := digest.Parse(expectedCompressedChecksum)
if err != nil {
return nil, err
}
diff --git a/pkg/chunked/dump/dump.go b/pkg/chunked/dump/dump.go
index 5e56968..d3c105c 100644
--- a/pkg/chunked/dump/dump.go
+++ b/pkg/chunked/dump/dump.go
@@ -4,6 +4,7 @@ import (
"bufio"
"fmt"
"io"
+ "path/filepath"
"strings"
"time"
"unicode"
@@ -93,13 +94,18 @@ func getStMode(mode uint32, typ string) (uint32, error) {
return mode, nil
}
-func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
- path := strings.TrimRight(entry.Name, "/")
- if path == "" {
+func sanitizeName(name string) string {
+ path := filepath.Clean(name)
+ if path == "." {
path = "/"
} else if path[0] != '/' {
path = "/" + path
}
+ return path
+}
+
+func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
+ path := sanitizeName(entry.Name)
if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil {
return err
@@ -133,9 +139,10 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri
var payload string
if entry.Linkname != "" {
- payload = entry.Linkname
- if entry.Type == internal.TypeLink && payload[0] != '/' {
- payload = "/" + payload
+ if entry.Type == internal.TypeSymlink {
+ payload = entry.Linkname
+ } else {
+ payload = sanitizeName(entry.Linkname)
}
} else {
if len(entry.Digest) > 10 {
@@ -198,10 +205,13 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
if e.Linkname == "" {
continue
}
+ if e.Type == internal.TypeSymlink {
+ continue
+ }
links[e.Linkname] = links[e.Linkname] + 1
}
- if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") {
+ if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") {
root := &internal.FileMetadata{
Name: "/",
Type: internal.TypeDir,
diff --git a/pkg/chunked/storage_linux.go b/pkg/chunked/storage_linux.go
index 8c54a5a..f300df3 100644
--- a/pkg/chunked/storage_linux.go
+++ b/pkg/chunked/storage_linux.go
@@ -25,6 +25,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chunked/compressor"
"github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/fsverity"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
@@ -40,12 +41,14 @@ import (
const (
maxNumberMissingChunks = 1024
+ autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them.
newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
containersOverrideXattr = "user.containers.override_stat"
bigDataKey = "zstd-chunked-manifest"
chunkedData = "zstd-chunked-data"
chunkedLayerDataKey = "zstd-chunked-layer-data"
tocKey = "toc"
+ fsVerityDigestsKey = "fs-verity-digests"
fileTypeZstdChunked = iota
fileTypeEstargz
@@ -71,11 +74,9 @@ type chunkedDiffer struct {
zstdReader *zstd.Decoder
rawReader io.Reader
- // contentDigest is the digest of the uncompressed content
- // (diffID) when the layer is fully retrieved. If the layer
- // is not fully retrieved, instead of using the digest of the
- // uncompressed content, it refers to the digest of the TOC.
- contentDigest digest.Digest
+ // tocDigest is the digest of the TOC document when the layer
+ // is partially pulled.
+ tocDigest digest.Digest
// convertedToZstdChunked is set to true if the layer needs to
// be converted to the zstd:chunked format before it can be
@@ -94,6 +95,10 @@ type chunkedDiffer struct {
blobSize int64
storeOpts *types.StoreOptions
+
+ useFsVerity graphdriver.DifferFsVerity
+ fsVerityDigests map[string]string
+ fsVerityMutex sync.Mutex
}
var xattrsToIgnore = map[string]interface{}{
@@ -237,6 +242,10 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
return nil, err
}
+ if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) {
+ return nil, errors.New("enable_partial_images not configured")
+ }
+
_, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
_, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
@@ -265,6 +274,7 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige
}
return &chunkedDiffer{
+ fsVerityDigests: make(map[string]string),
blobDigest: blobDigest,
blobSize: blobSize,
convertToZstdChunked: true,
@@ -285,22 +295,23 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
return nil, err
}
- contentDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
+ tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
if err != nil {
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
}
return &chunkedDiffer{
- blobSize: blobSize,
- contentDigest: contentDigest,
- copyBuffer: makeCopyBuffer(),
- fileType: fileTypeZstdChunked,
- layersCache: layersCache,
- manifest: manifest,
- storeOpts: storeOpts,
- stream: iss,
- tarSplit: tarSplit,
- tocOffset: tocOffset,
+ fsVerityDigests: make(map[string]string),
+ blobSize: blobSize,
+ tocDigest: tocDigest,
+ copyBuffer: makeCopyBuffer(),
+ fileType: fileTypeZstdChunked,
+ layersCache: layersCache,
+ manifest: manifest,
+ storeOpts: storeOpts,
+ stream: iss,
+ tarSplit: tarSplit,
+ tocOffset: tocOffset,
}, nil
}
@@ -314,21 +325,22 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
return nil, err
}
- contentDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
+ tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
if err != nil {
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
}
return &chunkedDiffer{
- blobSize: blobSize,
- contentDigest: contentDigest,
- copyBuffer: makeCopyBuffer(),
- fileType: fileTypeEstargz,
- layersCache: layersCache,
- manifest: manifest,
- storeOpts: storeOpts,
- stream: iss,
- tocOffset: tocOffset,
+ fsVerityDigests: make(map[string]string),
+ blobSize: blobSize,
+ tocDigest: tocDigest,
+ copyBuffer: makeCopyBuffer(),
+ fileType: fileTypeEstargz,
+ layersCache: layersCache,
+ manifest: manifest,
+ storeOpts: storeOpts,
+ stream: iss,
+ tocOffset: tocOffset,
}, nil
}
@@ -925,6 +937,8 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT
return nil
}
+type recordFsVerityFunc func(string, *os.File) error
+
type destinationFile struct {
digester digest.Digester
dirfd int
@@ -934,9 +948,10 @@ type destinationFile struct {
options *archive.TarOptions
skipValidation bool
to io.Writer
+ recordFsVerity recordFsVerityFunc
}
-func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) {
+func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) {
file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
if err != nil {
return nil, err
@@ -963,15 +978,32 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar
options: options,
dirfd: dirfd,
skipValidation: skipValidation,
+ recordFsVerity: recordFsVerity,
}, nil
}
func (d *destinationFile) Close() (Err error) {
defer func() {
- err := d.file.Close()
+ var roFile *os.File
+ var err error
+
+ if d.recordFsVerity != nil {
+ roFile, err = reopenFileReadOnly(d.file)
+ if err == nil {
+ defer roFile.Close()
+ } else if Err == nil {
+ Err = err
+ }
+ }
+
+ err = d.file.Close()
if Err == nil {
Err = err
}
+
+ if Err == nil && roFile != nil {
+ Err = d.recordFsVerity(d.metadata.Name, roFile)
+ }
}()
if !d.skipValidation {
@@ -994,6 +1026,35 @@ func closeDestinationFiles(files chan *destinationFile, errors chan error) {
close(errors)
}
+func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error {
+ if c.useFsVerity == graphdriver.DifferFsVerityDisabled {
+ return nil
+ }
+ // fsverity.EnableVerity doesn't return an error if fs-verity was already
+ // enabled on the file.
+ err := fsverity.EnableVerity(path, int(roFile.Fd()))
+ if err != nil {
+ if c.useFsVerity == graphdriver.DifferFsVerityRequired {
+ return err
+ }
+
+ // If it is not required, ignore the error if the filesystem does not support it.
+ if errors.Is(err, unix.ENOTSUP) || errors.Is(err, unix.ENOTTY) {
+ return nil
+ }
+ }
+ verity, err := fsverity.MeasureVerity(path, int(roFile.Fd()))
+ if err != nil {
+ return err
+ }
+
+ c.fsVerityMutex.Lock()
+ c.fsVerityDigests[path] = verity
+ c.fsVerityMutex.Unlock()
+
+ return nil
+}
+
func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
var destFile *destinationFile
@@ -1081,7 +1142,11 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
}
filesToClose <- destFile
}
- destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation)
+ recordFsVerity := c.recordFsVerity
+ if c.useFsVerity == graphdriver.DifferFsVerityDisabled {
+ recordFsVerity = nil
+ }
+ destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity)
if err != nil {
Err = err
goto exit
@@ -1116,22 +1181,12 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
}
func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
- getGap := func(missingParts []missingPart, i int) int {
+ getGap := func(missingParts []missingPart, i int) uint64 {
prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
- return int(missingParts[i].SourceChunk.Offset - prev)
- }
- getCost := func(missingParts []missingPart, i int) int {
- cost := getGap(missingParts, i)
- if missingParts[i-1].OriginFile != nil {
- cost += int(missingParts[i-1].SourceChunk.Length)
- }
- if missingParts[i].OriginFile != nil {
- cost += int(missingParts[i].SourceChunk.Length)
- }
- return cost
+ return missingParts[i].SourceChunk.Offset - prev
}
- // simple case: merge chunks from the same file.
+ // simple case: merge chunks from the same file. Useful to reduce the number of parts to work with later.
newMissingParts := missingParts[0:1]
prevIndex := 0
for i := 1; i < len(missingParts); i++ {
@@ -1151,28 +1206,50 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
}
missingParts = newMissingParts
- if len(missingParts) <= target {
- return missingParts
- }
-
- // this implementation doesn't account for duplicates, so it could merge
- // more than necessary to reach the specified target. Since target itself
- // is a heuristic value, it doesn't matter.
- costs := make([]int, len(missingParts)-1)
- for i := 1; i < len(missingParts); i++ {
- costs[i-1] = getCost(missingParts, i)
+ type gap struct {
+ from int
+ to int
+ cost uint64
+ }
+ var requestGaps []gap
+ lastOffset := int(-1)
+ numberSourceChunks := 0
+ for i, c := range missingParts {
+ if c.OriginFile != nil || c.Hole {
+ // it does not require a network request
+ continue
+ }
+ numberSourceChunks++
+ if lastOffset >= 0 {
+ prevEnd := missingParts[lastOffset].SourceChunk.Offset + missingParts[lastOffset].SourceChunk.Length
+ cost := c.SourceChunk.Offset - prevEnd
+ g := gap{
+ from: lastOffset,
+ to: i,
+ cost: cost,
+ }
+ requestGaps = append(requestGaps, g)
+ }
+ lastOffset = i
}
- sort.Ints(costs)
-
- toShrink := len(missingParts) - target
- if toShrink >= len(costs) {
- toShrink = len(costs) - 1
+ sort.Slice(requestGaps, func(i, j int) bool {
+ return requestGaps[i].cost < requestGaps[j].cost
+ })
+ toMergeMap := make([]bool, len(missingParts))
+ remainingToMerge := numberSourceChunks - target
+ for _, g := range requestGaps {
+ if remainingToMerge < 0 && g.cost > autoMergePartsThreshold {
+ continue
+ }
+ for i := g.from + 1; i <= g.to; i++ {
+ toMergeMap[i] = true
+ }
+ remainingToMerge--
}
- targetValue := costs[toShrink]
newMissingParts = missingParts[0:1]
for i := 1; i < len(missingParts); i++ {
- if getCost(missingParts, i) > targetValue {
+ if !toMergeMap[i] {
newMissingParts = append(newMissingParts, missingParts[i])
} else {
gap := getGap(missingParts, i)
@@ -1204,6 +1281,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
}
}
+ missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
calculateChunksToRequest()
// There are some missing files. Prepare a multirange request for the missing chunks.
@@ -1217,14 +1295,13 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
}
if _, ok := err.(ErrBadRequest); ok {
- requested := len(missingParts)
// If the server cannot handle at least 64 chunks in a single request, just give up.
- if requested < 64 {
+ if len(chunksToRequest) < 64 {
return err
}
// Merge more chunks to request
- missingParts = mergeMissingChunks(missingParts, requested/2)
+ missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
calculateChunksToRequest()
continue
}
@@ -1412,15 +1489,39 @@ type findAndCopyFileOptions struct {
options *archive.TarOptions
}
+func reopenFileReadOnly(f *os.File) (*os.File, error) {
+ path := fmt.Sprintf("/proc/self/fd/%d", f.Fd())
+ fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), f.Name()), nil
+}
+
func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
finalizeFile := func(dstFile *os.File) error {
- if dstFile != nil {
- defer dstFile.Close()
- if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil {
- return err
- }
+ if dstFile == nil {
+ return nil
}
- return nil
+ err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false)
+ if err != nil {
+ dstFile.Close()
+ return err
+ }
+ var roFile *os.File
+ if c.useFsVerity != graphdriver.DifferFsVerityDisabled {
+ roFile, err = reopenFileReadOnly(dstFile)
+ }
+ dstFile.Close()
+ if err != nil {
+ return err
+ }
+ if roFile == nil {
+ return nil
+ }
+
+ defer roFile.Close()
+ return c.recordFsVerity(r.Name, roFile)
}
found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks)
@@ -1522,9 +1623,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
}()
+ c.useFsVerity = differOpts.UseFsVerity
+
// stream to use for reading the zstd:chunked or Estargz file.
stream := c.stream
+ var uncompressedDigest digest.Digest
+
if c.convertToZstdChunked {
fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
if err != nil {
@@ -1575,13 +1680,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
c.fileType = fileTypeZstdChunked
c.manifest = manifest
c.tarSplit = tarSplit
-
- // since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest.
- c.contentDigest = diffID
c.tocOffset = tocOffset
// the file was generated by us and the digest for each file was already computed, no need to validate it again.
c.skipValidation = true
+ // since we retrieved the whole file and it was validated, set the uncompressed digest.
+ uncompressedDigest = diffID
}
lcd := chunkedLayerData{
@@ -1610,11 +1714,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
Artifacts: map[string]interface{}{
tocKey: toc,
},
- TOCDigest: c.contentDigest,
- }
-
- if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) {
- return output, errors.New("enable_partial_images not configured")
+ TOCDigest: c.tocDigest,
+ UncompressedDigest: uncompressedDigest,
}
// When the hard links deduplication is used, file attributes are ignored because setting them
@@ -1731,13 +1832,17 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
mode := os.FileMode(r.Mode)
- r.Name = filepath.Clean(r.Name)
- r.Linkname = filepath.Clean(r.Linkname)
-
t, err := typeToTarType(r.Type)
if err != nil {
return output, err
}
+
+ r.Name = filepath.Clean(r.Name)
+ // do not modify the value of symlinks
+ if r.Linkname != "" && t != tar.TypeSymlink {
+ r.Linkname = filepath.Clean(r.Linkname)
+ }
+
if whiteoutConverter != nil {
hdr := archivetar.Header{
Typeflag: t,
@@ -1783,6 +1888,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
case tar.TypeDir:
+ if r.Name == "" || r.Name == "." {
+ output.RootDirMode = &mode
+ }
if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
return output, err
}
@@ -1904,7 +2012,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
// There are some missing files. Prepare a multirange request for the missing chunks.
if len(missingParts) > 0 {
- missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil {
return output, err
}
@@ -1920,6 +2027,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
+ output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests
+
return output, nil
}
@@ -1979,7 +2088,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
e.Chunks = make([]*internal.FileMetadata, nChunks+1)
for j := 0; j <= nChunks; j++ {
- e.Chunks[j] = &entries[i+j]
+ // we need a copy here, otherwise we override the
+ // .Size later
+ copy := entries[i+j]
+ e.Chunks[j] = &copy
e.EndOffset = entries[i+j].EndOffset
}
i += nChunks
diff --git a/pkg/chunked/toc/toc.go b/pkg/chunked/toc/toc.go
index 9cfd97d..6fbaa41 100644
--- a/pkg/chunked/toc/toc.go
+++ b/pkg/chunked/toc/toc.go
@@ -1,6 +1,8 @@
package toc
import (
+ "errors"
+
"github.com/containers/storage/pkg/chunked/internal"
digest "github.com/opencontainers/go-digest"
)
@@ -16,19 +18,24 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// table of contents (TOC) from the image's annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
- if contentDigest, ok := annotations[tocJSONDigestAnnotation]; ok {
- d, err := digest.Parse(contentDigest)
+ d1, ok1 := annotations[tocJSONDigestAnnotation]
+ d2, ok2 := annotations[internal.ManifestChecksumKey]
+ switch {
+ case ok1 && ok2:
+ return nil, errors.New("both zstd:chunked and eStargz TOC found")
+ case ok1:
+ d, err := digest.Parse(d1)
if err != nil {
return nil, err
}
return &d, nil
- }
- if contentDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
- d, err := digest.Parse(contentDigest)
+ case ok2:
+ d, err := digest.Parse(d2)
if err != nil {
return nil, err
}
return &d, nil
+ default:
+ return nil, nil
}
- return nil, nil
}
diff --git a/pkg/fsverity/fsverity_linux.go b/pkg/fsverity/fsverity_linux.go
new file mode 100644
index 0000000..5b21c4b
--- /dev/null
+++ b/pkg/fsverity/fsverity_linux.go
@@ -0,0 +1,45 @@
+package fsverity
+
+import (
+ "errors"
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// verityDigest struct represents the digest used for verifying the integrity of a file.
+type verityDigest struct {
+ Fsv unix.FsverityDigest
+ Buf [64]byte
+}
+
+// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
+// in read-only mode.
+// The 'description' parameter is a human-readable description of the file.
+func EnableVerity(description string, fd int) error {
+ enableArg := unix.FsverityEnableArg{
+ Version: 1,
+ Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
+ Block_size: 4096,
+ }
+
+ _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
+ if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
+ return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
+ }
+ return nil
+}
+
+// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
+// The 'description' parameter is a human-readable description of the file.
+func MeasureVerity(description string, fd int) (string, error) {
+ var digest verityDigest
+ digest.Fsv.Size = 64
+ _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest)))
+ if e1 != 0 {
+ return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1)
+ }
+ return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil
+}
diff --git a/pkg/fsverity/fsverity_unsupported.go b/pkg/fsverity/fsverity_unsupported.go
new file mode 100644
index 0000000..46e68c5
--- /dev/null
+++ b/pkg/fsverity/fsverity_unsupported.go
@@ -0,0 +1,21 @@
+//go:build !linux
+// +build !linux
+
+package fsverity
+
+import (
+ "fmt"
+)
+
+// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
+// in read-only mode.
+// The 'description' parameter is a human-readable description of the file.
+func EnableVerity(description string, fd int) error {
+ return fmt.Errorf("fs-verity is not supported on this platform")
+}
+
+// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
+// The 'description' parameter is a human-readable description of the file.
+func MeasureVerity(description string, fd int) (string, error) {
+ return "", fmt.Errorf("fs-verity is not supported on this platform")
+}
diff --git a/pkg/homedir/homedir_others.go b/pkg/homedir/homedir_others.go
index b02812e..9057fe1 100644
--- a/pkg/homedir/homedir_others.go
+++ b/pkg/homedir/homedir_others.go
@@ -1,5 +1,5 @@
-//go:build !linux && !darwin && !freebsd
-// +build !linux,!darwin,!freebsd
+//go:build !linux && !darwin && !freebsd && !windows
+// +build !linux,!darwin,!freebsd,!windows
package homedir
diff --git a/pkg/homedir/homedir_windows.go b/pkg/homedir/homedir_windows.go
index af65f2c..a76610f 100644
--- a/pkg/homedir/homedir_windows.go
+++ b/pkg/homedir/homedir_windows.go
@@ -5,6 +5,7 @@ package homedir
import (
"os"
+ "path/filepath"
)
// Key returns the env var name for the user's home dir based on
@@ -25,8 +26,36 @@ func Get() string {
return home
}
+// GetConfigHome returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func GetConfigHome() (string, error) {
+ return filepath.Join(Get(), ".config"), nil
+}
+
// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
return "%USERPROFILE%" // be careful while using in format functions
}
+
+// StickRuntimeDirContents is a no-op on Windows
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ return nil, nil
+}
+
+// GetRuntimeDir returns a directory suitable to store runtime files.
+// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
+// directory for the current user.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+ data, err := GetDataHome()
+ if err != nil {
+ return "", err
+ }
+ runtimeDir := filepath.Join(data, "containers", "storage")
+ return runtimeDir, nil
+}
diff --git a/pkg/idtools/idtools_unix.go b/pkg/idtools/idtools_unix.go
index 4701dc5..d7cb4ac 100644
--- a/pkg/idtools/idtools_unix.go
+++ b/pkg/idtools/idtools_unix.go
@@ -14,7 +14,7 @@ import (
"syscall"
"github.com/containers/storage/pkg/system"
- "github.com/opencontainers/runc/libcontainer/user"
+ "github.com/moby/sys/user"
)
var (
diff --git a/storage.conf b/storage.conf
index d91fa98..924e8f1 100644
--- a/storage.conf
+++ b/storage.conf
@@ -59,7 +59,7 @@ additionalimagestores = [
# can deduplicate pulling of content, disk storage of content and can allow the
# kernel to use less memory when running containers.
-# containers/storage supports three keys
+# containers/storage supports four keys
# * enable_partial_images="true" | "false"
# Tells containers/storage to look for files previously pulled in storage
# rather then always pulling them from the container registry.
@@ -70,7 +70,12 @@ additionalimagestores = [
# Tells containers/storage where an ostree repository exists that might have
# previously pulled content which can be used when attempting to avoid
# pulling content from the container registry
-pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
+# * convert_images = "false" | "true"
+# If set to true, containers/storage will convert images to a
+# format compatible with partial pulls in order to take advantage
+# of local deduplication and hard linking. It is an expensive
+# operation so it is not enabled by default.
+pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""}
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
diff --git a/store.go b/store.go
index 49a4ff1..c6f1251 100644
--- a/store.go
+++ b/store.go
@@ -71,6 +71,19 @@ type metadataStore interface {
rwMetadataStore
}
+// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer
+type ApplyStagedLayerOptions struct {
+ ID string // Mandatory
+ ParentLayer string // Optional
+ Names []string // Optional
+ MountLabel string // Optional
+ Writeable bool // Optional
+ LayerOptions *LayerOptions // Optional
+
+ DiffOutput *drivers.DriverWithDifferOutput // Mandatory
+ DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory
+}
+
// An roBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
type roBigDataStore interface {
@@ -318,11 +331,21 @@ type Store interface {
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
+ // Deprecated: it will be removed soon. Use ApplyStagedLayer instead.
ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
+ // Deprecated: it will be removed soon. Use CleanupStagedLayer instead.
CleanupStagingDirectory(stagingDirectory string) error
+ // ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory,
+ // marking the layer for automatic removal if applying the diff fails
+ // for any reason.
+ ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error)
+
+	// CleanupStagedLayer cleans up the staging directory. It can be used to clean up the staging directory on errors
+ CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error
+
// DifferTarget gets the path to the differ target.
DifferTarget(id string) (string, error)
@@ -397,6 +420,18 @@ type Store interface {
// allow ImagesByDigest to find images by their correct digests.
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
+ // ImageDirectory returns a path of a directory which the caller can
+ // use to store data, specific to the image, which the library does not
+ // directly manage. The directory will be deleted when the image is
+ // deleted.
+ ImageDirectory(id string) (string, error)
+
+ // ImageRunDirectory returns a path of a directory which the caller can
+ // use to store data, specific to the image, which the library does not
+ // directly manage. The directory will be deleted when the host system
+ // is restarted.
+ ImageRunDirectory(id string) (string, error)
+
// ListLayerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a layer.
ListLayerBigData(id string) ([]string, error)
@@ -568,10 +603,19 @@ type LayerOptions struct {
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
- // OriginalDigest specifies a digest of the tarstream (diff), if one is
+ // OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is
// provided along with these LayerOptions, and reliably known by the caller.
+ // The digest might not be exactly the digest of the provided tarstream
+ // (e.g. the digest might be of a compressed representation, while providing
+ // an uncompressed one); in that case the caller is responsible for the two matching.
// Use the default "" if this fields is not applicable or the value is not known.
OriginalDigest digest.Digest
+ // OriginalSize specifies a size of the (possibly-compressed) tarstream corresponding
+ // to OriginalDigest.
+ // If the digest does not match the provided tarstream, OriginalSize must match OriginalDigest,
+ // not the tarstream.
+ // Use nil if not applicable or not known.
+ OriginalSize *int64
// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
// of the tarstream (diff), if one is provided along with these LayerOptions,
// and reliably known by the caller.
@@ -928,11 +972,13 @@ func (s *store) load() error {
if err := os.MkdirAll(gipath, 0o700); err != nil {
return err
}
- ris, err := newImageStore(gipath)
+ imageStore, err := newImageStore(gipath)
if err != nil {
return err
}
- s.imageStore = ris
+ s.imageStore = imageStore
+
+ s.rwImageStores = []rwImageStore{imageStore}
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
if err := os.MkdirAll(gcpath, 0o700); err != nil {
@@ -950,13 +996,16 @@ func (s *store) load() error {
s.containerStore = rcs
- for _, store := range driver.AdditionalImageStores() {
+ additionalImageStores := s.graphDriver.AdditionalImageStores()
+ if s.imageStoreDir != "" {
+ additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...)
+ }
+
+ for _, store := range additionalImageStores {
gipath := filepath.Join(store, driverPrefix+"images")
var ris roImageStore
- if s.imageStoreDir != "" && store == s.graphRoot {
- // If --imagestore was set and current store
- // is `graphRoot` then mount it as a `rw` additional
- // store instead of `readonly` additional store.
+ // both the graphdriver and the imagestore must be used read-write.
+ if store == s.imageStoreDir || store == s.graphRoot {
imageStore, err := newImageStore(gipath)
if err != nil {
return err
@@ -1041,15 +1090,9 @@ func (s *store) stopUsingGraphDriver() {
// Almost all users should use startUsingGraphDriver instead.
// The caller must hold s.graphLock.
func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
- driverRoot := s.imageStoreDir
- imageStoreBase := s.graphRoot
- if driverRoot == "" {
- driverRoot = s.graphRoot
- imageStoreBase = ""
- }
config := drivers.Options{
- Root: driverRoot,
- ImageStore: imageStoreBase,
+ Root: s.graphRoot,
+ ImageStore: s.imageStoreDir,
RunRoot: s.runRoot,
DriverPriority: s.graphDriverPriority,
DriverOptions: s.graphOptions,
@@ -1079,15 +1122,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) {
if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
- imgStoreRoot := s.imageStoreDir
- if imgStoreRoot == "" {
- imgStoreRoot = s.graphRoot
- }
- glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers")
+ glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
if err := os.MkdirAll(glpath, 0o700); err != nil {
return nil, err
}
- rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore)
+ ilpath := ""
+ if s.imageStoreDir != "" {
+ ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers")
+ }
+ rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore)
if err != nil {
return nil, err
}
@@ -1118,8 +1161,10 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) {
if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
+
for _, store := range s.graphDriver.AdditionalImageStores() {
glpath := filepath.Join(store, driverPrefix+"layers")
+
rls, err := newROLayerStore(rlpath, glpath, s.graphDriver)
if err != nil {
return nil, err
@@ -1400,8 +1445,7 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
return true
}
-func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
- var parentLayer *Layer
+func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
rlstore, rlstores, err := s.bothLayerStoreKinds()
if err != nil {
return nil, -1, err
@@ -1414,6 +1458,8 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
return nil, -1, err
}
defer s.containerStore.stopWriting()
+
+ var parentLayer *Layer
var options LayerOptions
if lOptions != nil {
options = *lOptions
@@ -1473,6 +1519,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
}
layerOptions := LayerOptions{
OriginalDigest: options.OriginalDigest,
+ OriginalSize: options.OriginalSize,
UncompressedDigest: options.UncompressedDigest,
Flags: options.Flags,
}
@@ -1486,7 +1533,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
GIDMap: copyIDMap(gidMap),
}
}
- return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff)
+ return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo)
+}
+
+func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
+ return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@@ -1696,7 +1747,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
}
}
layerOptions.TemplateLayer = layer.ID
- mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil)
+ mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
}
@@ -1867,7 +1918,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
options.Flags[mountLabelFlag] = mountLabel
}
- clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil)
+ clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil)
if err != nil {
return nil, err
}
@@ -2540,7 +2591,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
if err := s.writeToAllStores(func(rlstore rwLayerStore) error {
// Delete image from all available imagestores configured to be used.
imageFound := false
- for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) {
+ for _, is := range s.rwImageStores {
if is != s.imageStore {
// This is an additional writeable image store
// so we must perform lock
@@ -2932,15 +2983,28 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
}
func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
+ if stagingDirectory != diffOutput.Target {
+ return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory")
+ }
_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
if !rlstore.Exists(to) {
return struct{}{}, ErrLayerUnknown
}
- return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options)
+ return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options)
})
return err
}
+func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
+ slo := stagedLayerOptions{
+ DiffOutput: args.DiffOutput,
+ DiffOptions: args.DiffOptions,
+ }
+
+ layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo)
+ return layer, err
+}
+
func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory)
@@ -2948,6 +3012,13 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
return err
}
+func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error {
+ _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
+ return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target)
+ })
+ return err
+}
+
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
if to != "" && !rlstore.Exists(to) {
@@ -3311,6 +3382,27 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
return nil, ErrContainerUnknown
}
+func (s *store) ImageDirectory(id string) (string, error) {
+ foundImage := false
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
+ if store.Exists(id) {
+ foundImage = true
+ }
+ middleDir := s.graphDriverName + "-images"
+ gipath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(gipath, 0o700); err != nil {
+ return "", true, err
+ }
+ return gipath, true, nil
+ }); done {
+ return res, err
+ }
+ if foundImage {
+ return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist)
+ }
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+}
+
func (s *store) ContainerDirectory(id string) (string, error) {
res, _, err := readContainerStore(s, func() (string, bool, error) {
id, err := s.containerStore.Lookup(id)
@@ -3328,6 +3420,28 @@ func (s *store) ContainerDirectory(id string) (string, error) {
return res, err
}
+func (s *store) ImageRunDirectory(id string) (string, error) {
+ foundImage := false
+ if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
+ if store.Exists(id) {
+ foundImage = true
+ }
+
+ middleDir := s.graphDriverName + "-images"
+ rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(rcpath, 0o700); err != nil {
+ return "", true, err
+ }
+ return rcpath, true, nil
+ }); done {
+ return res, err
+ }
+ if foundImage {
+ return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist)
+ }
+ return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+}
+
func (s *store) ContainerRunDirectory(id string) (string, error) {
res, _, err := readContainerStore(s, func() (string, bool, error) {
id, err := s.containerStore.Lookup(id)
diff --git a/tests/image-dirs.bats b/tests/image-dirs.bats
new file mode 100644
index 0000000..64f799e
--- /dev/null
+++ b/tests/image-dirs.bats
@@ -0,0 +1,39 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "image-dirs" {
+ # Create a layer.
+ run storage --debug=false create-layer
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ layer=$output
+
+ # Check that the layer can be found.
+ storage exists -l $layer
+
+ # Create an image using the layer.
+ run storage --debug=false create-image -m danger $layer
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ image=${output%% *}
+
+ # Check that the image can be found.
+ storage exists -i $image
+
+ # Check that the image's user data directory is somewhere under the root.
+ run storage --debug=false get-image-dir $image
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ dir=${output%% *}
+ touch "$dir"/dirfile
+ echo "$dir"/dirfile | grep -q ^"${TESTDIR}/root/"
+
+ # Check that the image's user run data directory is somewhere under the run root.
+ run storage --debug=false get-image-run-dir $image
+ [ "$status" -eq 0 ]
+ [ "$output" != "" ]
+ rundir=${output%% *}
+ touch "$rundir"/rundirfile
+ echo "$rundir"/rundirfile | grep -q ^"${TESTDIR}/runroot/"
+}
diff --git a/userns.go b/userns.go
index 32ae830..5712073 100644
--- a/userns.go
+++ b/userns.go
@@ -11,7 +11,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
"github.com/containers/storage/types"
- libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
+ libcontainerUser "github.com/moby/sys/user"
"github.com/sirupsen/logrus"
)
@@ -175,7 +175,7 @@ outer:
// We need to create a temporary layer so we can mount it and lookup the
// maximum IDs used.
- clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil)
+ clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil)
if err != nil {
return 0, err
}