author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 17:06:25 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 17:06:25 +0000
commit     f115bb55d7eec53ad9ce2505dec9a7e0eed12536 (patch)
tree       5c161bdd6ad6304914773103edbbe16d403d3d18
parent     Initial commit. (diff)
download   golang-github-containers-image-f115bb55d7eec53ad9ce2505dec9a7e0eed12536.tar.xz
           golang-github-containers-image-f115bb55d7eec53ad9ce2505dec9a7e0eed12536.zip

Adding upstream version 5.29.2. (upstream/5.29.2, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.cirrus.yml173
-rw-r--r--.github/renovate.json555
-rw-r--r--.github/workflows/check_cirrus_cron.yml20
-rw-r--r--.github/workflows/rerun_cirrus_cron.yml19
-rw-r--r--.gitignore12
-rw-r--r--.golangci.yml4
-rw-r--r--.pullapprove.yml9
-rw-r--r--CODE-OF-CONDUCT.md3
-rw-r--r--CONTRIBUTING.md144
-rw-r--r--LICENSE189
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile92
-rw-r--r--README.md82
-rw-r--r--SECURITY.md3
-rwxr-xr-xcontrib/cirrus/runner.sh98
-rw-r--r--copy/blob.go187
-rw-r--r--copy/blob_test.go76
-rw-r--r--copy/compression.go355
-rw-r--r--copy/copy.go394
-rw-r--r--copy/digesting_reader.go62
-rw-r--r--copy/digesting_reader_test.go77
-rw-r--r--copy/encryption.go134
l---------copy/fixtures/Hello.bz21
l---------copy/fixtures/Hello.gz1
-rw-r--r--copy/fixtures/Hello.stdbin0 -> 18 bytes
l---------copy/fixtures/Hello.uncompressed1
l---------copy/fixtures/Hello.xz1
-rw-r--r--copy/fixtures/Hello.zstbin0 -> 18 bytes
-rw-r--r--copy/manifest.go224
-rw-r--r--copy/manifest_test.go484
-rw-r--r--copy/multiple.go351
-rw-r--r--copy/multiple_test.go162
-rw-r--r--copy/progress_bars.go160
-rw-r--r--copy/progress_channel.go79
-rw-r--r--copy/progress_channel_test.go80
-rw-r--r--copy/sign.go115
-rw-r--r--copy/sign_test.go163
-rw-r--r--copy/single.go907
-rw-r--r--copy/single_test.go141
-rw-r--r--directory/directory_dest.go284
-rw-r--r--directory/directory_src.go102
-rw-r--r--directory/directory_test.go209
-rw-r--r--directory/directory_transport.go188
-rw-r--r--directory/directory_transport_test.go230
-rw-r--r--directory/explicitfilepath/path.go55
-rw-r--r--directory/explicitfilepath/path_test.go172
-rw-r--r--doc.go70
-rw-r--r--docker/archive/dest.go82
-rw-r--r--docker/archive/dest_test.go5
-rw-r--r--docker/archive/fixtures/almostempty.tarbin0 -> 10752 bytes
-rw-r--r--docker/archive/reader.go121
-rw-r--r--docker/archive/src.go41
-rw-r--r--docker/archive/src_test.go5
-rw-r--r--docker/archive/transport.go206
-rw-r--r--docker/archive/transport_test.go284
-rw-r--r--docker/archive/writer.go103
-rw-r--r--docker/body_reader.go253
-rw-r--r--docker/body_reader_test.go196
-rw-r--r--docker/cache.go23
-rw-r--r--docker/daemon/client.go96
-rw-r--r--docker/daemon/client_test.go108
-rw-r--r--docker/daemon/daemon_dest.go186
-rw-r--r--docker/daemon/daemon_dest_test.go5
-rw-r--r--docker/daemon/daemon_src.go56
-rw-r--r--docker/daemon/daemon_src_test.go5
-rw-r--r--docker/daemon/daemon_transport.go219
-rw-r--r--docker/daemon/daemon_transport_test.go242
-rw-r--r--docker/daemon/testdata/certs/ca.pem18
-rw-r--r--docker/daemon/testdata/certs/cert.pem18
-rw-r--r--docker/daemon/testdata/certs/key.pem27
-rw-r--r--docker/distribution_error.go148
-rw-r--r--docker/distribution_error_test.go113
-rw-r--r--docker/docker_client.go1166
-rw-r--r--docker/docker_client_test.go412
-rw-r--r--docker/docker_image.go166
-rw-r--r--docker/docker_image_dest.go919
-rw-r--r--docker/docker_image_dest_test.go36
-rw-r--r--docker/docker_image_src.go798
-rw-r--r--docker/docker_image_src_test.go213
-rw-r--r--docker/docker_transport.go215
-rw-r--r--docker/docker_transport_test.go289
-rw-r--r--docker/errors.go101
-rw-r--r--docker/errors_test.go200
-rw-r--r--docker/fixtures/registries.d/emptyConfig.yaml1
-rw-r--r--docker/fixtures/registries.d/internal-example.com.yaml18
-rw-r--r--docker/fixtures/registries.d/internet-user.yaml12
-rw-r--r--docker/fixtures/registries.d/invalid-but.notyaml1
-rw-r--r--docker/internal/tarfile/dest.go173
-rw-r--r--docker/internal/tarfile/reader.go273
-rw-r--r--docker/internal/tarfile/src.go319
-rw-r--r--docker/internal/tarfile/src_test.go66
-rw-r--r--docker/internal/tarfile/types.go28
-rw-r--r--docker/internal/tarfile/writer.go379
-rw-r--r--docker/paths_common.go6
-rw-r--r--docker/paths_freebsd.go6
-rw-r--r--docker/policyconfiguration/naming.go78
-rw-r--r--docker/policyconfiguration/naming_test.go86
-rw-r--r--docker/reference/README.md2
-rw-r--r--docker/reference/helpers.go42
-rw-r--r--docker/reference/normalize.go181
-rw-r--r--docker/reference/normalize_test.go652
-rw-r--r--docker/reference/reference.go433
-rw-r--r--docker/reference/reference_test.go657
-rw-r--r--docker/reference/regexp-additions.go6
-rw-r--r--docker/reference/regexp.go156
-rw-r--r--docker/reference/regexp_test.go525
-rw-r--r--docker/registries_d.go293
-rw-r--r--docker/registries_d_test.go336
-rw-r--r--docker/tarfile/dest.go119
-rw-r--r--docker/tarfile/doc.go3
-rw-r--r--docker/tarfile/src.go104
-rw-r--r--docker/tarfile/types.go8
-rw-r--r--docker/wwwauthenticate.go172
-rw-r--r--docker/wwwauthenticate_test.go45
-rw-r--r--docs/atomic-signature-embedded-json.json66
-rw-r--r--docs/containers-auth.json.5.md105
-rw-r--r--docs/containers-certs.d.5.md28
-rw-r--r--docs/containers-policy.json.5.md493
-rw-r--r--docs/containers-registries.conf.5.md323
-rw-r--r--docs/containers-registries.conf.d.5.md37
-rw-r--r--docs/containers-registries.d.5.md140
-rw-r--r--docs/containers-signature.5.md246
-rw-r--r--docs/containers-sigstore-signing-params.yaml.5.md117
-rw-r--r--docs/containers-transports.5.md138
-rw-r--r--docs/signature-protocols.md136
-rw-r--r--go.mod144
-rw-r--r--go.sum644
-rwxr-xr-xhack/get_ci_vm.sh62
-rwxr-xr-xhack/validate.sh25
-rw-r--r--image/docker_schema2.go14
-rw-r--r--image/sourced.go37
-rw-r--r--image/unparsed.go41
-rw-r--r--internal/blobinfocache/blobinfocache.go70
-rw-r--r--internal/blobinfocache/types.go53
-rw-r--r--internal/image/common_test.go53
-rw-r--r--internal/image/docker_list.go34
-rw-r--r--internal/image/docker_schema1.go257
-rw-r--r--internal/image/docker_schema1_test.go722
-rw-r--r--internal/image/docker_schema2.go413
-rw-r--r--internal/image/docker_schema2_test.go726
-rw-r--r--internal/image/fixtures/oci1-all-media-types-config.json161
-rw-r--r--internal/image/fixtures/oci1-all-media-types-to-schema2-config.json161
-rw-r--r--internal/image/fixtures/oci1-all-media-types-to-schema2.json41
-rw-r--r--internal/image/fixtures/oci1-all-media-types.json41
-rw-r--r--internal/image/fixtures/oci1-artifact.json43
-rw-r--r--internal/image/fixtures/oci1-config-extra-fields.json158
-rw-r--r--internal/image/fixtures/oci1-config.json1
-rw-r--r--internal/image/fixtures/oci1-extra-config-fields.json43
-rw-r--r--internal/image/fixtures/oci1-invalid-media-type.json15
-rw-r--r--internal/image/fixtures/oci1-to-schema1.json1
-rw-r--r--internal/image/fixtures/oci1-to-schema2-config.json1
-rw-r--r--internal/image/fixtures/oci1-to-schema2.json37
-rw-r--r--internal/image/fixtures/oci1.encrypted.json43
-rw-r--r--internal/image/fixtures/oci1.json43
-rw-r--r--internal/image/fixtures/schema1-for-oci-config.json29
-rw-r--r--internal/image/fixtures/schema1-to-oci1-config.json82
-rw-r--r--internal/image/fixtures/schema1-to-oci1.json41
-rw-r--r--internal/image/fixtures/schema1-to-schema2-config.json163
-rw-r--r--internal/image/fixtures/schema1-to-schema2.json41
-rw-r--r--internal/image/fixtures/schema1.json62
-rw-r--r--internal/image/fixtures/schema2-all-media-types-to-oci1.json36
-rw-r--r--internal/image/fixtures/schema2-all-media-types.json36
-rw-r--r--internal/image/fixtures/schema2-config.json1
-rw-r--r--internal/image/fixtures/schema2-invalid-media-type.json36
-rw-r--r--internal/image/fixtures/schema2-to-oci1-config.json105
-rw-r--r--internal/image/fixtures/schema2-to-oci1.json30
-rw-r--r--internal/image/fixtures/schema2-to-schema1-by-docker.json116
-rw-r--r--internal/image/fixtures/schema2.json36
-rw-r--r--internal/image/manifest.go121
-rw-r--r--internal/image/manifest_test.go71
-rw-r--r--internal/image/memory.go64
-rw-r--r--internal/image/oci.go336
-rw-r--r--internal/image/oci_index.go34
-rw-r--r--internal/image/oci_test.go891
-rw-r--r--internal/image/sourced.go134
-rw-r--r--internal/image/unparsed.go119
-rw-r--r--internal/imagedestination/impl/compat.go101
-rw-r--r--internal/imagedestination/impl/helpers.go25
-rw-r--r--internal/imagedestination/impl/helpers_test.go29
-rw-r--r--internal/imagedestination/impl/properties.go72
-rw-r--r--internal/imagedestination/stubs/put_blob_partial.go52
-rw-r--r--internal/imagedestination/stubs/signatures.go50
-rw-r--r--internal/imagedestination/stubs/stubs.go27
-rw-r--r--internal/imagedestination/wrapper.go96
-rw-r--r--internal/imagesource/impl/compat.go55
-rw-r--r--internal/imagesource/impl/layer_infos.go23
-rw-r--r--internal/imagesource/impl/properties.go27
-rw-r--r--internal/imagesource/impl/signatures.go19
-rw-r--r--internal/imagesource/stubs/get_blob_at.go52
-rw-r--r--internal/imagesource/stubs/stubs.go28
-rw-r--r--internal/imagesource/wrapper.go56
-rw-r--r--internal/iolimits/iolimits.go58
-rw-r--r--internal/iolimits/iolimits_test.go37
-rw-r--r--internal/manifest/common.go72
-rw-r--r--internal/manifest/common_test.go91
-rw-r--r--internal/manifest/docker_schema2.go15
-rw-r--r--internal/manifest/docker_schema2_list.go314
-rw-r--r--internal/manifest/docker_schema2_list_test.go109
-rw-r--r--internal/manifest/errors.go56
-rw-r--r--internal/manifest/list.go131
-rw-r--r--internal/manifest/list_test.go161
-rw-r--r--internal/manifest/manifest.go167
-rw-r--r--internal/manifest/manifest_test.go134
-rw-r--r--internal/manifest/oci_index.go446
-rw-r--r--internal/manifest/oci_index_test.go265
-rw-r--r--internal/manifest/testdata/non-json.manifest.jsonbin0 -> 411 bytes
-rw-r--r--internal/manifest/testdata/oci1.index.zstd-selection.json66
-rw-r--r--internal/manifest/testdata/oci1.index.zstd-selection2.json96
-rw-r--r--internal/manifest/testdata/oci1index.json31
-rw-r--r--internal/manifest/testdata/ocilist-variants.json67
-rw-r--r--internal/manifest/testdata/ociv1.artifact.json10
-rw-r--r--internal/manifest/testdata/ociv1.image.index.json31
-rw-r--r--internal/manifest/testdata/ociv1.manifest.json30
-rw-r--r--internal/manifest/testdata/ociv1nomime.artifact.json9
-rw-r--r--internal/manifest/testdata/ociv1nomime.image.index.json30
-rw-r--r--internal/manifest/testdata/ociv1nomime.manifest.json29
l---------internal/manifest/testdata/schema2-to-schema1-by-docker.json1
-rw-r--r--internal/manifest/testdata/schema2list-variants.json44
-rw-r--r--internal/manifest/testdata/schema2list.json72
-rw-r--r--internal/manifest/testdata/unknown-version.manifest.json5
-rw-r--r--internal/manifest/testdata/v2list.manifest.json56
-rw-r--r--internal/manifest/testdata/v2s1-invalid-signatures.manifest.json11
-rw-r--r--internal/manifest/testdata/v2s1-unsigned.manifest.json28
-rw-r--r--internal/manifest/testdata/v2s1.manifest.json44
-rw-r--r--internal/manifest/testdata/v2s2.manifest.json26
-rw-r--r--internal/manifest/testdata/v2s2nomime.manifest.json10
-rw-r--r--internal/manifest/testdata_info_test.go12
-rw-r--r--internal/pkg/platform/platform_matcher.go197
-rw-r--r--internal/pkg/platform/platform_matcher_test.go61
-rw-r--r--internal/private/private.go164
-rw-r--r--internal/putblobdigest/put_blob_digest.go57
-rw-r--r--internal/putblobdigest/put_blob_digest_test.go74
-rw-r--r--internal/rootless/rootless.go25
-rw-r--r--internal/set/set.go52
-rw-r--r--internal/set/set_test.go77
-rw-r--r--internal/signature/signature.go102
-rw-r--r--internal/signature/signature_test.go94
-rw-r--r--internal/signature/sigstore.go87
-rw-r--r--internal/signature/sigstore_test.go71
-rw-r--r--internal/signature/simple.go29
-rw-r--r--internal/signature/simple_test.go36
l---------internal/signature/testdata/simple.signature1
-rw-r--r--internal/signer/signer.go47
-rw-r--r--internal/signer/signer_test.go87
-rw-r--r--internal/streamdigest/fixtures/Hello.uncompressed1
-rw-r--r--internal/streamdigest/stream_digest.go40
-rw-r--r--internal/streamdigest/stream_digest_test.go36
-rw-r--r--internal/testing/explicitfilepath-tmpdir/tmpdir.go29
-rw-r--r--internal/testing/gpgagent/gpg_agent.go16
-rw-r--r--internal/testing/mocks/image_reference.go56
-rw-r--r--internal/testing/mocks/image_source.go47
-rw-r--r--internal/testing/mocks/image_transport.go24
-rw-r--r--internal/testing/mocks/unparsed_image.go31
-rw-r--r--internal/tmpdir/tmpdir.go44
-rw-r--r--internal/tmpdir/tmpdir_test.go54
-rw-r--r--internal/unparsedimage/wrapper.go38
-rw-r--r--internal/uploadreader/upload_reader.go61
-rw-r--r--internal/uploadreader/upload_reader_test.go34
-rw-r--r--internal/useragent/useragent.go6
-rw-r--r--manifest/common.go152
-rw-r--r--manifest/common_test.go328
-rw-r--r--manifest/docker_schema1.go331
-rw-r--r--manifest/docker_schema1_test.go273
-rw-r--r--manifest/docker_schema2.go306
-rw-r--r--manifest/docker_schema2_list.go32
-rw-r--r--manifest/docker_schema2_list_test.go28
-rw-r--r--manifest/docker_schema2_test.go283
l---------manifest/fixtures/non-json.manifest.json1
l---------manifest/fixtures/ociv1.artifact.json1
-rw-r--r--manifest/fixtures/ociv1.encrypted.manifest.json39
l---------manifest/fixtures/ociv1.image.index.json1
-rw-r--r--manifest/fixtures/ociv1.invalid.mediatype.manifest.json29
l---------manifest/fixtures/ociv1.manifest.json1
-rw-r--r--manifest/fixtures/ociv1.nondistributable.gzip.manifest.json20
-rw-r--r--manifest/fixtures/ociv1.nondistributable.manifest.json20
-rw-r--r--manifest/fixtures/ociv1.nondistributable.zstd.manifest.json20
-rw-r--r--manifest/fixtures/ociv1.uncompressed.manifest.json30
-rw-r--r--manifest/fixtures/ociv1.zstd.manifest.json30
l---------manifest/fixtures/ociv1nomime.artifact.json1
l---------manifest/fixtures/ociv1nomime.image.index.json1
l---------manifest/fixtures/ociv1nomime.manifest.json1
l---------manifest/fixtures/schema2-to-schema1-by-docker.json1
l---------manifest/fixtures/unknown-version.manifest.json1
l---------manifest/fixtures/v2list.manifest.json1
l---------manifest/fixtures/v2s1-invalid-signatures.manifest.json1
l---------manifest/fixtures/v2s1-unsigned.manifest.json1
l---------manifest/fixtures/v2s1.manifest.json1
l---------manifest/fixtures/v2s2.manifest.json1
-rw-r--r--manifest/fixtures/v2s2.nondistributable.gzip.manifest.json20
-rw-r--r--manifest/fixtures/v2s2.nondistributable.manifest.json20
-rw-r--r--manifest/fixtures/v2s2.uncompressed.manifest.json26
l---------manifest/fixtures/v2s2nomime.manifest.json1
-rw-r--r--manifest/fixtures_info_test.go12
-rw-r--r--manifest/list.go35
-rw-r--r--manifest/list_test.go141
-rw-r--r--manifest/manifest.go170
-rw-r--r--manifest/manifest_test.go169
-rw-r--r--manifest/oci.go274
-rw-r--r--manifest/oci_index.go27
-rw-r--r--manifest/oci_index_test.go28
-rw-r--r--manifest/oci_test.go406
-rw-r--r--oci/archive/oci_dest.go189
-rw-r--r--oci/archive/oci_dest_test.go5
-rw-r--r--oci/archive/oci_src.go174
-rw-r--r--oci/archive/oci_src_test.go26
-rw-r--r--oci/archive/oci_transport.go200
-rw-r--r--oci/archive/oci_transport_test.go274
-rw-r--r--oci/internal/oci_util.go121
-rw-r--r--oci/internal/oci_util_test.go63
-rw-r--r--oci/layout/fixtures/accepted_certs/cacert.crt14
-rw-r--r--oci/layout/fixtures/accepted_certs/cert.cert14
-rw-r--r--oci/layout/fixtures/accepted_certs/cert.key7
-rwxr-xr-xoci/layout/fixtures/accepted_certs/gencert.sh23
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf72813792094760221
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef16
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be16
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab8611
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee8081380516
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e44021
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f24
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe81
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad516
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe1
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a124
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b9221
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f1
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf016
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde1
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a1
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa70072421
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce316
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a1
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/index.json61
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/info.txt61
-rw-r--r--oci/layout/fixtures/delete_image_multiple_images/oci-layout1
-rw-r--r--oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc1
-rw-r--r--oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc1142330
-rw-r--r--oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a1816
-rw-r--r--oci/layout/fixtures/delete_image_only_one_image/index.json13
-rw-r--r--oci/layout/fixtures/delete_image_only_one_image/oci-layout1
-rw-r--r--oci/layout/fixtures/delete_image_shared_blobs_dir/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc1142330
-rw-r--r--oci/layout/fixtures/delete_image_shared_blobs_dir/index.json13
-rw-r--r--oci/layout/fixtures/delete_image_shared_blobs_dir/oci-layout1
-rw-r--r--oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc1
-rw-r--r--oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a1816
-rw-r--r--oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc1
-rw-r--r--oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b316
-rw-r--r--oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc1142330
-rw-r--r--oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e4130
-rw-r--r--oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed16
-rw-r--r--oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d81
-rw-r--r--oci/layout/fixtures/delete_image_two_identical_references/index.json21
-rw-r--r--oci/layout/fixtures/delete_image_two_identical_references/oci-layout1
-rw-r--r--oci/layout/fixtures/manifest/index.json17
-rw-r--r--oci/layout/fixtures/name_lookups/index.json38
-rw-r--r--oci/layout/fixtures/rejected_certs/cert.cert14
-rw-r--r--oci/layout/fixtures/rejected_certs/cert.key7
-rwxr-xr-xoci/layout/fixtures/rejected_certs/gencert.sh22
-rw-r--r--oci/layout/fixtures/two_images_manifest/index.json27
-rw-r--r--oci/layout/oci_delete.go240
-rw-r--r--oci/layout/oci_delete_test.go298
-rw-r--r--oci/layout/oci_dest.go328
-rw-r--r--oci/layout/oci_dest_test.go180
-rw-r--r--oci/layout/oci_src.go216
-rw-r--r--oci/layout/oci_src_test.go143
-rw-r--r--oci/layout/oci_transport.go260
-rw-r--r--oci/layout/oci_transport_test.go371
-rw-r--r--oci/oci.go1
-rw-r--r--openshift/openshift-copies.go1200
-rw-r--r--openshift/openshift-copies_test.go115
-rw-r--r--openshift/openshift.go226
-rw-r--r--openshift/openshift_dest.go248
-rw-r--r--openshift/openshift_dest_test.go5
-rw-r--r--openshift/openshift_src.go174
-rw-r--r--openshift/openshift_src_test.go5
-rw-r--r--openshift/openshift_transport.go153
-rw-r--r--openshift/openshift_transport_test.go130
-rw-r--r--openshift/testdata/admin.kubeconfig20
-rw-r--r--ostree/ostree_dest.go517
-rw-r--r--ostree/ostree_dest_test.go10
-rw-r--r--ostree/ostree_src.go450
-rw-r--r--ostree/ostree_src_test.go8
-rw-r--r--ostree/ostree_transport.go242
-rw-r--r--ostree/ostree_transport_test.go305
-rw-r--r--pkg/blobcache/blobcache.go148
-rw-r--r--pkg/blobcache/blobcache_test.go250
-rw-r--r--pkg/blobcache/dest.go295
-rw-r--r--pkg/blobcache/src.go270
-rw-r--r--pkg/blobcache/src_test.go56
-rw-r--r--pkg/blobinfocache/boltdb/boltdb.go419
-rw-r--r--pkg/blobinfocache/boltdb/boltdb_test.go26
-rw-r--r--pkg/blobinfocache/default.go76
-rw-r--r--pkg/blobinfocache/default_test.go134
-rw-r--r--pkg/blobinfocache/internal/prioritize/prioritize.go139
-rw-r--r--pkg/blobinfocache/internal/prioritize/prioritize_test.go183
-rw-r--r--pkg/blobinfocache/internal/test/test.go389
-rw-r--r--pkg/blobinfocache/memory/memory.go216
-rw-r--r--pkg/blobinfocache/memory/memory_test.go18
-rw-r--r--pkg/blobinfocache/none/none.go50
-rw-r--r--pkg/blobinfocache/none/none_test.go7
-rw-r--r--pkg/blobinfocache/sqlite/sqlite.go575
-rw-r--r--pkg/blobinfocache/sqlite/sqlite_test.go25
-rw-r--r--pkg/cli/environment/environment.go31
-rw-r--r--pkg/cli/passphrase.go36
-rw-r--r--pkg/cli/sigstore/params/sigstore.go75
-rw-r--r--pkg/cli/sigstore/sigstore.go117
-rw-r--r--pkg/compression/compression.go165
-rw-r--r--pkg/compression/compression_test.go128
-rw-r--r--pkg/compression/fixtures/Hello.bz2bin0 -> 43 bytes
-rw-r--r--pkg/compression/fixtures/Hello.gzbin0 -> 25 bytes
-rw-r--r--pkg/compression/fixtures/Hello.uncompressed1
-rw-r--r--pkg/compression/fixtures/Hello.xzbin0 -> 64 bytes
-rw-r--r--pkg/compression/fixtures/Hello.zstbin0 -> 18 bytes
-rw-r--r--pkg/compression/internal/types.go65
-rw-r--r--pkg/compression/types/types.go41
-rw-r--r--pkg/compression/zstd.go59
-rw-r--r--pkg/docker/config/config.go943
-rw-r--r--pkg/docker/config/config_test.go1059
-rw-r--r--pkg/docker/config/testdata/abnormal.json22
-rw-r--r--pkg/docker/config/testdata/config.json5
-rw-r--r--pkg/docker/config/testdata/cred-helper-with-auth-files.conf1
-rw-r--r--pkg/docker/config/testdata/cred-helper.conf1
-rwxr-xr-xpkg/docker/config/testdata/docker-credential-helper-registry27
-rw-r--r--pkg/docker/config/testdata/empty.json1
-rw-r--r--pkg/docker/config/testdata/example.json7
-rw-r--r--pkg/docker/config/testdata/example_identitytoken.json8
-rw-r--r--pkg/docker/config/testdata/full.json28
-rw-r--r--pkg/docker/config/testdata/legacy.json8
-rw-r--r--pkg/docker/config/testdata/refpath.json10
-rw-r--r--pkg/shortnames/shortnames.go476
-rw-r--r--pkg/shortnames/shortnames_test.go603
-rw-r--r--pkg/shortnames/testdata/aliases.conf7
-rw-r--r--pkg/shortnames/testdata/no-reg.conf2
-rw-r--r--pkg/shortnames/testdata/one-reg.conf4
-rw-r--r--pkg/shortnames/testdata/registries.conf.d/config-1.conf9
-rw-r--r--pkg/shortnames/testdata/registries.conf.d/config-2.conf14
-rw-r--r--pkg/shortnames/testdata/registries.conf.d/config-3.conf0
-rw-r--r--pkg/shortnames/testdata/registries.conf.d/config-3.ignore7
-rw-r--r--pkg/shortnames/testdata/two-reg.conf4
-rw-r--r--pkg/strslice/README.md1
-rw-r--r--pkg/strslice/strslice.go30
-rw-r--r--pkg/strslice/strslice_test.go86
-rw-r--r--pkg/sysregistriesv2/paths_common.go12
-rw-r--r--pkg/sysregistriesv2/paths_freebsd.go12
-rw-r--r--pkg/sysregistriesv2/shortnames.go350
-rw-r--r--pkg/sysregistriesv2/shortnames_test.go298
-rw-r--r--pkg/sysregistriesv2/system_registries_v2.go1056
-rw-r--r--pkg/sysregistriesv2/system_registries_v2_test.go975
-rw-r--r--pkg/sysregistriesv2/testdata/aliases.conf7
-rw-r--r--pkg/sysregistriesv2/testdata/base-for-registries.d.conf5
-rw-r--r--pkg/sysregistriesv2/testdata/blocked-conflicts.conf13
-rw-r--r--pkg/sysregistriesv2/testdata/cred-helper.conf10
-rw-r--r--pkg/sysregistriesv2/testdata/empty.conf0
-rw-r--r--pkg/sysregistriesv2/testdata/find-registry.conf62
-rw-r--r--pkg/sysregistriesv2/testdata/insecure-conflicts.conf13
-rw-r--r--pkg/sysregistriesv2/testdata/invalid-aliases.conf3
-rw-r--r--pkg/sysregistriesv2/testdata/invalid-config-level-mirror.conf11
-rw-r--r--pkg/sysregistriesv2/testdata/invalid-conflict-mirror.conf12
-rw-r--r--pkg/sysregistriesv2/testdata/invalid-prefix.conf18
-rw-r--r--pkg/sysregistriesv2/testdata/invalid-search.conf1
-rw-r--r--pkg/sysregistriesv2/testdata/invalid-short-name-mode.conf1
-rw-r--r--pkg/sysregistriesv2/testdata/invalid-value-mirror.conf11
-rw-r--r--pkg/sysregistriesv2/testdata/invalidate-cache.conf23
-rw-r--r--pkg/sysregistriesv2/testdata/mirrors.conf13
-rw-r--r--pkg/sysregistriesv2/testdata/missing-mirror-location.conf10
-rw-r--r--pkg/sysregistriesv2/testdata/mixing-v1-v2-empty.conf10
-rw-r--r--pkg/sysregistriesv2/testdata/mixing-v1-v2.conf19
-rw-r--r--pkg/sysregistriesv2/testdata/pull-sources-from-reference.conf108
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d-empty-helpers/empty.conf1
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d-usr1/no-usr.conf1
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d-usr2/empty-usr.conf1
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d/config-1.conf10
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d/config-2.conf19
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d/config-3.conf0
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d/config-3.ignore7
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-1.conf3
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-2.conf3
-rw-r--r--pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-3.conf3
-rw-r--r--pkg/sysregistriesv2/testdata/try-update-cache-invalid.conf1
-rw-r--r--pkg/sysregistriesv2/testdata/try-update-cache-valid.conf2
-rw-r--r--pkg/sysregistriesv2/testdata/unmarshal.conf23
-rw-r--r--pkg/sysregistriesv2/testdata/unqualified-search.conf13
-rw-r--r--pkg/sysregistriesv2/testdata/v1-compatibility.conf8
-rw-r--r--pkg/sysregistriesv2/testdata/v1-invalid-block.conf8
-rw-r--r--pkg/sysregistriesv2/testdata/v1-invalid-insecure.conf8
-rw-r--r--pkg/sysregistriesv2/testdata/v1-invalid-search.conf8
-rw-r--r--pkg/tlsclientconfig/testdata/full/ca-cert-1.crt29
-rw-r--r--pkg/tlsclientconfig/testdata/full/ca-cert-2.crt29
-rw-r--r--pkg/tlsclientconfig/testdata/full/client-cert-1.cert29
-rw-r--r--pkg/tlsclientconfig/testdata/full/client-cert-1.key52
-rw-r--r--pkg/tlsclientconfig/testdata/full/client-cert-2.cert29
-rw-r--r--pkg/tlsclientconfig/testdata/full/client-cert-2.key52
l---------pkg/tlsclientconfig/testdata/missing-cert/client-cert-1.key1
l---------pkg/tlsclientconfig/testdata/missing-key/client-cert-1.cert1
l---------pkg/tlsclientconfig/testdata/unreadable-ca/unreadable.crt1
l---------pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.cert1
l---------pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.key1
l---------pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.cert1
l---------pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.key1
-rw-r--r--pkg/tlsclientconfig/tlsclientconfig.go103
-rw-r--r--pkg/tlsclientconfig/tlsclientconfig_test.go134
-rw-r--r--registries.conf77
-rw-r--r--sif/load.go210
-rw-r--r--sif/load_test.go58
-rw-r--r--sif/src.go206
-rw-r--r--sif/src_test.go5
-rw-r--r--sif/transport.go160
-rw-r--r--sif/transport_test.go175
-rw-r--r--signature/docker.go102
-rw-r--r--signature/docker_test.go236
-rw-r--r--signature/fixtures/.gitignore6
-rw-r--r--signature/fixtures/corrupt.signaturebin0 -> 412 bytes
-rw-r--r--signature/fixtures/corrupt.signature-v3bin0 -> 405 bytes
-rw-r--r--signature/fixtures/cosign.pub4
-rw-r--r--signature/fixtures/cosign2.pub4
-rw-r--r--signature/fixtures/dir-img-cosign-fulcio-rekor-valid/manifest.json16
-rw-r--r--signature/fixtures/dir-img-cosign-fulcio-rekor-valid/signature-1bin0 -> 5671 bytes
-rw-r--r--signature/fixtures/dir-img-cosign-key-rekor-valid/manifest.json16
-rw-r--r--signature/fixtures/dir-img-cosign-key-rekor-valid/signature-1bin0 -> 1645 bytes
l---------signature/fixtures/dir-img-cosign-manifest-digest-error/manifest.json1
l---------signature/fixtures/dir-img-cosign-manifest-digest-error/signature-11
l---------signature/fixtures/dir-img-cosign-mixed/manifest.json1
l---------signature/fixtures/dir-img-cosign-mixed/signature-11
l---------signature/fixtures/dir-img-cosign-mixed/signature-21
-rw-r--r--signature/fixtures/dir-img-cosign-modified-manifest/manifest.json17
l---------signature/fixtures/dir-img-cosign-modified-manifest/signature-11
l---------signature/fixtures/dir-img-cosign-no-manifest/signature-11
l---------signature/fixtures/dir-img-cosign-other-attachment/manifest.json1
-rw-r--r--signature/fixtures/dir-img-cosign-other-attachment/signature-1bin0 -> 278 bytes
l---------signature/fixtures/dir-img-cosign-valid-2/manifest.json1
l---------signature/fixtures/dir-img-cosign-valid-2/signature-11
-rw-r--r--signature/fixtures/dir-img-cosign-valid-2/signature-2bin0 -> 591 bytes
-rw-r--r--signature/fixtures/dir-img-cosign-valid-with-tag/manifest.json1
-rw-r--r--signature/fixtures/dir-img-cosign-valid-with-tag/signature-1bin0 -> 659 bytes
-rw-r--r--signature/fixtures/dir-img-cosign-valid/manifest.json1
-rw-r--r--signature/fixtures/dir-img-cosign-valid/signature-1bin0 -> 591 bytes
l---------signature/fixtures/dir-img-manifest-digest-error/manifest.json1
l---------signature/fixtures/dir-img-manifest-digest-error/signature-11
l---------signature/fixtures/dir-img-mixed/manifest.json1
l---------signature/fixtures/dir-img-mixed/signature-11
l---------signature/fixtures/dir-img-mixed/signature-21
-rw-r--r--signature/fixtures/dir-img-modified-manifest/manifest.json27
l---------signature/fixtures/dir-img-modified-manifest/signature-11
l---------signature/fixtures/dir-img-no-manifest/signature-11
l---------signature/fixtures/dir-img-unsigned/manifest.json1
l---------signature/fixtures/dir-img-valid-2/manifest.json1
l---------signature/fixtures/dir-img-valid-2/signature-11
-rw-r--r--signature/fixtures/dir-img-valid-2/signature-2bin0 -> 425 bytes
l---------signature/fixtures/dir-img-valid/manifest.json1
-rw-r--r--signature/fixtures/dir-img-valid/signature-1bin0 -> 427 bytes
-rw-r--r--signature/fixtures/double.signaturebin0 -> 822 bytes
-rw-r--r--signature/fixtures/expired.signaturebin0 -> 217 bytes
-rw-r--r--signature/fixtures/fulcio-cert17
-rw-r--r--signature/fixtures/fulcio-chain27
-rw-r--r--signature/fixtures/fulcio_v1.crt.pem13
-rw-r--r--signature/fixtures/image.manifest.json26
-rw-r--r--signature/fixtures/image.signaturebin0 -> 411 bytes
-rw-r--r--signature/fixtures/invalid-blob.signaturebin0 -> 199 bytes
-rw-r--r--signature/fixtures/invalid-blob.signature-v3bin0 -> 194 bytes
-rw-r--r--signature/fixtures/invalid-reference.signaturebin0 -> 422 bytes
-rw-r--r--signature/fixtures/no-optional-fields.signaturebin0 -> 383 bytes
-rw-r--r--signature/fixtures/policy.json160
-rw-r--r--signature/fixtures/public-key-1.gpgbin0 -> 653 bytes
-rw-r--r--signature/fixtures/public-key-2.gpgbin0 -> 737 bytes
-rw-r--r--signature/fixtures/public-key.gpg19
-rw-r--r--signature/fixtures/pubring.gpgbin0 -> 1478 bytes
l---------signature/fixtures/rekor-payload1
l---------signature/fixtures/rekor-set1
l---------signature/fixtures/rekor-sig1
l---------signature/fixtures/rekor.pub1
-rw-r--r--signature/fixtures/secring.gpgbin0 -> 2809 bytes
-rw-r--r--signature/fixtures/some-rsa-key.pub14
-rw-r--r--signature/fixtures/trustdb.gpgbin0 -> 1440 bytes
-rw-r--r--signature/fixtures/unknown-cosign-key.signaturebin0 -> 592 bytes
-rw-r--r--signature/fixtures/unknown-key.signaturebin0 -> 560 bytes
-rw-r--r--signature/fixtures/unknown-key.signature-v3bin0 -> 549 bytes
-rw-r--r--signature/fixtures/unsigned-encrypted.signaturebin0 -> 233 bytes
-rw-r--r--signature/fixtures/unsigned-literal.signaturebin0 -> 47 bytes
-rw-r--r--signature/fixtures/v2s1-invalid-signatures.manifest.json11
-rw-r--r--signature/fixtures_info_test.go29
-rw-r--r--signature/fulcio_cert.go204
-rw-r--r--signature/fulcio_cert_test.go476
-rw-r--r--signature/internal/errors.go15
-rw-r--r--signature/internal/errors_test.go14
-rw-r--r--signature/internal/fixtures_info_test.go15
-rw-r--r--signature/internal/json.go90
-rw-r--r--signature/internal/json_test.go138
-rw-r--r--signature/internal/rekor_set.go237
-rw-r--r--signature/internal/rekor_set_test.go400
-rw-r--r--signature/internal/sigstore_payload.go202
-rw-r--r--signature/internal/sigstore_payload_test.go329
l---------signature/internal/testdata/cosign.pub1
-rw-r--r--signature/internal/testdata/rekor-cert17
-rw-r--r--signature/internal/testdata/rekor-payload1
-rw-r--r--signature/internal/testdata/rekor-set1
-rw-r--r--signature/internal/testdata/rekor-sig1
-rw-r--r--signature/internal/testdata/rekor.pub4
l---------signature/internal/testdata/valid.signature1
-rw-r--r--signature/mechanism.go97
-rw-r--r--signature/mechanism_gpgme.go207
-rw-r--r--signature/mechanism_gpgme_test.go49
-rw-r--r--signature/mechanism_openpgp.go179
-rw-r--r--signature/mechanism_openpgp_test.go29
-rw-r--r--signature/mechanism_test.go330
-rw-r--r--signature/policy_config.go799
-rw-r--r--signature/policy_config_sigstore.go343
-rw-r--r--signature/policy_config_sigstore_test.go502
-rw-r--r--signature/policy_config_test.go1341
-rw-r--r--signature/policy_eval.go293
-rw-r--r--signature/policy_eval_baselayer.go20
-rw-r--r--signature/policy_eval_baselayer_test.go25
-rw-r--r--signature/policy_eval_signedby.go145
-rw-r--r--signature/policy_eval_signedby_test.go280
-rw-r--r--signature/policy_eval_sigstore.go281
-rw-r--r--signature/policy_eval_sigstore_test.go681
-rw-r--r--signature/policy_eval_simple.go29
-rw-r--r--signature/policy_eval_simple_test.go57
-rw-r--r--signature/policy_eval_test.go499
-rw-r--r--signature/policy_paths_common.go8
-rw-r--r--signature/policy_paths_freebsd.go8
-rw-r--r--signature/policy_reference_match.go154
-rw-r--r--signature/policy_reference_match_test.go556
-rw-r--r--signature/policy_types.go215
-rw-r--r--signature/signer/signer.go9
-rw-r--r--signature/sigstore/copied.go103
-rw-r--r--signature/sigstore/fulcio/fulcio.go155
-rw-r--r--signature/sigstore/generate.go35
-rw-r--r--signature/sigstore/generate_test.go64
-rw-r--r--signature/sigstore/internal/signer.go95
-rw-r--r--signature/sigstore/rekor/leveled_logger.go52
-rw-r--r--signature/sigstore/rekor/rekor.go160
-rw-r--r--signature/sigstore/signer.go60
-rw-r--r--signature/simple.go283
-rw-r--r--signature/simple_test.go405
-rw-r--r--signature/simplesigning/signer.go105
-rw-r--r--signature/simplesigning/signer_test.go235
-rw-r--r--signature/simplesigning/testdata/.gitignore6
l---------signature/simplesigning/testdata/pubring.gpg1
l---------signature/simplesigning/testdata/secring.gpg1
l---------signature/simplesigning/testdata/trustdb.gpg1
-rw-r--r--storage/storage_dest.go958
-rw-r--r--storage/storage_image.go59
-rw-r--r--storage/storage_reference.go316
-rw-r--r--storage/storage_reference_test.go196
-rw-r--r--storage/storage_src.go403
-rw-r--r--storage/storage_src_test.go43
-rw-r--r--storage/storage_test.go722
-rw-r--r--storage/storage_transport.go416
-rw-r--r--storage/storage_transport_test.go187
-rw-r--r--tarball/doc.go61
-rw-r--r--tarball/tarball_reference.go82
-rw-r--r--tarball/tarball_src.go234
-rw-r--r--tarball/tarball_src_test.go5
-rw-r--r--tarball/tarball_transport.go75
-rw-r--r--transports/alltransports/alltransports.go49
-rw-r--r--transports/alltransports/alltransports_test.go63
-rw-r--r--transports/alltransports/docker_daemon.go9
-rw-r--r--transports/alltransports/docker_daemon_stub.go10
-rw-r--r--transports/alltransports/ostree.go9
-rw-r--r--transports/alltransports/ostree_stub.go10
-rw-r--r--transports/alltransports/storage.go9
-rw-r--r--transports/alltransports/storage_stub.go10
-rw-r--r--transports/stub.go36
-rw-r--r--transports/stub_test.go18
-rw-r--r--transports/transports.go90
-rw-r--r--types/types.go717
-rw-r--r--version/version.go18
667 files changed, 71743 insertions, 0 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
new file mode 100644
index 0000000..e1bd7ad
--- /dev/null
+++ b/.cirrus.yml
@@ -0,0 +1,173 @@
+---
+
+# Main collection of env. vars to set for all tasks and scripts.
+env:
+ ####
+ #### Global variables used for all tasks
+ ####
+ # Name of the ultimate destination branch for this CI run
+ DEST_BRANCH: "release-5.29"
+ # c/skopeo branch name which must work with this c/image branch
+ SKOPEO_CI_BRANCH: "release-1.14"
+ # Use the Go module mirror (reason unknown; Travis did it this way)
+ GOPROXY: https://proxy.golang.org
+ # Overrides default location (/tmp/cirrus) for repo clone
+ GOPATH: "/var/tmp/go"
+ GOBIN: "${GOPATH}/bin"
+ GOCACHE: "${GOPATH}/cache"
+ GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/image"
+ # Required for consistency with containers/skopeo CI
+ SKOPEO_PATH: "${GOPATH}/src/github.com/containers/skopeo"
+ CIRRUS_WORKING_DIR: *gosrc
+ # The default is 'sh' if unspecified
+ CIRRUS_SHELL: "/usr/bin/bash"
+ # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
+ SCRIPT_BASE: "./contrib/cirrus"
+
+ # Normally set to an empty-string.
+ # Temporarily set this to a containers/skopeo PR number to
+ # test changes made there, in a PR in this repository.
+ SKOPEO_PR:
+
+ # Google-cloud VM Images
+ IMAGE_SUFFIX: "c20231004t194547z-f39f38d13"
+ FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
+ DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
+
+ # Container FQIN's (include bleeding-edge development-level container deps.)
+ FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
+ # Built along with the standard PR-based workflow in c/automation_images
+ SKOPEO_CIDEV_CONTAINER_FQIN: "quay.io/libpod/skopeo_cidev:${IMAGE_SUFFIX}"
+
+
+gcp_credentials: ENCRYPTED[38c860dd789c68bd4f38b24d4fa5ddb525346f7ebe02c8bc91532d625f033cb357f9b4a22f09a8299c92bfdad7556ae5]
+
+
+validate_task:
+ # The git-validation tool doesn't work well on branch or tag push,
+ # under Cirrus-CI, due to challenges obtaining the starting commit ID.
+ # Only do validation for PRs.
+ only_if: $CIRRUS_PR != ''
+ gce_instance: &debian_vm
+ image_project: libpod-218412
+ zone: "us-central1-f"
+ cpu: 2
+ memory: "4Gb"
+ # Required to be 200gig, do not modify - has i/o performance impact
+ # according to gcloud CLI tool warning messages.
+ disk: 200
+ image_name: ${DEBIAN_CACHE_IMAGE_NAME}
+ env:
+ HOME: "/root" # default unset, needed by golangci-lint.
+ script: |
+ git remote update
+ make tools
+ ${GOBIN}/git-validation -q -run DCO,short-subject,dangling-whitespace -range $(git merge-base origin/${DEST_BRANCH} HEAD)..${CIRRUS_CHANGE_IN_REPO}
+ make validate
+
+
+cross_task:
+ only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+ gce_instance: *debian_vm
+ script: make cross
+
+
+test_task:
+ alias: test
+ depends_on:
+ - validate
+ only_if: *not_docs
+ gce_instance: *debian_vm
+ matrix:
+ - name: "Test"
+ env:
+ BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
+ - name: "Test w/ opengpg"
+ env:
+ BUILDTAGS: &withopengpg 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
+ script: ${GOSRC}/${SCRIPT_BASE}/runner.sh image_tests
+
+
+#####
+##### NOTE: This task is substantially duplicated in the containers/skopeo
+##### repository's `.cirrus.yml`. Changes made here should be fully merged
+##### prior to being manually duplicated and maintained in containers/skopeo.
+#####
+test_skopeo_task:
+ alias: test_skopeo
+ only_if: *not_docs
+ depends_on:
+ - validate
+ gce_instance:
+ image_project: libpod-218412
+ zone: "us-central1-f"
+ cpu: 2
+ memory: "4Gb"
+ # Required to be 200gig, do not modify - has i/o performance impact
+ # according to gcloud CLI tool warning messages.
+ disk: 200
+ image_name: ${FEDORA_CACHE_IMAGE_NAME}
+ matrix:
+ - name: "Skopeo Test"
+ env:
+ BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
+ - name: "Skopeo Test w/ opengpg"
+ env:
+ BUILDTAGS: *withopengpg
+ setup_script: >-
+ "${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
+ vendor_script: >-
+ "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" vendor
+ build_script: >-
+ "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
+ unit_script: >-
+ "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" unit
+ integration_script: >-
+ "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" integration
+ system_script: >
+ "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system
+
+
+# This task is critical. It updates the "last-used by" timestamp stored
+# in metadata for all VM images. This mechanism functions in tandem with
+# an out-of-band pruning operation to remove disused VM images.
+meta_task:
+ name: "VM img. keepalive"
+ alias: meta
+ container: &smallcontainer
+ cpu: 2
+ memory: 2
+ image: quay.io/libpod/imgts:latest
+ env:
+ # Space-separated list of images used by this repository state
+ IMGNAMES: |
+ ${FEDORA_CACHE_IMAGE_NAME}
+ ${DEBIAN_CACHE_IMAGE_NAME}
+ BUILDID: "${CIRRUS_BUILD_ID}"
+ REPOREF: "${CIRRUS_REPO_NAME}"
+ GCPJSON: ENCRYPTED[04306103eee1933f87deb8a5af6514a7e3164aa589d6079abc0451eb2360879430ed020d6e025ca64ef667138ce9d786]
+ GCPNAME: ENCRYPTED[574c8afac5115af72e6722d7c1d1c7f9fca7a5586f3caad45251c1745d9b82d3c012b5e2f914e19fca0de56ce2c10f5d]
+ GCPPROJECT: libpod-218412
+ clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"
+ script: /usr/local/bin/entrypoint.sh
+
+
+# Status aggregator for all tests. This task simply ensures a defined
+# set of tasks all passed, and allows confirming that based on the status
+# of this task.
+success_task:
+ name: "Total Success"
+ alias: success
+ # N/B: ALL tasks must be listed here, minus their '_task' suffix.
+ depends_on:
+ - validate
+ - cross
+ - test
+ - test_skopeo
+ - meta
+ container: *smallcontainer
+ env:
+ CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
+ TEST_ENVIRON: container
+ clone_script: *noop
+ script: /bin/true
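
The BUILDTAGS strings in the test matrices above are ordinary Go build tags that
select optional dependencies; the exact wiring lives in the Makefile and
contrib/cirrus/runner.sh, which are not shown in this hunk. As a rough local
approximation (assuming the Makefile simply forwards the tags to `go test`), the
"Test w/ openpgp" leg amounts to:

    # sketch only; assumes the tags are passed through to `go test` unchanged
    go test -tags btrfs_noversion,libdm_no_deferred_remove,containers_image_openpgp ./...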
diff --git a/.github/renovate.json5 b/.github/renovate.json5
new file mode 100644
index 0000000..22f80d3
--- /dev/null
+++ b/.github/renovate.json5
@@ -0,0 +1,55 @@
+/*
+ Renovate is a service similar to GitHub Dependabot, but with
+ (fantastically) more configuration options. There are so many
+ options, in fact, that if you're new I recommend reading this
+ cheat-sheet before the official documentation:
+
+ https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet
+
+ Configuration Update/Change Procedure:
+ 1. Make changes
+ 2. Manually validate changes (from repo-root):
+
+ podman run -it \
+ -v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
+ docker.io/renovate/renovate:latest \
+ renovate-config-validator
+ 3. Commit.
+
+ Configuration Reference:
+ https://docs.renovatebot.com/configuration-options/
+
+ Monitoring Dashboard:
+ https://app.renovatebot.com/dashboard#github/containers
+
+ Note: The Renovate bot will create/manage its business on
+ branches named 'renovate/*'. Otherwise, and by
+ default, the only copy of this file that matters
+ is the one on the `main` branch. No other branches
+ will be monitored or touched in any way.
+*/
+
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+
+ /*************************************************
+ ****** Global/general configuration options *****
+ *************************************************/
+
+ // Re-use predefined sets of configuration options to DRY
+ "extends": [
+ // https://github.com/containers/automation/blob/main/renovate/defaults.json5
+ "github>containers/automation//renovate/defaults.json5"
+ ],
+
+ // Permit automatic rebasing when base-branch changes by more than
+ // one commit.
+ "rebaseWhen": "behind-base-branch",
+
+ /*************************************************
+ *** Repository-specific configuration options ***
+ *************************************************/
+
+ // Don't leave dependency-update PRs "hanging"; assign them to people.
+ "assignees": ["containers/image-maintainers"],
+}
diff --git a/.github/workflows/check_cirrus_cron.yml b/.github/workflows/check_cirrus_cron.yml
new file mode 100644
index 0000000..25bdf0b
--- /dev/null
+++ b/.github/workflows/check_cirrus_cron.yml
@@ -0,0 +1,20 @@
+---
+
+# See also:
+# https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml
+
+on:
+ # Note: This only applies to the default branch.
+ schedule:
+ # N/B: This should correspond to a period slightly after
+ # the last job finishes running. See job defs. at:
+ # https://cirrus-ci.com/settings/repository/6429752730058752
+ - cron: '03 03 * * 1-5'
+ # Debug: Allow triggering job manually in github-actions WebUI
+ workflow_dispatch: {}
+
+jobs:
+ # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+ call_cron_failures:
+ uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
+ secrets: inherit
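
Besides the cron schedule, the `workflow_dispatch:` trigger above allows starting the
workflow by hand. With the GitHub CLI (assuming `gh` is installed and authenticated
for this repository) that is typically:

    gh workflow run check_cirrus_cron.yml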
diff --git a/.github/workflows/rerun_cirrus_cron.yml b/.github/workflows/rerun_cirrus_cron.yml
new file mode 100644
index 0000000..67bc9fe
--- /dev/null
+++ b/.github/workflows/rerun_cirrus_cron.yml
@@ -0,0 +1,19 @@
+---
+
+# See also: https://github.com/containers/podman/blob/main/.github/workflows/rerun_cirrus_cron.yml
+
+on:
+ # Note: This only applies to the default branch.
+ schedule:
+ # N/B: This should correspond to a period slightly after
+ # the last job finishes running. See job defs. at:
+ # https://cirrus-ci.com/settings/repository/6429752730058752
+ - cron: '01 01 * * 1-5'
+ # Debug: Allow triggering job manually in github-actions WebUI
+ workflow_dispatch: {}
+
+jobs:
+ # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+ call_cron_rerun:
+ uses: containers/podman/.github/workflows/rerun_cirrus_cron.yml@main
+ secrets: inherit
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3f7bdaa
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,12 @@
+vendor
+tools.timestamp
+
+# Idea IDE
+*.iml
+.idea
+
+# Visual Studio Code
+.vscode/*
+
+# trash
+trash.lock
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..bb5d0ea
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,4 @@
+---
+run:
+ concurrency: 6
+ deadline: 5m
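
This config is picked up automatically when the linter runs from the repository root;
`make validate` is presumably wired through hack/validate.sh (not shown in this hunk)
to do the equivalent of:

    golangci-lint run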
diff --git a/.pullapprove.yml b/.pullapprove.yml
new file mode 100644
index 0000000..0da2fcf
--- /dev/null
+++ b/.pullapprove.yml
@@ -0,0 +1,9 @@
+approve_by_comment: true
+approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
+reject_regex: ^Rejected
+reset_on_push: false
+reviewers:
+ teams:
+ - image-maintainers
+ name: default
+ required: 2
diff --git a/CODE-OF-CONDUCT.md b/CODE-OF-CONDUCT.md
new file mode 100644
index 0000000..88b27b7
--- /dev/null
+++ b/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The image Project Community Code of Conduct
+
+The image project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..1ba458d
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,144 @@
+# Contributing to Containers/Image
+
+We'd love to have you join the community! The sections below summarize the
+processes that we follow.
+
+## Topics
+
+* [Reporting Issues](#reporting-issues)
+* [Submitting Pull Requests](#submitting-pull-requests)
+* [Communications](#communications)
+<!--
+* [Becoming a Maintainer](#becoming-a-maintainer)
+-->
+
+## Reporting Issues
+
+Before reporting an issue, check our backlog of
+[open issues](https://github.com/containers/image/issues)
+to see if someone else has already reported it. If so, feel free to add
+your scenario, or additional information, to the discussion. Or simply
+"subscribe" to it to be notified when it is updated.
+
+If you find a new issue with the project we'd love to hear about it! The most
+important aspect of a bug report is that it includes enough information for
+us to reproduce it. So, please include as much detail as possible and try
+to remove the extra stuff that doesn't really relate to the issue itself.
+The easier it is for us to reproduce it, the faster it'll be fixed!
+
+Please don't include any private/sensitive information in your issue!
+
+## Submitting Pull Requests
+
+No Pull Request (PR) is too small! Typos, additional comments in the code,
+new testcases, bug fixes, new features, more documentation, ... it's all
+welcome!
+
+While bug fixes can first be identified via an "issue", that is not required.
+It's ok to just open up a PR with the fix, but make sure you include the same
+information you would have included in an issue - like how to reproduce it.
+
+PRs for new features should include some background on what use cases the
+new code is trying to address. When possible and when it makes sense, try to break up
+larger PRs into smaller ones - it's easier to review smaller
+code changes. But only if those smaller ones make sense as stand-alone PRs.
+
+Regardless of the type of PR, all PRs should include:
+* well documented code changes
+* additional testcases. Ideally, they should fail w/o your code change applied
+* documentation changes
+
+Squash your commits into logical pieces of work that might want to be reviewed
+separately from the rest of the PR. Ideally, each commit should implement a single
+idea, and the PR branch should pass the tests at every commit. GitHub makes it easy
+to review the cumulative effect of many commits; so, when in doubt, use smaller commits.
+
+PRs that fix issues should include a reference like `Closes #XXXX` in the
+commit message so that GitHub will automatically close the referenced issue
+when the PR is merged.
+
+<!--
+All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
+-->
+
+### Sign your PRs
+
+The sign-off is a line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are simple: if you can certify
+the below (from [developercertificate.org](https://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+## Communications
+
+For general questions or discussions, please use the
+IRC channel `container-projects` on `irc.freenode.net`
+that has been set up for this purpose.
+
+For discussions around issues/bugs and features, you can use the GitHub
+[issues](https://github.com/containers/image/issues)
+and
+[PRs](https://github.com/containers/image/pulls)
+tracking system.
+
+<!--
+## Becoming a Maintainer
+
+To become a maintainer you must first be nominated by an existing maintainer.
+If a majority (>50%) of maintainers agree then the proposal is adopted and
+you will be added to the list.
+
+Removing a maintainer requires at least 75% of the remaining maintainers'
+approval, or if the person requests to be removed then it is automatic.
+Normally, a maintainer will only be removed if they are considered to be
+inactive for a long period of time or are viewed as disruptive to the community.
+
+The current list of maintainers can be found in the
+[MAINTAINERS](MAINTAINERS) file.
+-->
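
The sign-off flow described in CONTRIBUTING.md above boils down to a couple of git
commands; a minimal sketch, reusing the document's example identity and an arbitrary
commit message:

    git config user.name "Joe Smith"
    git config user.email "joe.smith@email.com"
    git commit -s -m "Fix typo in README"    # -s appends the Signed-off-by trailer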
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..9535635
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,189 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/MAINTAINERS b/MAINTAINERS
new file mode 100644
index 0000000..a5fdce8
--- /dev/null
+++ b/MAINTAINERS
@@ -0,0 +1,6 @@
+Antonio Murdaca <runcom@redhat.com> (@runcom)
+Brandon Philips <brandon.philips@coreos.com> (@philips)
+Miloslav Trmac <mitr@redhat.com> (@mtrmac)
+Dan Walsh <dwalsh@redhat.com> (@dwalsh)
+Nalin Dahyabhai <nalin@redhat.com> (@nalind)
+Valentin Rothberg <rothberg@redhat.com> (@vrothberg)
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..f329ef0
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,92 @@
+.PHONY: all tools test validate lint .gitvalidation fmt
+
+export GOPROXY=https://proxy.golang.org
+
+
+GOBIN := $(shell go env GOBIN)
+ifeq ($(GOBIN),)
+GOBIN := $(shell go env GOPATH)/bin
+endif
+
+# When cross-compiling _for_ a Darwin or Windows host, we must use openpgp
+BUILD_TAGS_WINDOWS_CROSS = containers_image_openpgp
+BUILD_TAGS_DARWIN_CROSS = containers_image_openpgp
+
+BUILDTAGS = btrfs_noversion libdm_no_deferred_remove
+BUILDFLAGS := -tags "$(BUILDTAGS)"
+
+PACKAGES := $(shell GO111MODULE=on go list $(BUILDFLAGS) ./...)
+SOURCE_DIRS = $(shell echo $(PACKAGES) | awk 'BEGIN{FS="/"; RS=" "}{print $$4}' | uniq)
+
+PREFIX ?= ${DESTDIR}/usr
+MANINSTALLDIR=${PREFIX}/share/man
+GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')
+MANPAGES_MD = $(wildcard docs/*.5.md)
+MANPAGES ?= $(MANPAGES_MD:%.md=%)
+
+# N/B: This value is managed by Renovate, manual changes are
+# possible, as long as they don't disturb the formatting
+# (i.e. DO NOT ADD A 'v' prefix!)
+GOLANGCI_LINT_VERSION := 1.55.2
+
+export PATH := $(PATH):${GOBIN}
+
+all: tools test validate .gitvalidation
+
+build:
+ GO111MODULE="on" go build $(BUILDFLAGS) ./...
+
+$(MANPAGES): %: %.md
+ $(GOMD2MAN) -in $< -out $@
+
+docs: $(MANPAGES)
+
+install-docs: docs
+ install -d -m 755 ${MANINSTALLDIR}/man5
+ install -m 644 docs/*.5 ${MANINSTALLDIR}/man5/
+
+install: install-docs
+
+cross:
+ GOOS=windows $(MAKE) build BUILDTAGS="$(BUILDTAGS) $(BUILD_TAGS_WINDOWS_CROSS)"
+ GOOS=darwin $(MAKE) build BUILDTAGS="$(BUILDTAGS) $(BUILD_TAGS_DARWIN_CROSS)"
+
+tools: .install.gitvalidation .install.golangci-lint
+
+.install.gitvalidation:
+ if [ ! -x "$(GOBIN)/git-validation" ]; then \
+ GO111MODULE="off" go get $(BUILDFLAGS) github.com/vbatts/git-validation; \
+ fi
+
+.install.golangci-lint:
+ if [ ! -x "$(GOBIN)/golangci-lint" ]; then \
+ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(GOBIN) v$(GOLANGCI_LINT_VERSION) ; \
+ fi
+
+clean:
+ rm -rf $(MANPAGES)
+
+test:
+ @GO111MODULE="on" go test $(BUILDFLAGS) -cover ./...
+
+fmt:
+ @gofmt -l -s -w $(SOURCE_DIRS)
+
+validate: lint
+ @BUILDTAGS="$(BUILDTAGS)" hack/validate.sh
+
+lint:
+ $(GOBIN)/golangci-lint run --build-tags "$(BUILDTAGS)"
+
+# When this is running in CI, it will only check the CI commit range
+.gitvalidation:
+ @which $(GOBIN)/git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found. Consider 'make clean && make tools'" && false)
+ git fetch -q "https://github.com/containers/image.git" "refs/heads/main"
+ upstream="$$(git rev-parse --verify FETCH_HEAD)" ; \
+ $(GOBIN)/git-validation -q -run DCO,short-subject,dangling-whitespace -range $$upstream..HEAD
+
+vendor-in-container:
+ podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang go mod tidy
+
+codespell:
+ codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L keypair,flate,uint,iff,od,ERRO -w
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..034665b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,82 @@
+[![GoDoc](https://godoc.org/github.com/containers/image?status.svg)](https://godoc.org/github.com/containers/image) [![Build Status](https://api.cirrus-ci.com/github/containers/image.svg)](https://cirrus-ci.com/github/containers/image)
+=
+
+`image` is a set of Go libraries aimed at working in various ways with
+container images and container image registries.
+
+The containers/image library allows applications to pull and push images from
+container image registries, like docker.io and quay.io. It also
+implements "simple image signing".
+
+The containers/image library also allows you to inspect a repository on a
+container registry without pulling down the image. This means it fetches the
+repository's manifest and can show you a `docker inspect`-like JSON output
+about a whole repository or a tag. In contrast to `docker inspect`, this
+library helps you gather useful information about a repository or a tag
+without requiring you to run `docker pull`.
+
+The containers/image library also allows you to translate from one image format
+to another, for example Docker container images to OCI images. It also allows
+you to copy container images between various registries, possibly converting
+them as necessary, and to sign and verify images.
+
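+A minimal sketch of calling the Go API directly is shown below; the image
+references and the permissive signature policy are illustrative only, and real
+callers should load a proper policy (e.g. via `signature.DefaultPolicy`):
+
+```go
+package main
+
+import (
+	"context"
+	"os"
+
+	"github.com/containers/image/v5/copy"
+	"github.com/containers/image/v5/signature"
+	"github.com/containers/image/v5/transports/alltransports"
+)
+
+func main() {
+	// Parse source and destination references (example values).
+	srcRef, err := alltransports.ParseImageName("docker://quay.io/libpod/alpine:latest")
+	if err != nil {
+		panic(err)
+	}
+	destRef, err := alltransports.ParseImageName("dir:/tmp/alpine")
+	if err != nil {
+		panic(err)
+	}
+
+	// Accept any image; production code should use a stricter policy.
+	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
+	policyCtx, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		panic(err)
+	}
+	defer policyCtx.Destroy()
+
+	// Copy the image, reporting progress to stdout.
+	if _, err := copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
+		ReportWriter: os.Stdout,
+	}); err != nil {
+		panic(err)
+	}
+}
+```
+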
+## Command-line usage
+
+The containers/image project is only a library with no user interface;
+you can either incorporate it into your Go programs, or use the `skopeo` tool:
+
+The [skopeo](https://github.com/containers/skopeo) tool uses the
+containers/image library and takes advantage of many of its features,
+e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.
+
+## Dependencies
+
+This library ships as a [Go module].
+
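+For example, to add it to a module-based project:
+
+```sh
+go get github.com/containers/image/v5
+```
+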
+## Building
+
+If you want to see what the library can do, or an example of how it is called,
+consider starting with the [skopeo](https://github.com/containers/skopeo) tool
+instead.
+
+To integrate this library into your project, include it as a [Go module],
+put it into `$GOPATH` or use your preferred vendoring tool to include a copy
+in your project. Ensure that the dependencies documented [in go.mod][go.mod]
+are also available (using those exact versions or different versions of
+your choosing).
+
+This library also depends on some C libraries. Either install them:
+```sh
+Fedora$ dnf install gpgme-devel libassuan-devel # potentially also ostree-devel
+macOS$ brew install gpgme
+```
+or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`)
+
+[Go module]: https://github.com/golang/go/wiki/Modules
+[go.mod]: https://github.com/containers/image/blob/master/go.mod
+
+### Supported build tags
+
+- `containers_image_docker_daemon_stub`: Don’t import the `docker-daemon:` transport in `github.com/containers/image/transports/alltransports`, to decrease the amount of required dependencies. Use a stub which reports that the transport is not supported instead.
+- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
+the primary downside is that creating new signatures with the Golang-only implementation is not supported.
+- `containers_image_ostree`: Import `ostree:` transport in `github.com/containers/image/transports/alltransports`. Building with this tag requires the `libostree` development libraries; otherwise a stub which reports that the transport is not supported is used. The `github.com/containers/image/ostree` package is completely disabled
+and impossible to import when this build tag is not in use.
+- `containers_image_storage_stub`: Don’t import the `containers-storage:` transport in `github.com/containers/image/transports/alltransports`, to decrease the amount of required dependencies. Use a stub which reports that the transport is not supported instead.
+
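+For example, a client that can do without the `docker-daemon:` transport and the cgo/gpgme dependency might build with (the tag combination is illustrative):
+
+```sh
+go build -tags "containers_image_openpgp containers_image_docker_daemon_stub" ./...
+```
+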
+## [Contributing](CONTRIBUTING.md)
+
+Information about contributing to this project.
+
+When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
+
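+For instance, to run the default `make` targets (tools, tests, and validation) without the gpgme dependency (tag choice is illustrative):
+
+```sh
+make BUILDTAGS="containers_image_openpgp"
+```
+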
+## License
+
+Apache License 2.0
+
+SPDX-License-Identifier: Apache-2.0
+
+## Contact
+
+- Mailing list: [containers-dev](https://groups.google.com/forum/?hl=en#!forum/containers-dev)
+- IRC: #[container-projects](irc://irc.freenode.net:6667/#container-projects) on freenode.net
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..5a20c0e
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the image Project
+
+The image Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
diff --git a/contrib/cirrus/runner.sh b/contrib/cirrus/runner.sh
new file mode 100755
index 0000000..ca8b067
--- /dev/null
+++ b/contrib/cirrus/runner.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# This script is intended to be executed by automation or humans
+# under a hack/get_ci_vm.sh context. Using it under any other circumstances
+# is unlikely to work.
+
+set -e
+
+if [[ -r "/etc/automation_environment" ]]; then
+ source /etc/automation_environment
+ source $AUTOMATION_LIB_PATH/common_lib.sh
+else
+ (
+ echo "WARNING: It does not appear that containers/automation was installed."
+ echo " Functionality of most of ${BASH_SOURCE[0]} will be negatively"
+ echo " impacted."
+ ) > /dev/stderr
+fi
+
+export "PATH=$PATH:$GOPATH/bin"
+
+_run_setup() {
+ req_env_vars SKOPEO_PATH SKOPEO_CI_BRANCH GOSRC BUILDTAGS
+
+ project_module=$(go list .)
+
+ make tools
+
+ rm -rf "${SKOPEO_PATH}"
+ git clone -b ${SKOPEO_CI_BRANCH} \
+ https://github.com/containers/skopeo.git ${SKOPEO_PATH}
+
+ cd "${SKOPEO_PATH}"
+ if [[ -n "$SKOPEO_PR" ]] && [[ $SKOPEO_PR -gt 1000 ]]; then
+ warn "Fetching and checking out code from skopeo pull-request #$SKOPEO_PR"
+ git fetch origin "+refs/pull/$SKOPEO_PR/head"
+ git checkout FETCH_HEAD
+ fi
+
+ msg "Replacing upstream skopeo $SKOPEO_CI_BRANCH branch $project_module module"
+ go mod edit -replace ${project_module}=$GOSRC
+
+ "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" setup
+}
+
+_run_image_tests() {
+ req_env_vars GOPATH GOSRC
+
+    # Tests in this repo are intended to run as a regular user.
+ ROOTLESS_USER="testuser$RANDOM"
+ msg "Setting up rootless user '$ROOTLESS_USER'"
+ cd $GOSRC || exit 1
+ # Guarantee independence from specific values
+ rootless_uid=$((RANDOM+1000))
+ rootless_gid=$((RANDOM+1000))
+ msg "Creating $rootless_uid:$rootless_gid $ROOTLESS_USER user"
+ groupadd -g $rootless_gid $ROOTLESS_USER
+ useradd -g $rootless_gid -u $rootless_uid --no-user-group --create-home $ROOTLESS_USER
+
+ msg "Setting ownership of $GOPATH and $GOSRC"
+ chown -R $ROOTLESS_USER:$ROOTLESS_USER "$GOPATH" "$GOSRC"
+
+ msg "Creating ssh key pairs"
+ mkdir -p "/root/.ssh" "/home/$ROOTLESS_USER/.ssh"
+ ssh-keygen -t ed25519 -P "" -f "/root/.ssh/id_ed25519"
+
+ msg "Setup authorized_keys"
+ cat /root/.ssh/*.pub >> /home/$ROOTLESS_USER/.ssh/authorized_keys
+
+ msg "Configure ssh file permissions"
+ chmod -R 700 "/root/.ssh"
+ chmod -R 700 "/home/$ROOTLESS_USER/.ssh"
+ chown -R $ROOTLESS_USER:$ROOTLESS_USER "/home/$ROOTLESS_USER/.ssh"
+
+ msg "Ensure the ssh daemon is up and running within 5 minutes"
+ systemctl is-active sshd || \
+ systemctl start sshd
+
+ msg "Setup known_hosts for root"
+    ssh-keyscan localhost > /root/.ssh/known_hosts
+
+ msg "Executing tests as $ROOTLESS_USER"
+ showrun ssh $ROOTLESS_USER@localhost make -C $GOSRC test "BUILDTAGS='$BUILDTAGS'"
+}
+
+req_env_vars GOSRC
+
+handler="_run_${1}"
+if [ "$(type -t $handler)" != "function" ]; then
+ die "Unknown/Unsupported command-line argument '$1'"
+fi
+
+msg "************************************************************"
+msg "Runner executing $1 on $OS_REL_VER"
+msg "************************************************************"
+
+cd "$GOSRC"
+$handler
diff --git a/copy/blob.go b/copy/blob.go
new file mode 100644
index 0000000..8d5580d
--- /dev/null
+++ b/copy/blob.go
@@ -0,0 +1,187 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps (de/re/)compressing it if canModifyBlob,
+// and returns a complete blobInfo of the copied blob.
+func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo,
+ getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
+ isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
+ // The copying happens through a pipeline of connected io.Readers;
+ // that pipeline is built by updating stream.
+ // === Input: srcReader
+ stream := sourceStream{
+ reader: srcReader,
+ info: srcInfo,
+ }
+
+ // === Process input through digestingReader to validate against the expected digest.
+ // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+ // use a separate validation failure indicator.
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
+ // dest.PutBlob may detect that the layer already exists, in which case we don't
+ // read stream to the end, and validation does not happen.
+ digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err)
+ }
+ stream.reader = digestingReader
+
+ // === Update progress bars
+ stream.reader = bar.ProxyReader(stream.reader)
+
+ // === Decrypt the stream, if required.
+ decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Detect compression of the input stream.
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
+ var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
+ if getOriginalLayerCopyWriter != nil {
+ stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor))
+ originalLayerReader = stream.reader
+ }
+
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ canModifyBlob := !isConfig && ic.cannotModifyManifestReason == ""
+ // === Deal with layer compression/decompression if necessary
+ compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer compressionStep.close()
+
+ // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
+ if decryptionStep.decrypting && toEncrypt {
+ // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value.
+ // Before relaxing this, see the original pull request’s review if there are other reasons to reject this.
+ return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
+ }
+ encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // === Report progress using the ic.c.options.Progress channel, if required.
+ if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
+ progressReader := newProgressReader(
+ stream.reader,
+ ic.c.options.Progress,
+ ic.c.options.ProgressInterval,
+ srcInfo,
+ )
+ defer progressReader.reportDone()
+ stream.reader = progressReader
+ }
+
+ // === Finally, send the layer stream to dest.
+ options := private.PutBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ IsConfig: isConfig,
+ EmptyLayer: emptyLayer,
+ }
+ if !isConfig {
+ options.LayerIndex = &layerIndex
+ }
+ destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
+ }
+ uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob)
+
+ compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
+ decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
+ if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+ // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+ // So, read everything from originalLayerReader, which will cause the rest to be
+ // sent there if we are not already at EOF.
+ if getOriginalLayerCopyWriter != nil {
+ logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+ _, err := io.Copy(io.Discard, originalLayerReader)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err)
+ }
+ }
+
+ if digestingReader.validationFailed { // Coverage: This should never happen.
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+ }
+ if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
+ }
+ if digestingReader.validationSucceeded {
+ if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
+ return types.BlobInfo{}, err
+ }
+ }
+
+ return uploadedInfo, nil
+}
+
+// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built.
+// This allows the handlers of individual aspects to build the copy pipeline without _too much_
+// specific cooperation by the caller.
+//
+// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline
+// without specific knowledge of various aspects in copyBlobFromStream; that may come one day.
+type sourceStream struct {
+ reader io.Reader
+ info types.BlobInfo // corresponding to the data available in reader.
+}
+
+// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating errors that happen during reads.
+// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
+type errorAnnotationReader struct {
+ reader io.Reader
+}
+
+// Read annotates errors that happen during the read.
+func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
+ n, err = r.reader.Read(b)
+ if err != nil && err != io.EOF {
+ return n, fmt.Errorf("happened during read: %w", err)
+ }
+ return n, err
+}
+
+// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo.
+func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo {
+ // The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size.
+ // Handling of compression, encryption, and the related MIME types and the like are all the responsibility
+ // of the generic code in this package.
+ return types.BlobInfo{
+ Digest: uploadedBlob.Digest,
+ Size: uploadedBlob.Size,
+ URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+ Annotations: inputInfo.Annotations,
+ MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto.
+ CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream.
+ CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream.
+ CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream.
+ }
+}
diff --git a/copy/blob_test.go b/copy/blob_test.go
new file mode 100644
index 0000000..61f1e36
--- /dev/null
+++ b/copy/blob_test.go
@@ -0,0 +1,76 @@
+package copy
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestUpdatedBlobInfoFromUpload(t *testing.T) {
+ for _, c := range []struct {
+ srcInfo types.BlobInfo
+ uploaded private.UploadedBlob
+ expected types.BlobInfo
+ }{
+ { // A straightforward upload with a known size
+ srcInfo: types.BlobInfo{
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ URLs: []string{"https://layer.url"},
+ Annotations: map[string]string{"test-annotation-2": "two"},
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress, // Might be set by blobCacheSource.LayerInfosForCopy
+ CompressionAlgorithm: &compression.Gzip, // Set e.g. in copyLayer
+ // CryptoOperation is not set by LayerInfos()
+ },
+ uploaded: private.UploadedBlob{
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ },
+ expected: types.BlobInfo{
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ URLs: nil,
+ Annotations: map[string]string{"test-annotation-2": "two"},
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress, // Might be set by blobCacheSource.LayerInfosForCopy
+ CompressionAlgorithm: &compression.Gzip, // Set e.g. in copyLayer
+ // CryptoOperation is set to the zero value
+ },
+ },
+ { // Upload determining the digest/size
+ srcInfo: types.BlobInfo{
+ Digest: "",
+ Size: -1,
+ URLs: []string{"https://layer.url"},
+ Annotations: map[string]string{"test-annotation-2": "two"},
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress, // Might be set by blobCacheSource.LayerInfosForCopy
+ CompressionAlgorithm: &compression.Gzip, // Set e.g. in copyLayer
+ // CryptoOperation is not set by LayerInfos()
+ },
+ uploaded: private.UploadedBlob{
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 513543640,
+ },
+ expected: types.BlobInfo{
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 513543640,
+ URLs: nil,
+ Annotations: map[string]string{"test-annotation-2": "two"},
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress, // Might be set by blobCacheSource.LayerInfosForCopy
+ CompressionAlgorithm: &compression.Gzip, // Set e.g. in copyLayer
+ // CryptoOperation is set to the zero value
+ },
+ },
+ } {
+ res := updatedBlobInfoFromUpload(c.srcInfo, c.uploaded)
+ assert.Equal(t, c.expected, res, fmt.Sprintf("%#v", c.uploaded))
+ }
+}
diff --git a/copy/compression.go b/copy/compression.go
new file mode 100644
index 0000000..a42e3b6
--- /dev/null
+++ b/copy/compression.go
@@ -0,0 +1,355 @@
+package copy
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/maps"
+)
+
+var (
+ // defaultCompressionFormat is used if the destination transport requests
+ // compression, and the user does not explicitly instruct us to use an algorithm.
+ defaultCompressionFormat = &compression.Gzip
+
+ // compressionBufferSize is the buffer size used to compress a blob
+ compressionBufferSize = 1048576
+
+ // expectedCompressionFormats is used to check if a blob with a specified media type is compressed
+ // using the algorithm that the media type says it should be compressed with
+ expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
+ imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
+ imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
+ manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
+ }
+)
+
+// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
+type bpDetectCompressionStepData struct {
+ isCompressed bool
+ format compressiontypes.Algorithm // Valid if isCompressed
+ decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
+ srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob.
+}
+
+// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
+// srcInfo is only used for error messages.
+// Returns data for other steps.
+func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) {
+ // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+ format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
+ if err != nil {
+ return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
+ }
+ stream.reader = reader
+
+ res := bpDetectCompressionStepData{
+ isCompressed: decompressor != nil,
+ format: format,
+ decompressor: decompressor,
+ }
+ if res.isCompressed {
+ res.srcCompressorName = format.Name()
+ } else {
+ res.srcCompressorName = internalblobinfocache.Uncompressed
+ }
+
+ if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
+ logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
+ }
+ return res, nil
+}
+
+// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
+type bpCompressionStepData struct {
+ operation types.LayerCompression // Operation to use for updating the blob metadata.
+ uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+ uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+ srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
+ uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
+ closers []io.Closer // Objects to close after the upload is done, if any.
+}
+
+// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData,
+// and must eventually call close.
+func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo,
+ detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
+ // short-circuit conditions
+ layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType)
+ if !layerCompressionChangeSupported {
+ logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType)
+ }
+ if canModifyBlob && layerCompressionChangeSupported {
+ for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){
+ ic.bpcPreserveEncrypted,
+ ic.bpcCompressUncompressed,
+ ic.bpcRecompressCompressed,
+ ic.bpcDecompressCompressed,
+ } {
+ res, err := fn(stream, detected)
+ if err != nil {
+ return nil, err
+ }
+ if res != nil {
+ return res, nil
+ }
+ }
+ }
+ return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil
+}
+
+// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if isOciEncrypted(stream.info.MediaType) {
+ logrus.Debugf("Using original blob without modification for encrypted blob")
+ // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: nil,
+ srcCompressorName: internalblobinfocache.UnknownCompression,
+ uploadedCompressorName: internalblobinfocache.UnknownCompression,
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
+ logrus.Debugf("Compressing blob on the fly")
+ var uploadedAlgorithm *compressiontypes.Algorithm
+ if ic.compressionFormat != nil {
+ uploadedAlgorithm = ic.compressionFormat
+ } else {
+ uploadedAlgorithm = defaultCompressionFormat
+ }
+
+ reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm)
+ // Note: reader must be closed on all return paths.
+ stream.reader = reader
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
+ Digest: "",
+ Size: -1,
+ }
+ return &bpCompressionStepData{
+ operation: types.Compress,
+ uploadedAlgorithm: uploadedAlgorithm,
+ uploadedAnnotations: annotations,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: uploadedAlgorithm.Name(),
+ closers: []io.Closer{reader},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
+ ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
+ // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
+ // re-compressed using the desired format.
+ logrus.Debugf("Blob will be converted")
+
+ decompressed, err := detected.decompressor(stream.reader)
+ if err != nil {
+ return nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ decompressed.Close()
+ }
+ }()
+
+ recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat)
+ // Note: recompressed must be closed on all return paths.
+ stream.reader = recompressed
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
+ Digest: "",
+ Size: -1,
+ }
+ succeeded = true
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: ic.compressionFormat,
+ uploadedAnnotations: annotations,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: ic.compressionFormat.Name(),
+ closers: []io.Closer{decompressed, recompressed},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
+ if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed {
+ logrus.Debugf("Blob will be decompressed")
+ s, err := detected.decompressor(stream.reader)
+ if err != nil {
+ return nil, err
+ }
+ // Note: s must be closed on all return paths.
+ stream.reader = s
+ stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
+ Digest: "",
+ Size: -1,
+ }
+ return &bpCompressionStepData{
+ operation: types.Decompress,
+ uploadedAlgorithm: nil,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: internalblobinfocache.Uncompressed,
+ closers: []io.Closer{s},
+ }, nil
+ }
+ return nil, nil
+}
+
+// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
+// This does not change the sourceStream parameter; we include it for symmetry with other
+// pipeline steps.
+func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData,
+ layerCompressionChangeSupported bool) *bpCompressionStepData {
+ logrus.Debugf("Using original blob without modification")
+ // Remember if the original blob was compressed, and if so how, so that if
+ // LayerInfosForCopy() returned something that differs from what was in the
+ // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
+ // it will be able to correctly derive the MediaType for the copied blob.
+ //
+ // But don’t touch blobs in objects where we can’t change compression,
+ // so that src.UpdatedImage() doesn’t fail; assume that for such blobs
+ // LayerInfosForCopy() should not be making any changes in the first place.
+ var algorithm *compressiontypes.Algorithm
+ if layerCompressionChangeSupported && detected.isCompressed {
+ algorithm = &detected.format
+ } else {
+ algorithm = nil
+ }
+ return &bpCompressionStepData{
+ operation: types.PreserveOriginal,
+ uploadedAlgorithm: algorithm,
+ srcCompressorName: detected.srcCompressorName,
+ uploadedCompressorName: detected.srcCompressorName,
+ }
+}
+
+// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
+func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
+ *operation = d.operation
+ // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
+ *algorithm = d.uploadedAlgorithm
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ maps.Copy(*annotations, d.uploadedAnnotations)
+}
+
+// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
+// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
+ encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encryptionStep.encrypting && !decryptionStep.decrypting {
+ // If d.operation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
+ // (because stream.info.Digest == "", this must have been computed afresh).
+ switch d.operation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ case types.Decompress:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ default:
+ return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
+ }
+ }
+ if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
+ if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
+ // HACK: Don’t record zstd:chunked algorithms.
+ // There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression,
+ // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
+ //
+ // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
+ // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
+ // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about
+ // inconsistent data to be logged.
+ c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
+ }
+ }
+ if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
+ d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
+ if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
+ // HACK: Don’t record zstd:chunked algorithms, see above.
+ c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
+ }
+ }
+ return nil
+}
+
+// close closes objects that carry state throughout the compression/decompression operation.
+func (d *bpCompressionStepData) close() {
+ for _, c := range d.closers {
+ c.Close()
+ }
+}
+
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
+ compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, compressionBufferSize)
+
+ _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
+ if err != nil {
+ compressor.Close()
+ return err
+ }
+
+ return compressor.Close()
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
+ err := errors.New("Internal error: unexpected panic in compressGoroutine")
+ defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel)
+}
+
+// compressedStream returns a stream with the contents of the input reader compressed using the given algorithm, and a metadata map.
+// The caller must close the returned reader.
+// AFTER the stream is consumed, the returned map will be updated with annotations to use on the data.
+func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
+ pipeReader, pipeWriter := io.Pipe()
+ annotations := map[string]string{}
+	// If compression fails while writing data, the goroutine will do pipeWriter.CloseWithError(); if writing
+	// fails because the caller has already closed pipeReader and further writing to the pipe fails,
+	// we don’t care.
+ go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
+ return pipeReader, annotations
+}
diff --git a/copy/copy.go b/copy/copy.go
new file mode 100644
index 0000000..ad1453f
--- /dev/null
+++ b/copy/copy.go
@@ -0,0 +1,394 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagesource"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/signature/signer"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+ "golang.org/x/sync/semaphore"
+ "golang.org/x/term"
+)
+
+var (
+	// ErrDecryptParamsMissing is returned if decryption parameters are missing
+ ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
+
+ // maxParallelDownloads is used to limit the maximum number of parallel
+ // downloads. Let's follow Firefox by limiting it to 6.
+ maxParallelDownloads = uint(6)
+)
+
+const (
+ // CopySystemImage is the default value which, when set in
+ // Options.ImageListSelection, indicates that the caller expects only one
+ // image to be copied, so if the source reference refers to a list of
+ // images, one that matches the current system will be selected.
+ CopySystemImage ImageListSelection = iota
+ // CopyAllImages is a value which, when set in Options.ImageListSelection,
+ // indicates that the caller expects to copy multiple images, and if
+ // the source reference refers to a list, that the list and every image
+ // to which it refers will be copied. If the source reference refers
+	// to a list but the target reference can not accept lists, an error
+ // should be returned.
+ CopyAllImages
+ // CopySpecificImages is a value which, when set in
+ // Options.ImageListSelection, indicates that the caller expects the
+ // source reference to be either a single image or a list of images,
+ // and if the source reference is a list, wants only specific instances
+ // from it copied (or none of them, if the list of instances to copy is
+ // empty), along with the list itself. If the target reference can
+ // only accept one image (i.e., it cannot accept lists), an error
+ // should be returned.
+ CopySpecificImages
+)
+
+// ImageListSelection is one of CopySystemImage, CopyAllImages, or
+// CopySpecificImages, to control whether, when the source reference is a list,
+// copy.Image() copies only an image which matches the current runtime
+// environment, or all images which match the supplied reference, or only
+// specific images from the source reference.
+type ImageListSelection int
+
+// Options allows supplying non-default configuration modifying the behavior of CopyImage.
+type Options struct {
+ RemoveSignatures bool // Remove any pre-existing signatures. Signers and SignBy… will still add a new signature.
+ // Signers to use to add signatures during the copy.
+ // Callers are still responsible for closing these Signer objects; they can be reused for multiple copy.Image operations in a row.
+ Signers []*signer.Signer
+ SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
+ SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path.
+ SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`.
+	SignIdentity reference.Named // Identity to use when signing; defaults to the docker reference of the destination
+
+ ReportWriter io.Writer
+ SourceCtx *types.SystemContext
+ DestinationCtx *types.SystemContext
+ ProgressInterval time.Duration // time to wait between reports to signal the progress channel
+ Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+
+ // Preserve digests, and fail if we cannot.
+ PreserveDigests bool
+	// Manifest MIME type of the image set by the user. "" is the default and means the manifest MIME type is autodetected.
+ ForceManifestMIMEType string
+ ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
+ Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
+	// When set to OptionalBoolTrue, give priority to pulling gzip images if multiple variants are present;
+	// when set to OptionalBoolFalse, prefer the best compression. Choose automatically (and the choice may change over time)
+ // if this is set to OptionalBoolUndefined (which is the default behavior, and recommended for most callers).
+ // This only affects CopySystemImage.
+ PreferGzipInstances types.OptionalBool
+
+ // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted.
+	// The encryption options are derived from the construction of the EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, denotes encrypt all layers.
+ // integers in the slice represent 0-indexed layer indices, with support for negative
+	// indexing, i.e. 0 is the first layer and -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
+	// OciDecryptConfig, if non-nil, contains the config that can be used to decrypt an image if it is
+	// encrypted. If nil, no attempt is made to decrypt the image.
+ OciDecryptConfig *encconfig.DecryptConfig
+
+ // A weighted semaphore to limit the amount of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored.
+ ConcurrentBlobCopiesSemaphore *semaphore.Weighted
+
+ // MaxParallelDownloads indicates the maximum layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0. Ignored if ConcurrentBlobCopiesSemaphore is set.
+ MaxParallelDownloads uint
+
+ // When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
+	// exists (and is equivalent), making the eventual (no-op) copy more performant for this case. Enabling the option
+ // is slightly pessimistic if the destination image doesn't exist, or is not equivalent.
+ OptimizeDestinationImageAlreadyExists bool
+
+ // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
+ // to not indicate "nondistributable".
+ DownloadForeignLayers bool
+
+ // Contains slice of OptionCompressionVariant, where copy will ensure that for each platform
+ // in the manifest list, a variant with the requested compression will exist.
+ // Invalid when copying a non-multi-architecture image. That will probably
+ // change in the future.
+ EnsureCompressionVariantsExist []OptionCompressionVariant
+ // ForceCompressionFormat ensures that the compression algorithm set in
+ // DestinationCtx.CompressionFormat is used exclusively, and blobs of other
+ // compression algorithms are not reused.
+ ForceCompressionFormat bool
+}
+
+// OptionCompressionVariant allows the end user to supply information about
+// the selected compression algorithm and compression level.
+// Refer to EnsureCompressionVariantsExist for more about its usage.
+type OptionCompressionVariant struct {
+ Algorithm compression.Algorithm
+ Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance
+}
+
+// copier allows us to keep track of diffID values for blobs, and other
+// data shared across one or more images in a possible manifest list.
+// The owner must call close() when done.
+type copier struct {
+ policyContext *signature.PolicyContext
+ dest private.ImageDestination
+ rawSource private.ImageSource
+ options *Options // never nil
+
+ reportWriter io.Writer
+ progressOutput io.Writer
+
+ unparsedToplevel *image.UnparsedImage // for rawSource
+ blobInfoCache internalblobinfocache.BlobInfoCache2
+ concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
+ signers []*signer.Signer // Signers to use to create new signatures for the image
+ signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
+}
+
+// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions
+func shouldRequireCompressionFormatMatch(options *Options) (bool, error) {
+ if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) {
+ return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format")
+ }
+ return options.ForceCompressionFormat, nil
+}
+
+// Image copies an image from srcRef to destRef, using policyContext to validate
+// source image admissibility. It returns the manifest which was written to
+// the new copy of the image.
+func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) {
+ if options == nil {
+ options = &Options{}
+ }
+
+ if err := validateImageListSelection(options.ImageListSelection); err != nil {
+ return nil, err
+ }
+
+ reportWriter := io.Discard
+
+ if options.ReportWriter != nil {
+ reportWriter = options.ReportWriter
+ }
+
+ publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
+ if err != nil {
+ return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
+ }
+ dest := imagedestination.FromPublic(publicDest)
+ defer func() {
+ if err := dest.Close(); err != nil {
+ if retErr != nil {
+ retErr = fmt.Errorf(" (dest: %v): %w", err, retErr)
+ } else {
+ retErr = fmt.Errorf(" (dest: %v)", err)
+ }
+ }
+ }()
+
+ publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
+ if err != nil {
+ return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
+ }
+ rawSource := imagesource.FromPublic(publicRawSource)
+ defer func() {
+ if err := rawSource.Close(); err != nil {
+ if retErr != nil {
+ retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
+ } else {
+ retErr = fmt.Errorf(" (src: %v)", err)
+ }
+ }
+ }()
+
+ // If reportWriter is not a TTY (e.g., when piping to a file), do not
+ // print the progress bars to avoid long and hard to parse output.
+ // Instead use printCopyInfo() to print single line "Copying ..." messages.
+ progressOutput := reportWriter
+ if !isTTY(reportWriter) {
+ progressOutput = io.Discard
+ }
+
+ c := &copier{
+ policyContext: policyContext,
+ dest: dest,
+ rawSource: rawSource,
+ options: options,
+
+ reportWriter: reportWriter,
+ progressOutput: progressOutput,
+
+ unparsedToplevel: image.UnparsedInstance(rawSource, nil),
+ // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
+ // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more).
+ // Conceptually the cache settings should be in copy.Options instead.
+ blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
+ }
+ defer c.close()
+ c.blobInfoCache.Open()
+ defer c.blobInfoCache.Close()
+
+ // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
+ if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {
+ c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore
+ if c.concurrentBlobCopiesSemaphore == nil {
+ max := c.options.MaxParallelDownloads
+ if max == 0 {
+ max = maxParallelDownloads
+ }
+ c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max))
+ }
+ } else {
+ c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1))
+ if c.options.ConcurrentBlobCopiesSemaphore != nil {
+ if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err)
+ }
+ defer c.options.ConcurrentBlobCopiesSemaphore.Release(1)
+ }
+ }
+
+ if err := c.setupSigners(); err != nil {
+ return nil, err
+ }
+
+ multiImage, err := isMultiImage(ctx, c.unparsedToplevel)
+ if err != nil {
+ return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
+ }
+
+ if !multiImage {
+ if len(options.EnsureCompressionVariantsExist) > 0 {
+ return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
+ }
+ requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
+ if err != nil {
+ return nil, err
+ }
+ // The simple case: just copy a single image.
+ single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
+ if err != nil {
+ return nil, err
+ }
+ copiedManifest = single.manifest
+ } else if c.options.ImageListSelection == CopySystemImage {
+ if len(options.EnsureCompressionVariantsExist) > 0 {
+ return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
+ }
+ requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
+ if err != nil {
+ return nil, err
+ }
+ // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
+ // matches the current system to copy, and copy it.
+ mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
+ }
+ manifestList, err := internalManifest.ListFromBlob(mfest, manifestType)
+ if err != nil {
+ return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
+ }
+ instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx
+ if err != nil {
+ return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err)
+ }
+ logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
+ unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
+ single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
+ if err != nil {
+ return nil, fmt.Errorf("copying system image from manifest list: %w", err)
+ }
+ copiedManifest = single.manifest
+ } else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */
+ // If we were asked to copy multiple images and can't, that's an error.
+ if !supportsMultipleImages(c.dest) {
+ return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
+ }
+ // Copy some or all of the images.
+ switch c.options.ImageListSelection {
+ case CopyAllImages:
+ logrus.Debugf("Source is a manifest list; copying all instances")
+ case CopySpecificImages:
+ logrus.Debugf("Source is a manifest list; copying some instances")
+ }
+ if copiedManifest, err = c.copyMultipleImages(ctx); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := c.dest.Commit(ctx, c.unparsedToplevel); err != nil {
+ return nil, fmt.Errorf("committing the finished image: %w", err)
+ }
+
+ return copiedManifest, nil
+}
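+
+// Illustrative sketch (editorial addition, not part of the upstream source): a minimal
+// external caller of Image. The alltransports and signature helpers are assumed from
+// their usual packages; error handling is elided for brevity.
+//
+//    srcRef, _ := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
+//    destRef, _ := alltransports.ParseImageName("dir:/tmp/alpine")
+//    policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
+//    policyCtx, _ := signature.NewPolicyContext(policy)
+//    defer policyCtx.Destroy()
+//    manifestBytes, err := copy.Image(context.Background(), policyCtx, destRef, srcRef,
+//        &copy.Options{ReportWriter: os.Stdout})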
+
+// Printf writes a formatted string to c.reportWriter.
+// Note that the method name Printf is not entirely arbitrary: (go tool vet)
+// has a built-in list of functions/methods (whatever object they are for)
+// which have their format strings checked; for other names we would have
+// to pass a parameter to every (go tool vet) invocation.
+func (c *copier) Printf(format string, a ...any) {
+ fmt.Fprintf(c.reportWriter, format, a...)
+}
+
+// close tears down state owned by copier.
+func (c *copier) close() {
+ for i, s := range c.signersToClose {
+ if err := s.Close(); err != nil {
+ logrus.Warnf("Error closing per-copy signer %d: %v", i+1, err)
+ }
+ }
+}
+
+// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
+func validateImageListSelection(selection ImageListSelection) error {
+ switch selection {
+ case CopySystemImage, CopyAllImages, CopySpecificImages:
+ return nil
+ default:
+ return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection)
+ }
+}
+
+// Checks if the destination supports accepting multiple images by checking if it can support
+// manifest types that are lists of other manifests.
+func supportsMultipleImages(dest types.ImageDestination) bool {
+ mtypes := dest.SupportedManifestMIMETypes()
+ if len(mtypes) == 0 {
+ // Anything goes!
+ return true
+ }
+ return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage)
+}
+
+// isTTY returns true if the io.Writer is a file and a tty.
+func isTTY(w io.Writer) bool {
+ if f, ok := w.(*os.File); ok {
+ return term.IsTerminal(int(f.Fd()))
+ }
+ return false
+}
diff --git a/copy/digesting_reader.go b/copy/digesting_reader.go
new file mode 100644
index 0000000..901d108
--- /dev/null
+++ b/copy/digesting_reader.go
@@ -0,0 +1,62 @@
+package copy
+
+import (
+ "fmt"
+ "hash"
+ "io"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+type digestingReader struct {
+ source io.Reader
+ digester digest.Digester
+ hash hash.Hash
+ expectedDigest digest.Digest
+ validationFailed bool
+ validationSucceeded bool
+}
+
+// newDigestingReader returns an io.Reader implementation with the contents of source. On reaching EOF it sets
+// validationSucceeded to true if the source stream matches expectedDigest, or sets validationFailed to true and
+// returns a non-EOF error if it does not (neither is set if EOF is never reached).
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+ var digester digest.Digester
+ if err := expectedDigest.Validate(); err != nil {
+ return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+ }
+ digestAlgorithm := expectedDigest.Algorithm()
+ if !digestAlgorithm.Available() {
+ return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+ }
+ digester = digestAlgorithm.Digester()
+
+ return &digestingReader{
+ source: source,
+ digester: digester,
+ hash: digester.Hash(),
+ expectedDigest: expectedDigest,
+ validationFailed: false,
+ }, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+ n, err := d.source.Read(p)
+ if n > 0 {
+ if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil {
+ // Coverage: This should not happen, the hash.Hash interface requires
+ // d.digest.Write to never return an error, and the io.Writer interface
+ // requires n2 == len(input) if no error is returned.
+ return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err)
+ }
+ }
+ if err == io.EOF {
+ actualDigest := d.digester.Digest()
+ if actualDigest != d.expectedDigest {
+ d.validationFailed = true
+ return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+ }
+ d.validationSucceeded = true
+ }
+ return n, err
+}
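+
+// Illustrative sketch (editorial addition, not part of the upstream source): typical
+// in-package use of the reader. The wrapped stream must be consumed to EOF; a digest
+// mismatch surfaces as a read error and sets validationFailed, a match sets
+// validationSucceeded.
+//
+//    blob := []byte("example blob")
+//    reader, err := newDigestingReader(bytes.NewReader(blob), digest.FromBytes(blob))
+//    if err == nil {
+//        _, err = io.Copy(io.Discard, reader) // err != nil if the content did not match
+//    }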
diff --git a/copy/digesting_reader_test.go b/copy/digesting_reader_test.go
new file mode 100644
index 0000000..2e17437
--- /dev/null
+++ b/copy/digesting_reader_test.go
@@ -0,0 +1,77 @@
+package copy
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ digest "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewDigestingReader(t *testing.T) {
+ // Only the failure cases, success is tested in TestDigestingReaderRead below.
+ source := bytes.NewReader([]byte("abc"))
+ for _, input := range []digest.Digest{
+ "abc", // Not algo:hexvalue
+ "crc32:", // Unknown algorithm, empty value
+ "crc32:012345678", // Unknown algorithm
+ "sha256:", // Empty value
+ "sha256:0", // Invalid hex value
+ "sha256:01", // Invalid length of hex value
+ } {
+ _, err := newDigestingReader(source, input)
+ assert.Error(t, err, input.String())
+ }
+}
+
+func TestDigestingReaderRead(t *testing.T) {
+ cases := []struct {
+ input []byte
+ digest digest.Digest
+ }{
+ {[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+ {[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"},
+ {make([]byte, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"},
+ }
+ // Valid input
+ for _, c := range cases {
+ source := bytes.NewReader(c.input)
+ reader, err := newDigestingReader(source, c.digest)
+ require.NoError(t, err, c.digest.String())
+ dest := bytes.Buffer{}
+ n, err := io.Copy(&dest, reader)
+ assert.NoError(t, err, c.digest.String())
+ assert.Equal(t, int64(len(c.input)), n, c.digest.String())
+ assert.Equal(t, c.input, dest.Bytes(), c.digest.String())
+ assert.False(t, reader.validationFailed, c.digest.String())
+ assert.True(t, reader.validationSucceeded, c.digest.String())
+ }
+ // Modified input
+ for _, c := range cases {
+ source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil))
+ reader, err := newDigestingReader(source, c.digest)
+ require.NoError(t, err, c.digest.String())
+ dest := bytes.Buffer{}
+ _, err = io.Copy(&dest, reader)
+ assert.Error(t, err, c.digest.String())
+ assert.True(t, reader.validationFailed, c.digest.String())
+ assert.False(t, reader.validationSucceeded, c.digest.String())
+ }
+ // Truncated input
+ for _, c := range cases {
+ source := bytes.NewReader(c.input)
+ reader, err := newDigestingReader(source, c.digest)
+ require.NoError(t, err, c.digest.String())
+ if len(c.input) != 0 {
+ dest := bytes.Buffer{}
+ truncatedLen := int64(len(c.input) - 1)
+ n, err := io.CopyN(&dest, reader, truncatedLen)
+ assert.NoError(t, err, c.digest.String())
+ assert.Equal(t, truncatedLen, n, c.digest.String())
+ }
+ assert.False(t, reader.validationFailed, c.digest.String())
+ assert.False(t, reader.validationSucceeded, c.digest.String())
+ }
+}
diff --git a/copy/encryption.go b/copy/encryption.go
new file mode 100644
index 0000000..1305676
--- /dev/null
+++ b/copy/encryption.go
@@ -0,0 +1,134 @@
+package copy
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ "github.com/containers/ocicrypt"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+ return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+ layers := i.LayerInfos()
+ return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
+ return isOciEncrypted(l.MediaType)
+ })
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+ decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+ if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil {
+ return &bpDecryptionStepData{
+ decrypting: false,
+ }, nil
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ desc := imgspecv1.Descriptor{
+ Annotations: stream.info.Annotations,
+ }
+ reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
+ if err != nil {
+ return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = decryptedDigest
+ stream.info.Size = -1
+ maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
+ return strings.HasPrefix(k, "org.opencontainers.image.enc")
+ })
+ return &bpDecryptionStepData{
+ decrypting: true,
+ }, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+ if d.decrypting {
+ *operation = types.Decrypt
+ }
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+ encrypting bool // We are actually encrypting the stream
+ finalizer ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+ decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
+ if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil {
+ return &bpEncryptionStepData{
+ encrypting: false,
+ }, nil
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ var annotations map[string]string
+ if !decryptionStep.decrypting {
+ annotations = srcInfo.Annotations
+ }
+ desc := imgspecv1.Descriptor{
+ MediaType: srcInfo.MediaType,
+ Digest: srcInfo.Digest,
+ Size: srcInfo.Size,
+ Annotations: annotations,
+ }
+ reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc)
+ if err != nil {
+ return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ return &bpEncryptionStepData{
+ encrypting: true,
+ finalizer: finalizer,
+ }, nil
+}
+
+// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
+ if !d.encrypting {
+ return nil
+ }
+
+ encryptAnnotations, err := d.finalizer()
+ if err != nil {
+ return fmt.Errorf("Unable to finalize encryption: %w", err)
+ }
+ *operation = types.Encrypt
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ maps.Copy(*annotations, encryptAnnotations)
+ return nil
+}
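+
+// Illustrative sketch (editorial addition, not part of the upstream source): the rough
+// caller-side ordering of the two steps above in the blob copy pipeline. Variable names
+// (ic, stream, srcInfo, toEncrypt, uploadedInfo) are placeholders for this sketch.
+//
+//    decryptStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo)
+//    // ... other pipeline steps (e.g. decompression/recompression) run here ...
+//    encryptStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptStep)
+//    // after the blob has been written to the destination:
+//    decryptStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
+//    err = encryptStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations)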
diff --git a/copy/fixtures/Hello.bz2 b/copy/fixtures/Hello.bz2
new file mode 120000
index 0000000..fc28d6c
--- /dev/null
+++ b/copy/fixtures/Hello.bz2
@@ -0,0 +1 @@
+../../pkg/compression/fixtures/Hello.bz2 \ No newline at end of file
diff --git a/copy/fixtures/Hello.gz b/copy/fixtures/Hello.gz
new file mode 120000
index 0000000..08aa805
--- /dev/null
+++ b/copy/fixtures/Hello.gz
@@ -0,0 +1 @@
+../../pkg/compression/fixtures/Hello.gz \ No newline at end of file
diff --git a/copy/fixtures/Hello.std b/copy/fixtures/Hello.std
new file mode 100644
index 0000000..02770a6
--- /dev/null
+++ b/copy/fixtures/Hello.std
Binary files differ
diff --git a/copy/fixtures/Hello.uncompressed b/copy/fixtures/Hello.uncompressed
new file mode 120000
index 0000000..49b4662
--- /dev/null
+++ b/copy/fixtures/Hello.uncompressed
@@ -0,0 +1 @@
+../../pkg/compression/fixtures/Hello.uncompressed \ No newline at end of file
diff --git a/copy/fixtures/Hello.xz b/copy/fixtures/Hello.xz
new file mode 120000
index 0000000..77bcd85
--- /dev/null
+++ b/copy/fixtures/Hello.xz
@@ -0,0 +1 @@
+../../pkg/compression/fixtures/Hello.xz \ No newline at end of file
diff --git a/copy/fixtures/Hello.zst b/copy/fixtures/Hello.zst
new file mode 100644
index 0000000..02770a6
--- /dev/null
+++ b/copy/fixtures/Hello.zst
Binary files differ
diff --git a/copy/manifest.go b/copy/manifest.go
new file mode 100644
index 0000000..6f01cf5
--- /dev/null
+++ b/copy/manifest.go
@@ -0,0 +1,224 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+)
+
+// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
+// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location.
+// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
+var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
+
+// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption.
+var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest}
+
+// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
+type orderedSet struct {
+ list []string
+ included *set.Set[string]
+}
+
+// newOrderedSet creates a correctly initialized orderedSet.
+// [Sometimes it would be really nice if Golang had constructors…]
+func newOrderedSet() *orderedSet {
+ return &orderedSet{
+ list: []string{},
+ included: set.New[string](),
+ }
+}
+
+// append adds s to the end of os, only if it is not included already.
+func (os *orderedSet) append(s string) {
+ if !os.included.Contains(s) {
+ os.list = append(os.list, s)
+ os.included.Add(s)
+ }
+}
+
+// determineManifestConversionInputs contains the inputs for determineManifestConversion.
+type determineManifestConversionInputs struct {
+ srcMIMEType string // MIME type of the input manifest
+
+ destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
+
+ forceManifestMIMEType string // User’s choice of forced manifest MIME type
+ requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+}
+
+// manifestConversionPlan contains the decisions made by determineManifestConversion.
+type manifestConversionPlan struct {
+ // The preferred manifest MIME type (whether we are converting to it or using it unmodified).
+ // We compute this only to show it in error messages; without having to add this context
+ // in an error message, we would be happy enough to know only that no conversion is needed.
+ preferredMIMEType string
+ preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step.
+ otherMIMETypeCandidates []string // Other possible alternatives, in order
+}
+
+// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in.
+func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) {
+ srcType := in.srcMIMEType
+ normalizedSrcType := manifest.NormalizedMIMEType(srcType)
+ if srcType != normalizedSrcType {
+ logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
+ srcType = normalizedSrcType
+ }
+
+ destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes
+ if in.forceManifestMIMEType != "" {
+ destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
+ }
+
+ if len(destSupportedManifestMIMETypes) == 0 {
+ if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) {
+ return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
+ }
+ destSupportedManifestMIMETypes = ociEncryptionMIMETypes
+ }
+ supportedByDest := set.New[string]()
+ for _, t := range destSupportedManifestMIMETypes {
+ if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
+ supportedByDest.Add(t)
+ }
+ }
+ if supportedByDest.Empty() {
+ if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes
+ return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty")
+ }
+ // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved.
+ if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption.
+ return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting")
+ }
+ // destSupportedManifestMIMETypes has three possible origins:
+ if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
+ return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
+ in.forceManifestMIMEType)
+ }
+ if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes
+ // Coverage: This should never happen, ociEncryptionMIMETypes all support encryption
+ return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
+ }
+ // 3. destination does not support encryption.
+ return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
+ strings.Join(destSupportedManifestMIMETypes, ", "))
+ }
+
+ // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
+ // So, build a list of types to try in order of decreasing preference.
+ // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct,
+ // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other.
+ // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types
+ // and never attempt the other one.
+ prioritizedTypes := newOrderedSet()
+
+ // First of all, prefer to keep the original manifest unmodified.
+ if supportedByDest.Contains(srcType) {
+ prioritizedTypes.append(srcType)
+ }
+ if in.cannotModifyManifestReason != "" {
+ // We could also drop this check and have the caller
+ // make the choice; it is already doing that to an extent, to improve error
+ // messages. But it is nice to hide the “if we can't modify, do no conversion”
+ // special case in here; the caller can then worry (or not) only about a good UI.
+ logrus.Debugf("We can't modify the manifest, hoping for the best...")
+ return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
+ }
+
+ // Then use our list of preferred types.
+ for _, t := range preferredManifestMIMETypes {
+ if supportedByDest.Contains(t) {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ // Finally, try anything else the destination supports.
+ for _, t := range destSupportedManifestMIMETypes {
+ if supportedByDest.Contains(t) {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
+ if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen.
+ return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
+ }
+ res := manifestConversionPlan{
+ preferredMIMEType: prioritizedTypes.list[0],
+ otherMIMETypeCandidates: prioritizedTypes.list[1:],
+ }
+ res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
+ if !res.preferredMIMETypeNeedsConversion {
+ logrus.Debugf("... will first try using the original manifest unmodified")
+ }
+ return res, nil
+}
+
+// isMultiImage returns true if img is a list of images
+func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
+ _, mt, err := img.Manifest(ctx)
+ if err != nil {
+ return false, err
+ }
+ return manifest.MIMETypeIsMultiImage(mt), nil
+}
+
+// determineListConversion takes the current MIME type of a list of manifests,
+// the list of MIME types supported for a given destination, and a possible
+// forced value, and returns the MIME type to which we should convert the list
+// of manifests (regardless of whether we are converting to it or using it
+// unmodified) and a slice of other list types which might be supported by the
+// destination.
+func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) {
+ // If there's no list of supported types, then anything we support is expected to be supported.
+ if len(destSupportedMIMETypes) == 0 {
+ destSupportedMIMETypes = manifest.SupportedListMIMETypes
+ }
+ // If we're forcing it, replace the list of supported types with the forced value.
+ if forcedListMIMEType != "" {
+ destSupportedMIMETypes = []string{forcedListMIMEType}
+ }
+
+ prioritizedTypes := newOrderedSet()
+ // The first priority is the current type, if it's in the list, since that lets us avoid a
+ // conversion that isn't strictly necessary.
+ if slices.Contains(destSupportedMIMETypes, currentListMIMEType) {
+ prioritizedTypes.append(currentListMIMEType)
+ }
+ // Pick out the other list types that we support.
+ for _, t := range destSupportedMIMETypes {
+ if manifest.MIMETypeIsMultiImage(t) {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
+ if len(prioritizedTypes.list) == 0 {
+ return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+ }
+ selectedType := prioritizedTypes.list[0]
+ otherSupportedTypes := prioritizedTypes.list[1:]
+ if selectedType != currentListMIMEType {
+ logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
+ } else {
+ logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes)
+ }
+ // Done.
+ return selectedType, otherSupportedTypes, nil
+}
diff --git a/copy/manifest_test.go b/copy/manifest_test.go
new file mode 100644
index 0000000..0a7d654
--- /dev/null
+++ b/copy/manifest_test.go
@@ -0,0 +1,484 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/containers/image/v5/internal/testing/mocks"
+ "github.com/containers/image/v5/manifest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOrderedSet(t *testing.T) {
+ for _, c := range []struct{ input, expected []string }{
+ {[]string{}, []string{}},
+ {[]string{"a", "b", "c"}, []string{"a", "b", "c"}},
+ {[]string{"a", "b", "a", "c"}, []string{"a", "b", "c"}},
+ } {
+ os := newOrderedSet()
+ for _, s := range c.input {
+ os.append(s)
+ }
+ assert.Equal(t, c.expected, os.list, fmt.Sprintf("%#v", c.input))
+ }
+}
+
+func TestDetermineManifestConversion(t *testing.T) {
+ supportS1S2OCI := []string{
+ v1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ }
+ supportS1OCI := []string{
+ v1.MediaTypeImageManifest,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ }
+ supportS1S2 := []string{
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ }
+ supportOnlyS1 := []string{
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ }
+
+ cases := []struct {
+ description string
+ sourceType string
+ destTypes []string
+ expected manifestConversionPlan
+ }{
+ // Destination accepts anything - no conversion necessary
+ {
+ "s1→anything", manifest.DockerV2Schema1SignedMediaType, nil,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ {
+ "s2→anything", manifest.DockerV2Schema2MediaType, nil,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema2MediaType,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ // Destination accepts the unmodified original
+ {
+ "s1→s1s2", manifest.DockerV2Schema1SignedMediaType, supportS1S2,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1MediaType},
+ },
+ },
+ {
+ "s2→s1s2", manifest.DockerV2Schema2MediaType, supportS1S2,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema2MediaType,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: supportOnlyS1,
+ },
+ },
+ {
+ "s1→s1", manifest.DockerV2Schema1SignedMediaType, supportOnlyS1,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{manifest.DockerV2Schema1MediaType},
+ },
+ },
+ // text/plain is normalized to s1, and if the destination accepts s1, no conversion happens.
+ {
+ "text→s1s2", "text/plain", supportS1S2,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1MediaType},
+ },
+ },
+ {
+ "text→s1", "text/plain", supportOnlyS1,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{manifest.DockerV2Schema1MediaType},
+ },
+ },
+ // Conversion necessary, a preferred format is acceptable
+ {
+ "s2→s1", manifest.DockerV2Schema2MediaType, supportOnlyS1,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{manifest.DockerV2Schema1MediaType},
+ },
+ },
+ // Conversion necessary, a preferred format is not acceptable
+ {
+ "s2→OCI", manifest.DockerV2Schema2MediaType, []string{v1.MediaTypeImageManifest},
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ // text/plain is converted if the destination does not accept s1
+ {
+ "text→s2", "text/plain", []string{manifest.DockerV2Schema2MediaType},
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema2MediaType,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ // Conversion necessary, try the preferred formats in order.
+ // We abuse manifest.DockerV2ListMediaType here as a MIME type which is not in supportS1S2OCI,
+ // but is still recognized by manifest.NormalizedMIMEType and not normalized to s1
+ {
+ "special→s2", manifest.DockerV2ListMediaType, supportS1S2OCI,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema2MediaType,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{manifest.DockerV2Schema1SignedMediaType, v1.MediaTypeImageManifest, manifest.DockerV2Schema1MediaType},
+ },
+ },
+ {
+ "special→s1", manifest.DockerV2ListMediaType, supportS1OCI,
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema1MediaType},
+ },
+ },
+ {
+ "special→OCI", manifest.DockerV2ListMediaType, []string{v1.MediaTypeImageManifest, "other options", "with lower priority"},
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{"other options", "with lower priority"},
+ },
+ },
+ }
+
+ for _, c := range cases {
+ res, err := determineManifestConversion(determineManifestConversionInputs{
+ srcMIMEType: c.sourceType,
+ destSupportedManifestMIMETypes: c.destTypes,
+ forceManifestMIMEType: "",
+ requiresOCIEncryption: false,
+ cannotModifyManifestReason: "",
+ })
+ require.NoError(t, err, c.description)
+ assert.Equal(t, c.expected, res, c.description)
+ }
+
+ // Whatever the input is, with cannotModifyManifestReason we return "keep the original as is"
+ for _, c := range cases {
+ res, err := determineManifestConversion(determineManifestConversionInputs{
+ srcMIMEType: c.sourceType,
+ destSupportedManifestMIMETypes: c.destTypes,
+ forceManifestMIMEType: "",
+ requiresOCIEncryption: false,
+ cannotModifyManifestReason: "Preserving digests",
+ })
+ require.NoError(t, err, c.description)
+ assert.Equal(t, manifestConversionPlan{
+ preferredMIMEType: manifest.NormalizedMIMEType(c.sourceType),
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{},
+ }, res, c.description)
+ }
+
+ // With forceManifestMIMEType, the output is always the forced manifest type (in this case oci manifest)
+ for _, c := range cases {
+ res, err := determineManifestConversion(determineManifestConversionInputs{
+ srcMIMEType: c.sourceType,
+ destSupportedManifestMIMETypes: c.destTypes,
+ forceManifestMIMEType: v1.MediaTypeImageManifest,
+ requiresOCIEncryption: false,
+ cannotModifyManifestReason: "",
+ })
+ require.NoError(t, err, c.description)
+ assert.Equal(t, manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{},
+ }, res, c.description)
+ }
+
+ // When encryption is required:
+ for _, c := range []struct {
+ description string
+ in determineManifestConversionInputs // with requiresOCIEncryption implied
+ expected manifestConversionPlan // Or {} to expect a failure
+ }{
+ { // Destination accepts anything - no conversion necessary
+ "OCI→anything",
+ determineManifestConversionInputs{
+ srcMIMEType: v1.MediaTypeImageManifest,
+ destSupportedManifestMIMETypes: nil,
+ },
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ { // Destination accepts anything - need to convert for encryption
+ "s2→anything",
+ determineManifestConversionInputs{
+ srcMIMEType: manifest.DockerV2Schema2MediaType,
+ destSupportedManifestMIMETypes: nil,
+ },
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ // Destination accepts an encrypted format
+ {
+ "OCI→OCI",
+ determineManifestConversionInputs{
+ srcMIMEType: v1.MediaTypeImageManifest,
+ destSupportedManifestMIMETypes: supportS1S2OCI,
+ },
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ {
+ "s2→OCI",
+ determineManifestConversionInputs{
+ srcMIMEType: manifest.DockerV2Schema2MediaType,
+ destSupportedManifestMIMETypes: supportS1S2OCI,
+ },
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ // Destination does not accept an encrypted format
+ {
+ "OCI→s2",
+ determineManifestConversionInputs{
+ srcMIMEType: v1.MediaTypeImageManifest,
+ destSupportedManifestMIMETypes: supportS1S2,
+ },
+ manifestConversionPlan{},
+ },
+ {
+ "s2→s2",
+ determineManifestConversionInputs{
+ srcMIMEType: manifest.DockerV2Schema2MediaType,
+ destSupportedManifestMIMETypes: supportS1S2,
+ },
+ manifestConversionPlan{},
+ },
+ // Whatever the input is, with cannotModifyManifestReason we return "keep the original as is".
+ // Still, encryption is necessarily going to fail…
+ {
+ "OCI→OCI cannotModifyManifestReason",
+ determineManifestConversionInputs{
+ srcMIMEType: v1.MediaTypeImageManifest,
+ destSupportedManifestMIMETypes: supportS1S2OCI,
+ cannotModifyManifestReason: "Preserving digests",
+ },
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ {
+ "s2→OCI cannotModifyManifestReason",
+ determineManifestConversionInputs{
+ srcMIMEType: manifest.DockerV2Schema2MediaType,
+ destSupportedManifestMIMETypes: supportS1S2OCI,
+ cannotModifyManifestReason: "Preserving digests",
+ },
+ manifestConversionPlan{
+ preferredMIMEType: manifest.DockerV2Schema2MediaType,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ // forceManifestMIMEType to a type that supports encryption
+ {
+ "OCI→OCI forced",
+ determineManifestConversionInputs{
+ srcMIMEType: v1.MediaTypeImageManifest,
+ destSupportedManifestMIMETypes: supportS1S2OCI,
+ forceManifestMIMEType: v1.MediaTypeImageManifest,
+ },
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: false,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ {
+ "s2→OCI forced",
+ determineManifestConversionInputs{
+ srcMIMEType: manifest.DockerV2Schema2MediaType,
+ destSupportedManifestMIMETypes: supportS1S2OCI,
+ forceManifestMIMEType: v1.MediaTypeImageManifest,
+ },
+ manifestConversionPlan{
+ preferredMIMEType: v1.MediaTypeImageManifest,
+ preferredMIMETypeNeedsConversion: true,
+ otherMIMETypeCandidates: []string{},
+ },
+ },
+ // forceManifestMIMEType to a type that does not support encryption
+ {
+ "OCI→s2 forced",
+ determineManifestConversionInputs{
+ srcMIMEType: v1.MediaTypeImageManifest,
+ destSupportedManifestMIMETypes: supportS1S2OCI,
+ forceManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ },
+ manifestConversionPlan{},
+ },
+ {
+ "s2→s2 forced",
+ determineManifestConversionInputs{
+ srcMIMEType: manifest.DockerV2Schema2MediaType,
+ destSupportedManifestMIMETypes: supportS1S2OCI,
+ forceManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ },
+ manifestConversionPlan{},
+ },
+ } {
+ in := c.in
+ in.requiresOCIEncryption = true
+ res, err := determineManifestConversion(in)
+ if c.expected.preferredMIMEType != "" {
+ require.NoError(t, err, c.description)
+ assert.Equal(t, c.expected, res, c.description)
+ } else {
+ assert.Error(t, err, c.description)
+ }
+ }
+}
+
+// fakeUnparsedImage is an implementation of types.UnparsedImage which only returns itself as a MIME type in Manifest,
+// except that "" means “reading the manifest should fail”
+type fakeUnparsedImage struct {
+ mocks.ForbiddenUnparsedImage
+ mt string
+}
+
+func (f fakeUnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ if f.mt == "" {
+ return nil, "", errors.New("Manifest() directed to fail")
+ }
+ return nil, f.mt, nil
+}
+
+func TestIsMultiImage(t *testing.T) {
+ // MIME type is available; more or less a smoke test, other cases are handled in manifest.MIMETypeIsMultiImage
+ for _, c := range []struct {
+ mt string
+ expected bool
+ }{
+ {manifest.DockerV2ListMediaType, true},
+ {manifest.DockerV2Schema2MediaType, false},
+ {v1.MediaTypeImageManifest, false},
+ {v1.MediaTypeImageIndex, true},
+ } {
+ src := fakeUnparsedImage{mocks.ForbiddenUnparsedImage{}, c.mt}
+ res, err := isMultiImage(context.Background(), src)
+ require.NoError(t, err)
+ assert.Equal(t, c.expected, res, c.mt)
+ }
+
+ // Error getting manifest MIME type
+ src := fakeUnparsedImage{mocks.ForbiddenUnparsedImage{}, ""}
+ _, err := isMultiImage(context.Background(), src)
+ assert.Error(t, err)
+}
+
+func TestDetermineManifestListConversion(t *testing.T) {
+ supportS1S2OCI := []string{
+ v1.MediaTypeImageIndex,
+ v1.MediaTypeImageManifest,
+ manifest.DockerV2ListMediaType,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ }
+ supportS1S2 := []string{
+ manifest.DockerV2ListMediaType,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ }
+ supportOnlyOCI := []string{
+ v1.MediaTypeImageIndex,
+ v1.MediaTypeImageManifest,
+ }
+ supportOnlyS1 := []string{
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ }
+
+ cases := []struct {
+ description string
+ sourceType string
+ destTypes []string
+ expectedUpdate string
+ expectedOtherCandidates []string
+ }{
+ // Destination accepts anything - try all variants
+ {"s2→anything", manifest.DockerV2ListMediaType, nil, "", []string{v1.MediaTypeImageIndex}},
+ {"OCI→anything", v1.MediaTypeImageIndex, nil, "", []string{manifest.DockerV2ListMediaType}},
+ // Destination accepts the unmodified original
+ {"s2→s1s2OCI", manifest.DockerV2ListMediaType, supportS1S2OCI, "", []string{v1.MediaTypeImageIndex}},
+ {"OCI→s1s2OCI", v1.MediaTypeImageIndex, supportS1S2OCI, "", []string{manifest.DockerV2ListMediaType}},
+ {"s2→s1s2", manifest.DockerV2ListMediaType, supportS1S2, "", []string{}},
+ {"OCI→OCI", v1.MediaTypeImageIndex, supportOnlyOCI, "", []string{}},
+ // Conversion necessary, try the preferred formats in order.
+ {"special→OCI", "unrecognized", supportS1S2OCI, v1.MediaTypeImageIndex, []string{manifest.DockerV2ListMediaType}},
+ {"special→s2", "unrecognized", supportS1S2, manifest.DockerV2ListMediaType, []string{}},
+ }
+
+ for _, c := range cases {
+ copier := &copier{}
+ preferredMIMEType, otherCandidates, err := copier.determineListConversion(c.sourceType, c.destTypes, "")
+ require.NoError(t, err, c.description)
+ if c.expectedUpdate == "" {
+ assert.Equal(t, manifest.NormalizedMIMEType(c.sourceType), preferredMIMEType, c.description)
+ } else {
+ assert.Equal(t, c.expectedUpdate, preferredMIMEType, c.description)
+ }
+ assert.Equal(t, c.expectedOtherCandidates, otherCandidates, c.description)
+ }
+
+ // With forceManifestMIMEType, the output is always the forced manifest type (in this case OCI index)
+ for _, c := range cases {
+ copier := &copier{}
+ preferredMIMEType, otherCandidates, err := copier.determineListConversion(c.sourceType, c.destTypes, v1.MediaTypeImageIndex)
+ require.NoError(t, err, c.description)
+ assert.Equal(t, v1.MediaTypeImageIndex, preferredMIMEType, c.description)
+ assert.Equal(t, []string{}, otherCandidates, c.description)
+ }
+
+ // The destination doesn’t support list formats at all
+ copier := &copier{}
+ _, _, err := copier.determineListConversion(v1.MediaTypeImageIndex, supportOnlyS1, "")
+ assert.Error(t, err)
+}
diff --git a/copy/multiple.go b/copy/multiple.go
new file mode 100644
index 0000000..f252e34
--- /dev/null
+++ b/copy/multiple.go
@@ -0,0 +1,351 @@
+package copy
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+)
+
+type instanceCopyKind int
+
+const (
+ instanceCopyCopy instanceCopyKind = iota
+ instanceCopyClone
+)
+
+type instanceCopy struct {
+ op instanceCopyKind
+ sourceDigest digest.Digest
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyCopy`
+ copyForceCompressionFormat bool
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyClone`
+ cloneCompressionVariant OptionCompressionVariant
+ clonePlatform *imgspecv1.Platform
+ cloneAnnotations map[string]string
+}
+
+// internal type only to make imgspecv1.Platform comparable
+type platformComparable struct {
+ architecture string
+ os string
+ osVersion string
+ osFeatures string
+ variant string
+}
+
+// Converts imgspecv1.Platform to a comparable format.
+func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable {
+ if platform == nil {
+ return platformComparable{}
+ }
+ osFeatures := slices.Clone(platform.OSFeatures)
+ sort.Strings(osFeatures)
+ return platformComparable{architecture: platform.Architecture,
+ os: platform.OS,
+ // This is strictly speaking ambiguous; fields of OSFeatures can contain a ','. Probably good enough for now.
+ osFeatures: strings.Join(osFeatures, ","),
+ osVersion: platform.OSVersion,
+ variant: platform.Variant,
+ }
+}
+
+// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests
+func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) {
+ res := make(map[platformComparable]*set.Set[string])
+ for _, instanceDigest := range instanceDigests {
+ instanceDetails, err := list.Instance(instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+ }
+ platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+ platformSet, ok := res[platform]
+ if !ok {
+ platformSet = set.New[string]()
+ res[platform] = platformSet
+ }
+ platformSet.AddSlice(instanceDetails.ReadOnly.CompressionAlgorithmNames)
+ }
+ return res, nil
+}
+
+func validateCompressionVariantExists(input []OptionCompressionVariant) error {
+ for _, option := range input {
+ _, err := compression.AlgorithmByName(option.Algorithm.Name())
+ if err != nil {
+ return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err)
+ }
+ }
+ return nil
+}
+
+// prepareInstanceCopies prepares a list of instances which need to be copied to the manifest list.
+func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) {
+ res := []instanceCopy{}
+ if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 {
+ // The list can already contain a compressed instance for a compression selected in `EnsureCompressionVariantsExist`.
+ // It’s unclear what it means when `CopySpecificImages` includes an instance in options.Instances,
+ // EnsureCompressionVariantsExist asks for an instance with some compression,
+ // an instance with that compression already exists, but is not included in options.Instances.
+ // We might define the semantics and implement this in the future.
+ return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages")
+ }
+ err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist)
+ if err != nil {
+ return res, err
+ }
+ compressionsByPlatform, err := platformCompressionMap(list, instanceDigests)
+ if err != nil {
+ return nil, err
+ }
+ for i, instanceDigest := range instanceDigests {
+ if options.ImageListSelection == CopySpecificImages &&
+ !slices.Contains(options.Instances, instanceDigest) {
+ logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
+ continue
+ }
+ instanceDetails, err := list.Instance(instanceDigest)
+ if err != nil {
+ return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+ }
+ forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, instanceCopy{
+ op: instanceCopyCopy,
+ sourceDigest: instanceDigest,
+ copyForceCompressionFormat: forceCompressionFormat,
+ })
+ platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+ compressionList := compressionsByPlatform[platform]
+ for _, compressionVariant := range options.EnsureCompressionVariantsExist {
+ if !compressionList.Contains(compressionVariant.Algorithm.Name()) {
+ res = append(res, instanceCopy{
+ op: instanceCopyClone,
+ sourceDigest: instanceDigest,
+ cloneCompressionVariant: compressionVariant,
+ clonePlatform: instanceDetails.ReadOnly.Platform,
+ cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations),
+ })
+ // add current compression to the list so that we don’t create duplicate clones
+ compressionList.Add(compressionVariant.Algorithm.Name())
+ }
+ }
+ }
+ return res, nil
+}
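+
+// Illustrative sketch (editorial addition, not part of the upstream source): for a
+// two-instance list (amd64, arm64) where only amd64 already has a zstd variant,
+// EnsureCompressionVariantsExist: []OptionCompressionVariant{{Algorithm: compression.Zstd}}
+// would roughly produce (clonePlatform and cloneAnnotations elided):
+//
+//    []instanceCopy{
+//        {op: instanceCopyCopy, sourceDigest: amd64Digest},
+//        {op: instanceCopyCopy, sourceDigest: arm64Digest},
+//        {op: instanceCopyClone, sourceDigest: arm64Digest,
+//            cloneCompressionVariant: OptionCompressionVariant{Algorithm: compression.Zstd}},
+//    }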
+
+// copyMultipleImages copies some or all of an image list's instances, using
+// c.policyContext to validate source image admissibility.
+func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) {
+ // Parse the list and get a copy of the original value after it's re-encoded.
+ manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading manifest list: %w", err)
+ }
+ originalList, err := internalManifest.ListFromBlob(manifestList, manifestType)
+ if err != nil {
+ return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
+ }
+ updatedList := originalList.CloneInternal()
+
+ sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel,
+ "Getting image list signatures",
+ "Checking if image list destination supports signatures")
+ if err != nil {
+ return nil, err
+ }
+
+ // If the destination is a digested reference, make a note of that, determine what digest value we're
+ // expecting, and check that the source manifest matches it.
+ destIsDigestedReference := false
+ if named := c.dest.Reference().DockerReference(); named != nil {
+ if digested, ok := named.(reference.Digested); ok {
+ destIsDigestedReference = true
+ matches, err := manifest.MatchesDigest(manifestList, digested.Digest())
+ if err != nil {
+ return nil, fmt.Errorf("computing digest of source image's manifest: %w", err)
+ }
+ if !matches {
+ return nil, errors.New("Digest of source image's manifest would not match destination reference")
+ }
+ }
+ }
+
+ // Determine if we're allowed to modify the manifest list.
+ // If we can, set to the empty string. If we can't, set to the reason why.
+ // Compare, and perhaps keep in sync with, the version in copySingleImage.
+ cannotModifyManifestListReason := ""
+ if len(sigs) > 0 {
+ cannotModifyManifestListReason = "Would invalidate signatures"
+ }
+ if destIsDigestedReference {
+ cannotModifyManifestListReason = "Destination specifies a digest"
+ }
+ if c.options.PreserveDigests {
+ cannotModifyManifestListReason = "Instructed to preserve digests"
+ }
+
+ // Determine if we'll need to convert the manifest list to a different format.
+ forceListMIMEType := c.options.ForceManifestMIMEType
+ switch forceListMIMEType {
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ forceListMIMEType = manifest.DockerV2ListMediaType
+ case imgspecv1.MediaTypeImageManifest:
+ forceListMIMEType = imgspecv1.MediaTypeImageIndex
+ }
+ selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
+ if err != nil {
+ return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err)
+ }
+ if selectedListType != originalList.MIMEType() {
+ if cannotModifyManifestListReason != "" {
+ return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason)
+ }
+ }
+
+ // Copy each image, or just the ones we want to copy, in turn.
+ instanceDigests := updatedList.Instances()
+ instanceEdits := []internalManifest.ListEdit{}
+ instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options)
+ if err != nil {
+ return nil, fmt.Errorf("preparing instances for copy: %w", err)
+ }
+ c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests))
+ for i, instance := range instanceCopyList {
+ // Update instances to be edited by their `ListOperation` and
+ // populate necessary fields.
+ switch instance.op {
+ case instanceCopyCopy:
+ logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
+ c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
+ unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
+ updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat})
+ if err != nil {
+ return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
+ }
+ // Record the result of a possible conversion here.
+ instanceEdits = append(instanceEdits, internalManifest.ListEdit{
+ ListOperation: internalManifest.ListOpUpdate,
+ UpdateOldDigest: instance.sourceDigest,
+ UpdateDigest: updated.manifestDigest,
+ UpdateSize: int64(len(updated.manifest)),
+ UpdateCompressionAlgorithms: updated.compressionAlgorithms,
+ UpdateMediaType: updated.manifestMIMEType})
+ case instanceCopyClone:
+ logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
+ c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
+ unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
+ updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{
+ requireCompressionFormatMatch: true,
+ compressionFormat: &instance.cloneCompressionVariant.Algorithm,
+ compressionLevel: instance.cloneCompressionVariant.Level})
+ if err != nil {
+ return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
+ }
+ // Record the result of a possible conversion here.
+ instanceEdits = append(instanceEdits, internalManifest.ListEdit{
+ ListOperation: internalManifest.ListOpAdd,
+ AddDigest: updated.manifestDigest,
+ AddSize: int64(len(updated.manifest)),
+ AddMediaType: updated.manifestMIMEType,
+ AddPlatform: instance.clonePlatform,
+ AddAnnotations: instance.cloneAnnotations,
+ AddCompressionAlgorithms: updated.compressionAlgorithms,
+ })
+ default:
+ return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op)
+ }
+ }
+
+ // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
+ if err = updatedList.EditInstances(instanceEdits); err != nil {
+ return nil, fmt.Errorf("updating manifest list: %w", err)
+ }
+
+ // Iterate through supported list types, preferred format first.
+ c.Printf("Writing manifest list to image destination\n")
+ var errs []string
+ for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
+ var attemptedList internalManifest.ListPublic = updatedList
+
+ logrus.Debugf("Trying to use manifest list type %s…", thisListType)
+
+ // Perform the list conversion, if we need one.
+ if thisListType != updatedList.MIMEType() {
+ attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
+ if err != nil {
+ return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err)
+ }
+ }
+
+ // Check if the updates or a type conversion meaningfully changed the list of images
+ // by serializing them both so that we can compare them.
+ attemptedManifestList, err := attemptedList.Serialize()
+ if err != nil {
+ return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err)
+ }
+ originalManifestList, err := originalList.Serialize()
+ if err != nil {
+ return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err)
+ }
+
+ // If we can't just use the original value, but we have to change it, flag an error.
+ if !bytes.Equal(attemptedManifestList, originalManifestList) {
+ if cannotModifyManifestListReason != "" {
+ return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason)
+ }
+ logrus.Debugf("Manifest list has been updated")
+ } else {
+ // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest.
+ attemptedManifestList = manifestList
+ }
+
+ // Save the manifest list.
+ err = c.dest.PutManifest(ctx, attemptedManifestList, nil)
+ if err != nil {
+ logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err)
+ errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err))
+ continue
+ }
+ errs = nil
+ manifestList = attemptedManifestList
+ break
+ }
+ if errs != nil {
+ return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
+ }
+
+ // Sign the manifest list.
+ newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity)
+ if err != nil {
+ return nil, err
+ }
+ sigs = append(slices.Clone(sigs), newSigs...)
+
+ c.Printf("Storing list signatures\n")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
+ return nil, fmt.Errorf("writing signatures: %w", err)
+ }
+
+ return manifestList, nil
+}
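
For orientation, copyMultipleImages is reached through the public copy.Image entry point when the source is a manifest list and the caller asks for more than the single runtime-matched instance. A minimal, hypothetical sketch of such a call; the references, registry names, and accept-anything policy below are placeholders, not part of this patch:

    package example

    import (
        "context"

        "github.com/containers/image/v5/copy"
        "github.com/containers/image/v5/signature"
        "github.com/containers/image/v5/transports/alltransports"
    )

    // copyWholeList copies every instance of a manifest list, exercising the
    // copyMultipleImages path above.
    func copyWholeList(ctx context.Context) ([]byte, error) {
        srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
        if err != nil {
            return nil, err
        }
        destRef, err := alltransports.ParseImageName("docker://registry.example.com/mirror/busybox:latest")
        if err != nil {
            return nil, err
        }
        policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
        policyContext, err := signature.NewPolicyContext(policy)
        if err != nil {
            return nil, err
        }
        defer policyContext.Destroy()

        // ImageListSelection: CopyAllImages selects the copyMultipleImages path;
        // the default (CopySystemImage) copies only the runtime-matched instance.
        return copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
            ImageListSelection: copy.CopyAllImages,
        })
    }
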
diff --git a/copy/multiple_test.go b/copy/multiple_test.go
new file mode 100644
index 0000000..f753c24
--- /dev/null
+++ b/copy/multiple_test.go
@@ -0,0 +1,162 @@
+package copy
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// Test `instanceCopyCopy` cases.
+func TestPrepareCopyInstancesforInstanceCopyCopy(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("..", "internal", "manifest", "testdata", "oci1.index.zstd-selection.json"))
+ require.NoError(t, err)
+ list, err := internalManifest.ListFromBlob(validManifest, internalManifest.GuessMIMEType(validManifest))
+ require.NoError(t, err)
+
+ // Test CopyAllImages
+ sourceInstances := []digest.Digest{
+ digest.Digest("sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
+ digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
+ }
+
+ instancesToCopy, err := prepareInstanceCopies(list, sourceInstances, &Options{})
+ require.NoError(t, err)
+ compare := []instanceCopy{}
+
+ for _, instance := range sourceInstances {
+ compare = append(compare, instanceCopy{op: instanceCopyCopy,
+ sourceDigest: instance, copyForceCompressionFormat: false})
+ }
+ assert.Equal(t, instancesToCopy, compare)
+
+ // Test CopySpecificImages where selected instance is sourceInstances[1]
+ instancesToCopy, err = prepareInstanceCopies(list, sourceInstances, &Options{Instances: []digest.Digest{sourceInstances[1]}, ImageListSelection: CopySpecificImages})
+ require.NoError(t, err)
+ compare = []instanceCopy{{op: instanceCopyCopy,
+ sourceDigest: sourceInstances[1]}}
+ assert.Equal(t, instancesToCopy, compare)
+
+ _, err = prepareInstanceCopies(list, sourceInstances, &Options{Instances: []digest.Digest{sourceInstances[1]}, ImageListSelection: CopySpecificImages, ForceCompressionFormat: true})
+ require.EqualError(t, err, "cannot use ForceCompressionFormat with undefined default compression format")
+}
+
+// Test `instanceCopyClone` cases.
+func TestPrepareCopyInstancesforInstanceCopyClone(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("..", "internal", "manifest", "testdata", "oci1.index.zstd-selection.json"))
+ require.NoError(t, err)
+ list, err := internalManifest.ListFromBlob(validManifest, internalManifest.GuessMIMEType(validManifest))
+ require.NoError(t, err)
+
+ // Prepare option for `instanceCopyClone` case.
+ ensureCompressionVariantsExist := []OptionCompressionVariant{{Algorithm: compression.Zstd}}
+
+ sourceInstances := []digest.Digest{
+ digest.Digest("sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
+ digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
+ }
+
+ // CopySpecificImage must fail with error
+ _, err = prepareInstanceCopies(list, sourceInstances, &Options{EnsureCompressionVariantsExist: ensureCompressionVariantsExist,
+ Instances: []digest.Digest{sourceInstances[1]},
+ ImageListSelection: CopySpecificImages})
+ require.EqualError(t, err, "EnsureCompressionVariantsExist is not implemented for CopySpecificImages")
+
+ // Test copying all images with replication
+ instancesToCopy, err := prepareInstanceCopies(list, sourceInstances, &Options{EnsureCompressionVariantsExist: ensureCompressionVariantsExist})
+ require.NoError(t, err)
+
+ // The following test ensures that we
+ // * still copy gzip variants if they exist in the original, and
+ // * do not create new zstd variants if they already exist in the original.
+
+ // We created a list of three instances, `sourceInstances`, and since amd64 already has a zstd
+ // instance in oci1.index.zstd-selection.json (i.e. sourceInstances[1]), no replication should be
+ // created for `sourceInstances[0]` and `sourceInstances[1]`, but one should be created for
+ // `sourceInstances[2]` (`arm64`), while `sourceInstances[2]` itself is still copied.
+ expectedResponse := []simplerInstanceCopy{}
+ for _, instance := range sourceInstances {
+ expectedResponse = append(expectedResponse, simplerInstanceCopy{op: instanceCopyCopy,
+ sourceDigest: instance})
+ // If it's `arm64`, i.e. sourceInstances[2], expect a clone to happen
+ if instance == sourceInstances[2] {
+ expectedResponse = append(expectedResponse, simplerInstanceCopy{op: instanceCopyClone, sourceDigest: instance, cloneCompressionVariant: "zstd", clonePlatform: "arm64-linux-"})
+ }
+ }
+ actualResponse := convertInstanceCopyToSimplerInstanceCopy(instancesToCopy)
+ assert.Equal(t, expectedResponse, actualResponse)
+
+ // Test the option with multiple copy requests for the same compression format;
+ // the expectation above should stay the same if our ensureCompressionVariantsExist requests zstd twice.
+ ensureCompressionVariantsExist = []OptionCompressionVariant{{Algorithm: compression.Zstd}, {Algorithm: compression.Zstd}}
+ instancesToCopy, err = prepareInstanceCopies(list, sourceInstances, &Options{EnsureCompressionVariantsExist: ensureCompressionVariantsExist})
+ require.NoError(t, err)
+ expectedResponse = []simplerInstanceCopy{}
+ for _, instance := range sourceInstances {
+ expectedResponse = append(expectedResponse, simplerInstanceCopy{op: instanceCopyCopy,
+ sourceDigest: instance})
+ // If it's `arm64`, i.e. sourceInstances[2], expect a clone to happen
+ if instance == sourceInstances[2] {
+ expectedResponse = append(expectedResponse, simplerInstanceCopy{op: instanceCopyClone, sourceDigest: instance, cloneCompressionVariant: "zstd", clonePlatform: "arm64-linux-"})
+ }
+ }
+ actualResponse = convertInstanceCopyToSimplerInstanceCopy(instancesToCopy)
+ assert.Equal(t, expectedResponse, actualResponse)
+
+ // Add same instance twice but clone must appear only once.
+ ensureCompressionVariantsExist = []OptionCompressionVariant{{Algorithm: compression.Zstd}}
+ sourceInstances = []digest.Digest{
+ digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
+ digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
+ }
+ instancesToCopy, err = prepareInstanceCopies(list, sourceInstances, &Options{EnsureCompressionVariantsExist: ensureCompressionVariantsExist})
+ require.NoError(t, err)
+ // two copies but clone should happen only once
+ numberOfCopyClone := 0
+ for _, instance := range instancesToCopy {
+ if instance.op == instanceCopyClone {
+ numberOfCopyClone++
+ }
+ }
+ assert.Equal(t, 1, numberOfCopyClone)
+}
+
+// simplerInstanceCopy is a simpler version of `instanceCopy` for testing, where fields are strings
+// instead of pointers
+type simplerInstanceCopy struct {
+ op instanceCopyKind
+ sourceDigest digest.Digest
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyClone`
+ cloneCompressionVariant string
+ clonePlatform string
+ cloneAnnotations map[string]string
+}
+
+func convertInstanceCopyToSimplerInstanceCopy(copies []instanceCopy) []simplerInstanceCopy {
+ res := []simplerInstanceCopy{}
+ for _, instance := range copies {
+ compression := ""
+ platform := ""
+ compression = instance.cloneCompressionVariant.Algorithm.Name()
+ if instance.clonePlatform != nil {
+ platform = instance.clonePlatform.Architecture + "-" + instance.clonePlatform.OS + "-" + instance.clonePlatform.Variant
+ }
+ res = append(res, simplerInstanceCopy{
+ op: instance.op,
+ sourceDigest: instance.sourceDigest,
+ cloneCompressionVariant: compression,
+ clonePlatform: platform,
+ cloneAnnotations: instance.cloneAnnotations,
+ })
+ }
+ return res
+}
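
The options exercised by these tests are public fields of copy.Options. A small sketch of the clone-producing configuration, assuming the caller wants a zstd variant for every platform in the list:

    package example

    import (
        "github.com/containers/image/v5/copy"
        "github.com/containers/image/v5/pkg/compression"
    )

    // zstdEverywhereOptions builds options that make prepareInstanceCopies emit
    // instanceCopyClone entries for platforms that lack a zstd variant, while the
    // existing instances are still copied as instanceCopyCopy.
    func zstdEverywhereOptions() *copy.Options {
        return &copy.Options{
            ImageListSelection: copy.CopyAllImages,
            EnsureCompressionVariantsExist: []copy.OptionCompressionVariant{
                {Algorithm: compression.Zstd},
            },
        }
    }

As the test above shows, EnsureCompressionVariantsExist is rejected when combined with CopySpecificImages.
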
diff --git a/copy/progress_bars.go b/copy/progress_bars.go
new file mode 100644
index 0000000..ce07823
--- /dev/null
+++ b/copy/progress_bars.go
@@ -0,0 +1,160 @@
+package copy
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+ "github.com/vbauerster/mpb/v8"
+ "github.com/vbauerster/mpb/v8/decor"
+)
+
+// newProgressPool creates a *mpb.Progress.
+// The caller must eventually call pool.Wait() after the pool will no longer be updated.
+// NOTE: Every progress bar created within the progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
+func (c *copier) newProgressPool() *mpb.Progress {
+ return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+ if s.Total == 0 {
+ pairFmt := "%.1f / %.1f (skipped: %.1f)"
+ return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
+ }
+ pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
+ percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+ return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+ *mpb.Bar
+ originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo()
+// to print a single line instead.
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// that way we don't leave progress bars in a partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
+ // shortDigestLen is the length of the digest used for blobs.
+ const shortDigestLen = 12
+
+ prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+ // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+ maxPrefixLen := len("Copying blob ") + shortDigestLen
+ if len(prefix) > maxPrefixLen {
+ prefix = prefix[:maxPrefixLen]
+ }
+
+ // onComplete will replace prefix once the bar/spinner has completed
+ onComplete = prefix + " " + onComplete
+
+ // Use a normal progress bar when we know the size (i.e., size > 0).
+ // Otherwise, use a spinner to indicate that something's happening.
+ var bar *mpb.Bar
+ if info.Size > 0 {
+ if partial {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.Any(customPartialBlobDecorFunc),
+ ),
+ )
+ } else {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+ decor.Name(" | "),
+ decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+ ),
+ )
+ }
+ } else {
+ bar = pool.New(0,
+ mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+ ),
+ )
+ }
+ return &progressBar{
+ Bar: bar,
+ originalSize: info.Size,
+ }
+}
+
+// printCopyInfo prints a "Copying ..." message on the copier if the output is
+// set to `io.Discard`. In that case, the progress bars won't be rendered but
+// we still want to indicate when blobs and configs are copied.
+func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
+ if c.progressOutput == io.Discard {
+ c.Printf("Copying %s %s\n", kind, info.Digest)
+ }
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete;
+// it may do so by advancing the current state if it is below the known total.
+func (bar *progressBar) mark100PercentComplete() {
+ if bar.originalSize > 0 {
+ // We can't call bar.SetTotal even if we wanted to; the total can not be changed
+ // after a progress bar is created with a definite total.
+ bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+ } else {
+ // -1 = unknown size
+ // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+ // size (possible at least in theory), in mpb, zero-sized progress bars are treated
+ // as unknown size, in particular they are not configured to be marked as
+ // complete on bar.Current() reaching bar.total (because that would happen already
+ // when creating the progress bar).
+ // That means that we are both _allowed_ to call SetTotal, and we _have to_.
+ bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
+ }
+}
+
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
+// with the number of received bytes.
+type blobChunkAccessorProxy struct {
+ wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+ bar *progressBar // A progress bar updated with the number of bytes read so far
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
+ if err == nil {
+ total := int64(0)
+ for _, c := range chunks {
+ total += int64(c.Length)
+ }
+ s.bar.IncrInt64(total)
+ }
+ return rc, errs, err
+}
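
The lifecycle rule stated in the comments above (every bar must complete or be aborted before pool.Wait() is called) can be illustrated with a minimal, self-contained mpb/v8 sketch; the size and label here are made up:

    package example

    import (
        "github.com/vbauerster/mpb/v8"
        "github.com/vbauerster/mpb/v8/decor"
    )

    func demoBarLifecycle() {
        pool := mpb.New(mpb.WithWidth(40))
        defer pool.Wait() // runs last: waits until every bar has completed or been aborted

        bar := pool.AddBar(1024,
            mpb.PrependDecorators(decor.Name("Copying blob deadbeef ")),
        )
        defer bar.Abort(false) // deferred after pool.Wait(), so it runs first and Wait() cannot hang

        bar.SetCurrent(1024) // rough equivalent of mark100PercentComplete for a known size
    }

Because deferred calls run last-in-first-out, the Abort is guaranteed to happen before Wait even if the function returns early.
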
diff --git a/copy/progress_channel.go b/copy/progress_channel.go
new file mode 100644
index 0000000..d5e9e09
--- /dev/null
+++ b/copy/progress_channel.go
@@ -0,0 +1,79 @@
+package copy
+
+import (
+ "io"
+ "time"
+
+ "github.com/containers/image/v5/types"
+)
+
+// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval.
+type progressReader struct {
+ source io.Reader
+ channel chan<- types.ProgressProperties
+ interval time.Duration
+ artifact types.BlobInfo
+ lastUpdate time.Time
+ offset uint64
+ offsetUpdate uint64
+}
+
+// newProgressReader creates a new progress reader for:
+// `source`: The source from which bytes are read
+// `channel`: The reporter channel to which the progress will be sent
+// `interval`: The update interval indicating how often progress should be reported
+// `artifact`: The metadata of the blob currently being copied
+func newProgressReader(
+ source io.Reader,
+ channel chan<- types.ProgressProperties,
+ interval time.Duration,
+ artifact types.BlobInfo,
+) *progressReader {
+ // The progress reader constructor informs the progress channel
+ // that a new artifact will be read
+ channel <- types.ProgressProperties{
+ Event: types.ProgressEventNewArtifact,
+ Artifact: artifact,
+ }
+ return &progressReader{
+ source: source,
+ channel: channel,
+ interval: interval,
+ artifact: artifact,
+ lastUpdate: time.Now(),
+ offset: 0,
+ offsetUpdate: 0,
+ }
+}
+
+// reportDone indicates to the internal channel that the progress has been
+// finished
+func (r *progressReader) reportDone() {
+ r.channel <- types.ProgressProperties{
+ Event: types.ProgressEventDone,
+ Artifact: r.artifact,
+ Offset: r.offset,
+ OffsetUpdate: r.offsetUpdate,
+ }
+}
+
+// Read reads bytes from the underlying source and reports the
+// progress via the internal channel
+func (r *progressReader) Read(p []byte) (int, error) {
+ n, err := r.source.Read(p)
+ r.offset += uint64(n)
+ r.offsetUpdate += uint64(n)
+
+ // Send a progress update if the configured interval has elapsed
+ if time.Since(r.lastUpdate) > r.interval {
+ r.channel <- types.ProgressProperties{
+ Event: types.ProgressEventRead,
+ Artifact: r.artifact,
+ Offset: r.offset,
+ OffsetUpdate: r.offsetUpdate,
+ }
+ r.lastUpdate = time.Now()
+ r.offsetUpdate = 0
+ }
+ return n, err
+}
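
progressReader is internal; callers feed it by setting the Progress channel and ProgressInterval fields of copy.Options. A hedged sketch of the consuming side, with purely illustrative printing:

    package example

    import (
        "fmt"
        "time"

        "github.com/containers/image/v5/copy"
        "github.com/containers/image/v5/types"
    )

    // progressOptions returns options that report per-blob progress roughly twice a second.
    func progressOptions() *copy.Options {
        ch := make(chan types.ProgressProperties)
        go func() {
            // The events below mirror the ones sent by progressReader above.
            for p := range ch {
                switch p.Event {
                case types.ProgressEventNewArtifact:
                    fmt.Printf("starting %s\n", p.Artifact.Digest)
                case types.ProgressEventRead:
                    fmt.Printf("%s: %d bytes so far\n", p.Artifact.Digest, p.Offset)
                case types.ProgressEventDone:
                    fmt.Printf("finished %s\n", p.Artifact.Digest)
                }
            }
        }()
        return &copy.Options{
            Progress:         ch,
            ProgressInterval: 500 * time.Millisecond,
        }
    }
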
diff --git a/copy/progress_channel_test.go b/copy/progress_channel_test.go
new file mode 100644
index 0000000..86f31c8
--- /dev/null
+++ b/copy/progress_channel_test.go
@@ -0,0 +1,80 @@
+package copy
+
+import (
+ "bytes"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+)
+
+func newSUT(
+ t *testing.T,
+ reader io.Reader,
+ duration time.Duration,
+ channel chan types.ProgressProperties,
+) *progressReader {
+ artifact := types.BlobInfo{Size: 10}
+
+ go func() {
+ res := <-channel
+ assert.Equal(t, res.Event, types.ProgressEventNewArtifact)
+ assert.Equal(t, res.Artifact, artifact)
+ }()
+ res := newProgressReader(reader, channel, duration, artifact)
+
+ return res
+}
+
+func TestNewProgressReader(t *testing.T) {
+ // Given
+ channel := make(chan types.ProgressProperties)
+ sut := newSUT(t, nil, time.Second, channel)
+ assert.NotNil(t, sut)
+
+ // When/Then
+ go func() {
+ res := <-channel
+ assert.Equal(t, res.Event, types.ProgressEventDone)
+ }()
+ sut.reportDone()
+}
+
+func TestReadWithoutEvent(t *testing.T) {
+ // Given
+ channel := make(chan types.ProgressProperties)
+ reader := bytes.NewReader([]byte{0, 1, 2})
+ sut := newSUT(t, reader, time.Second, channel)
+ assert.NotNil(t, sut)
+
+ // When
+ b := []byte{0, 1, 2, 3, 4}
+ read, err := reader.Read(b)
+
+ // Then
+ assert.Nil(t, err)
+ assert.Equal(t, read, 3)
+}
+
+func TestReadWithEvent(t *testing.T) {
+ // Given
+ channel := make(chan types.ProgressProperties)
+ reader := bytes.NewReader([]byte{0, 1, 2, 3, 4, 5, 6})
+ sut := newSUT(t, reader, time.Nanosecond, channel)
+ assert.NotNil(t, sut)
+ b := []byte{0, 1, 2, 3, 4}
+
+ // When/Then
+ go func() {
+ res := <-channel
+ assert.Equal(t, res.Event, types.ProgressEventRead)
+ assert.Equal(t, res.Offset, uint64(5))
+ assert.Equal(t, res.OffsetUpdate, uint64(5))
+ }()
+ read, err := reader.Read(b)
+ assert.Equal(t, read, 5)
+ assert.Nil(t, err)
+
+}
diff --git a/copy/sign.go b/copy/sign.go
new file mode 100644
index 0000000..0ec54de
--- /dev/null
+++ b/copy/sign.go
@@ -0,0 +1,115 @@
+package copy
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
+ internalsig "github.com/containers/image/v5/internal/signature"
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/signature/sigstore"
+ "github.com/containers/image/v5/signature/simplesigning"
+ "github.com/containers/image/v5/transports"
+)
+
+// setupSigners initializes c.signers.
+func (c *copier) setupSigners() error {
+ c.signers = append(c.signers, c.options.Signers...)
+ // c.signersToClose is intentionally not updated with c.options.Signers.
+
+ // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need
+ // to clean up any created signers on failure.
+
+ if c.options.SignBy != "" {
+ opts := []simplesigning.Option{
+ simplesigning.WithKeyFingerprint(c.options.SignBy),
+ }
+ if c.options.SignPassphrase != "" {
+ opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase))
+ }
+ signer, err := simplesigning.NewSigner(opts...)
+ if err != nil {
+ return err
+ }
+ c.signers = append(c.signers, signer)
+ c.signersToClose = append(c.signersToClose, signer)
+ }
+
+ if c.options.SignBySigstorePrivateKeyFile != "" {
+ signer, err := sigstore.NewSigner(
+ sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase),
+ )
+ if err != nil {
+ return err
+ }
+ c.signers = append(c.signers, signer)
+ c.signersToClose = append(c.signersToClose, signer)
+ }
+
+ return nil
+}
+
+// sourceSignatures returns signatures from unparsedSource,
+// and verifies that they can be used (to avoid copying a large image when we
+// can tell in advance that it would ultimately fail)
+func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage,
+ gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) {
+ var sigs []internalsig.Signature
+ if c.options.RemoveSignatures {
+ sigs = []internalsig.Signature{}
+ } else {
+ c.Printf("%s\n", gettingSignaturesMessage)
+ s, err := unparsed.UntrustedSignatures(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading signatures: %w", err)
+ }
+ sigs = s
+ }
+ if len(sigs) != 0 {
+ c.Printf("%s\n", checkingDestMessage)
+ if err := c.dest.SupportsSignatures(ctx); err != nil {
+ return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err)
+ }
+ }
+ return sigs, nil
+}
+
+// createSignatures creates signatures for manifest and an optional identity.
+func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) {
+ if len(c.signers) == 0 {
+ // We must exit early here, otherwise copies with no Docker reference wouldn’t be possible.
+ return nil, nil
+ }
+
+ if identity != nil {
+ if reference.IsNameOnly(identity) {
+ return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String())
+ }
+ } else {
+ identity = c.dest.Reference().DockerReference()
+ if identity == nil {
+ return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
+ }
+ }
+
+ res := make([]internalsig.Signature, 0, len(c.signers))
+ for signerIndex, signer := range c.signers {
+ msg := internalSigner.ProgressMessage(signer)
+ if len(c.signers) == 1 {
+ c.Printf("Creating signature: %s\n", msg)
+ } else {
+ c.Printf("Creating signature %d: %s\n", signerIndex+1, msg)
+ }
+ newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity)
+ if err != nil {
+ if len(c.signers) == 1 {
+ return nil, fmt.Errorf("creating signature: %w", err)
+ } else {
+ return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err)
+ }
+ }
+ res = append(res, newSig)
+ }
+ return res, nil
+}
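
setupSigners reads its configuration from copy.Options. A hypothetical sketch of the two built-in paths it wires up; the fingerprint, key path, and passphrases are placeholders:

    package example

    import "github.com/containers/image/v5/copy"

    // signingOptions requests both a simple-signing signature and a sigstore
    // signature for every copied image.
    func signingOptions() *copy.Options {
        return &copy.Options{
            // Simple signing with a GPG key, identified by its fingerprint.
            SignBy:         "0123456789ABCDEF",
            SignPassphrase: "gpg-key-passphrase",

            // Sigstore signing with a private key file.
            SignBySigstorePrivateKeyFile:     "/path/to/sigstore.key",
            SignSigstorePrivateKeyPassphrase: []byte("sigstore-key-passphrase"),
        }
    }
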
diff --git a/copy/sign_test.go b/copy/sign_test.go
new file mode 100644
index 0000000..49f3fce
--- /dev/null
+++ b/copy/sign_test.go
@@ -0,0 +1,163 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/containers/image/v5/directory"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagedestination"
+ internalsig "github.com/containers/image/v5/internal/signature"
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/signature/signer"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// stubSignerImpl is a signer.SigningImplementation that allows us to check the signed identity, without the overhead of actually signing.
+// We abuse internalsig.Sigstore to store the signed manifest and identity in the payload and MIME type fields, respectively.
+type stubSignerImpl struct {
+ signingFailure error // if set, SignImageManifest returns this
+}
+
+func (s *stubSignerImpl) ProgressMessage() string {
+ return "Signing with stubSigner"
+}
+
+func (s *stubSignerImpl) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalsig.Signature, error) {
+ if s.signingFailure != nil {
+ return nil, s.signingFailure
+ }
+ return internalsig.SigstoreFromComponents(dockerReference.String(), m, nil), nil
+}
+
+func (s *stubSignerImpl) Close() error {
+ return nil
+}
+
+func TestCreateSignatures(t *testing.T) {
+ stubSigner := internalSigner.NewSigner(&stubSignerImpl{})
+ defer stubSigner.Close()
+
+ manifestBlob := []byte("Something")
+ // Set up dir: and docker: destinations
+ tempDir := t.TempDir()
+ dirRef, err := directory.NewReference(tempDir)
+ require.NoError(t, err)
+ dirDest, err := dirRef.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dirDest.Close()
+ dockerRef, err := docker.ParseReference("//busybox")
+ require.NoError(t, err)
+ dockerDest, err := dockerRef.NewImageDestination(context.Background(),
+ &types.SystemContext{RegistriesDirPath: "/this/does/not/exist", DockerPerHostCertDirPath: "/this/does/not/exist"})
+ require.NoError(t, err)
+ defer dockerDest.Close()
+
+ workingOptions := Options{Signers: []*signer.Signer{stubSigner}}
+ for _, cc := range []struct {
+ name string
+ dest types.ImageDestination
+ options *Options
+ identity string
+ successWithNoSigs bool
+ successfullySignedIdentity string // Set to expect a successful signing with workingOptions
+ }{
+ {
+ name: "signing fails",
+ dest: dockerDest,
+ options: &Options{
+ Signers: []*signer.Signer{
+ internalSigner.NewSigner(&stubSignerImpl{signingFailure: errors.New("fails")}),
+ },
+ },
+ },
+ {
+ name: "second signing fails",
+ dest: dockerDest,
+ options: &Options{
+ Signers: []*signer.Signer{
+ stubSigner,
+ internalSigner.NewSigner(&stubSignerImpl{signingFailure: errors.New("fails")}),
+ },
+ },
+ },
+ {
+ name: "not a full reference",
+ dest: dockerDest,
+ identity: "myregistry.io/myrepo",
+ },
+ {
+ name: "dir: with no identity specified, but no signing request",
+ dest: dirDest,
+ options: &Options{},
+ successWithNoSigs: true,
+ },
+
+ {
+ name: "dir: with no identity specified",
+ dest: dirDest,
+ identity: "",
+ },
+ {
+ name: "dir: with overridden identity",
+ dest: dirDest,
+ identity: "myregistry.io/myrepo:mytag",
+ successfullySignedIdentity: "myregistry.io/myrepo:mytag",
+ },
+ {
+ name: "docker:// without overriding the identity",
+ dest: dockerDest,
+ identity: "",
+ successfullySignedIdentity: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "docker:// with overridden identity",
+ dest: dockerDest,
+ identity: "myregistry.io/myrepo:mytag",
+ successfullySignedIdentity: "myregistry.io/myrepo:mytag",
+ },
+ } {
+ var identity reference.Named = nil
+ if cc.identity != "" {
+ i, err := reference.ParseNormalizedNamed(cc.identity)
+ require.NoError(t, err, cc.name)
+ identity = i
+ }
+ options := cc.options
+ if options == nil {
+ options = &workingOptions
+ }
+
+ c := &copier{
+ dest: imagedestination.FromPublic(cc.dest),
+ options: options,
+ reportWriter: io.Discard,
+ }
+ defer c.close()
+ err := c.setupSigners()
+ require.NoError(t, err, cc.name)
+ sigs, err := c.createSignatures(context.Background(), manifestBlob, identity)
+ switch {
+ case cc.successfullySignedIdentity != "":
+ require.NoError(t, err, cc.name)
+ require.Len(t, sigs, 1, cc.name)
+ stubSig, ok := sigs[0].(internalsig.Sigstore)
+ require.True(t, ok, cc.name)
+ // Compare how stubSignerImpl.SignImageManifest stuffs the signing parameters into these fields.
+ assert.Equal(t, manifestBlob, stubSig.UntrustedPayload(), cc.name)
+ assert.Equal(t, cc.successfullySignedIdentity, stubSig.UntrustedMIMEType(), cc.name)
+
+ case cc.successWithNoSigs:
+ require.NoError(t, err, cc.name)
+ require.Empty(t, sigs, cc.name)
+
+ default:
+ assert.Error(t, err, cc.name)
+ }
+ }
+}
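
The "overridden identity" cases above correspond to the public SignIdentity option consumed by createSignatures. A small sketch, with a placeholder repository name:

    package example

    import (
        "github.com/containers/image/v5/copy"
        "github.com/containers/image/v5/docker/reference"
    )

    // identityOverrideOptions signs the manifest as if it were pulled from a name
    // other than the destination reference. The reference must be fully specified
    // (name plus tag or digest); name-only references are rejected by createSignatures.
    func identityOverrideOptions() (*copy.Options, error) {
        identity, err := reference.ParseNormalizedNamed("myregistry.io/myrepo:mytag")
        if err != nil {
            return nil, err
        }
        return &copy.Options{SignIdentity: identity}, nil
    }
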
diff --git a/copy/single.go b/copy/single.go
new file mode 100644
index 0000000..67ca43f
--- /dev/null
+++ b/copy/single.go
@@ -0,0 +1,907 @@
+package copy
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/pkg/platform"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/vbauerster/mpb/v8"
+ "golang.org/x/exp/slices"
+)
+
+// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
+type imageCopier struct {
+ c *copier
+ manifestUpdates *types.ManifestUpdateOptions
+ src *image.SourcedImage
+ diffIDsAreNeeded bool
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+ canSubstituteBlobs bool
+ compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
+ compressionLevel *int
+ requireCompressionFormatMatch bool
+}
+
+type copySingleImageOptions struct {
+ requireCompressionFormatMatch bool
+ compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
+ compressionLevel *int
+}
+
+// copySingleImageResult carries data produced by copySingleImage
+type copySingleImageResult struct {
+ manifest []byte
+ manifestMIMEType string
+ manifestDigest digest.Digest
+ compressionAlgorithms []compressiontypes.Algorithm
+}
+
+// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate
+// source image admissibility.
+func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) {
+ // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
+ // Make sure we fail cleanly in such cases.
+ multiImage, err := isMultiImage(ctx, unparsedImage)
+ if err != nil {
+ // FIXME FIXME: How to name a reference for the sub-image?
+ return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
+ }
+ if multiImage {
+ return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
+ }
+
+ // Please keep this policy check BEFORE reading any other information about the image.
+ // (The multiImage check above only matches the MIME type, which we have received anyway.
+ // Actual parsing of anything should be deferred.)
+ if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
+ return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err)
+ }
+ src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage)
+ if err != nil {
+ return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
+ }
+
+ // If the destination is a digested reference, make a note of that, determine what digest value we're
+ // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
+ // one item from a manifest list that matches it, accept that as a match.
+ destIsDigestedReference := false
+ if named := c.dest.Reference().DockerReference(); named != nil {
+ if digested, ok := named.(reference.Digested); ok {
+ destIsDigestedReference = true
+ matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
+ if err != nil {
+ return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
+ }
+ if !matches {
+ manifestList, _, err := c.unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err)
+ }
+ matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
+ if err != nil {
+ return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
+ }
+ if !matches {
+ return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference")
+ }
+ }
+ }
+ }
+
+ if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
+ return copySingleImageResult{}, err
+ }
+
+ sigs, err := c.sourceSignatures(ctx, src,
+ "Getting image source signatures",
+ "Checking if image destination supports signatures")
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+
+ // Determine if we're allowed to modify the manifest.
+ // If we can, set to the empty string. If we can't, set to the reason why.
+ // Compare, and perhaps keep in sync with, the version in copyMultipleImages.
+ cannotModifyManifestReason := ""
+ if len(sigs) > 0 {
+ cannotModifyManifestReason = "Would invalidate signatures"
+ }
+ if destIsDigestedReference {
+ cannotModifyManifestReason = "Destination specifies a digest"
+ }
+ if c.options.PreserveDigests {
+ cannotModifyManifestReason = "Instructed to preserve digests"
+ }
+
+ ic := imageCopier{
+ c: c,
+ manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
+ src: src,
+ // diffIDsAreNeeded is computed later
+ cannotModifyManifestReason: cannotModifyManifestReason,
+ requireCompressionFormatMatch: opts.requireCompressionFormatMatch,
+ }
+ if opts.compressionFormat != nil {
+ ic.compressionFormat = opts.compressionFormat
+ ic.compressionLevel = opts.compressionLevel
+ } else if c.options.DestinationCtx != nil {
+ // Note that compressionFormat and compressionLevel can be nil.
+ ic.compressionFormat = c.options.DestinationCtx.CompressionFormat
+ ic.compressionLevel = c.options.DestinationCtx.CompressionLevel
+ }
+ // Decide whether we can substitute blobs with semantic equivalents:
+ // - Don’t do that if we can’t modify the manifest at all
+ // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
+ ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
+
+ if err := ic.updateEmbeddedDockerReference(); err != nil {
+ return copySingleImageResult{}, err
+ }
+
+ destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil
+
+ manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
+ srcMIMEType: ic.src.ManifestMIMEType,
+ destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
+ forceManifestMIMEType: c.options.ForceManifestMIMEType,
+ requiresOCIEncryption: destRequiresOciEncryption,
+ cannotModifyManifestReason: ic.cannotModifyManifestReason,
+ })
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+ // We set up this part of ic.manifestUpdates quite early, not just around the
+ // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
+ // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
+ // on the expected destination format.
+ if manifestConversionPlan.preferredMIMETypeNeedsConversion {
+ ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
+ }
+
+ // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
+ ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
+
+ // If enabled, fetch and compare the destination's manifest; as an optimization, skip updating the destination if they are equal.
+ if c.options.OptimizeDestinationImageAlreadyExists {
+ shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
+ noPendingManifestUpdates := ic.noPendingManifestUpdates()
+
+ logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
+ if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
+ matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
+ if err != nil {
+ logrus.Warnf("Failed to compare destination image manifest: %v", err)
+ return copySingleImageResult{}, err
+ }
+
+ if matchedResult != nil {
+ c.Printf("Skipping: image already present at destination\n")
+ return *matchedResult, nil
+ }
+ }
+ }
+
+ compressionAlgos, err := ic.copyLayers(ctx)
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+
+ // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
+ // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
+ // without actually trying to upload something and getting a types.ManifestTypeRejectedError.
+ // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
+ // we're altering how they're compressed. If the process succeeds, fine…
+ manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+ wipResult := copySingleImageResult{
+ manifest: manifestBytes,
+ manifestMIMEType: manifestConversionPlan.preferredMIMEType,
+ manifestDigest: manifestDigest,
+ }
+ if err != nil {
+ logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
+ // … if it fails, and the failure is either because the manifest is rejected by the registry, or
+ // because we failed to create a manifest of the specified type because the specific manifest type
+ // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
+ // have other options available that could still succeed.
+ var manifestTypeRejectedError types.ManifestTypeRejectedError
+ var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
+ isManifestRejected := errors.As(err, &manifestTypeRejectedError)
+ isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
+ if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
+ // We don’t have other options.
+ // In principle the code below would handle this as well, but the resulting error message is fairly ugly.
+ // Don’t bother the user with MIME types if we have no choice.
+ return copySingleImageResult{}, err
+ }
+ // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
+ // So if we are here, we will definitely be trying to convert the manifest.
+ // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
+ // so let’s bail out early and with a better error message.
+ if ic.cannotModifyManifestReason != "" {
+ return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
+ }
+
+ // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
+ errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
+ for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
+ logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
+ ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
+ attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+ if err != nil {
+ logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
+ errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
+ continue
+ }
+
+ // We have successfully uploaded a manifest.
+ wipResult = copySingleImageResult{
+ manifest: attemptedManifest,
+ manifestMIMEType: manifestMIMEType,
+ manifestDigest: attemptedManifestDigest,
+ }
+ errs = nil // Mark this as a success so that we don't abort below.
+ break
+ }
+ if errs != nil {
+ return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
+ }
+ }
+ if targetInstance != nil {
+ targetInstance = &wipResult.manifestDigest
+ }
+
+ newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity)
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+ sigs = append(slices.Clone(sigs), newSigs...)
+
+ if len(sigs) > 0 {
+ c.Printf("Storing signatures\n")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
+ return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err)
+ }
+ }
+ wipResult.compressionAlgorithms = compressionAlgos
+ res := wipResult // We are done
+ return res, nil
+}
+
+// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
+func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
+ if dest.MustMatchRuntimeOS() {
+ c, err := src.OCIConfig(ctx)
+ if err != nil {
+ return fmt.Errorf("parsing image configuration: %w", err)
+ }
+ wantedPlatforms, err := platform.WantedPlatforms(sys)
+ if err != nil {
+ return fmt.Errorf("getting current platform information %#v: %w", sys, err)
+ }
+
+ options := newOrderedSet()
+ match := false
+ for _, wantedPlatform := range wantedPlatforms {
+ // For a transitional period, this might trigger warnings because the Variant
+ // field was added to OCI config only recently. If this turns out to be too noisy,
+ // revert this check to only look for (OS, Architecture).
+ if platform.MatchesPlatform(c.Platform, wantedPlatform) {
+ match = true
+ break
+ }
+ options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant))
+ }
+ if !match {
+ logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
+ c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
+ }
+ }
+ return nil
+}
+
+// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
+func (ic *imageCopier) updateEmbeddedDockerReference() error {
+ if ic.c.dest.IgnoresEmbeddedDockerReference() {
+ return nil // Destination would prefer us not to update the embedded reference.
+ }
+ destRef := ic.c.dest.Reference().DockerReference()
+ if destRef == nil {
+ return nil // Destination does not care about Docker references
+ }
+ if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
+ return nil // No reference embedded in the manifest, or it matches destRef already.
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
+ transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
+ }
+ ic.manifestUpdates.EmbeddedDockerReference = destRef
+ return nil
+}
+
+func (ic *imageCopier) noPendingManifestUpdates() bool {
+ return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
+}
+
+// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the
+// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise.
+func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) {
+ srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob)
+ if err != nil {
+ return nil, fmt.Errorf("calculating manifest digest: %w", err)
+ }
+
+ destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
+ if err != nil {
+ logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
+ return nil, nil
+ }
+ defer destImageSource.Close()
+
+ destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
+ if err != nil {
+ logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
+ return nil, nil
+ }
+
+ destManifestDigest, err := manifest.Digest(destManifest)
+ if err != nil {
+ return nil, fmt.Errorf("calculating manifest digest: %w", err)
+ }
+
+ logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
+ if srcManifestDigest != destManifestDigest {
+ return nil, nil
+ }
+
+ compressionAlgos := set.New[string]()
+ for _, srcInfo := range ic.src.LayerInfos() {
+ if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil {
+ compressionAlgos.Add(c.Name())
+ }
+ }
+
+ algos, err := algorithmsByNames(compressionAlgos.Values())
+ if err != nil {
+ return nil, err
+ }
+
+ // Destination and source manifests, types and digests should all be equivalent
+ return &copySingleImageResult{
+ manifest: destManifest,
+ manifestMIMEType: destManifestType,
+ manifestDigest: srcManifestDigest,
+ compressionAlgorithms: algos,
+ }, nil
+}
+
+// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
+func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
+ srcInfos := ic.src.LayerInfos()
+ numLayers := len(srcInfos)
+ updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
+ if err != nil {
+ return nil, err
+ }
+ srcInfosUpdated := false
+ if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
+ }
+ srcInfos = updatedSrcInfos
+ srcInfosUpdated = true
+ }
+
+ type copyLayerData struct {
+ destInfo types.BlobInfo
+ diffID digest.Digest
+ err error
+ }
+
+ // The manifest is used to determine whether a given
+ // layer is empty.
+ man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+ manifestLayerInfos := man.LayerInfos()
+
+ // copyGroup is used to determine if all layers are copied
+ copyGroup := sync.WaitGroup{}
+
+ data := make([]copyLayerData, numLayers)
+ copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
+ defer copyGroup.Done()
+ cld := copyLayerData{}
+ if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+ // DiffIDs are, currently, needed only when converting from schema1.
+ // In which case src.LayerInfos will not have URLs because schema1
+ // does not support them.
+ if ic.diffIDsAreNeeded {
+ cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
+ } else {
+ cld.destInfo = srcLayer
+ logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
+ }
+ } else {
+ cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer)
+ }
+ data[index] = cld
+ }
+
+ // Decide which layers to encrypt
+ layersToEncrypt := set.New[int]()
+ var encryptAll bool
+ if ic.c.options.OciEncryptLayers != nil {
+ encryptAll = len(*ic.c.options.OciEncryptLayers) == 0
+ totalLayers := len(srcInfos)
+ for _, l := range *ic.c.options.OciEncryptLayers {
+ switch {
+ case l >= 0 && l < totalLayers:
+ layersToEncrypt.Add(l)
+ case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers
+ layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed.
+ default:
+ return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers)
+ }
+ }
+
+ if encryptAll {
+ for i := 0; i < len(srcInfos); i++ {
+ layersToEncrypt.Add(i)
+ }
+ }
+ }
+
+ if err := func() error { // A scope for defer
+ progressPool := ic.c.newProgressPool()
+ defer progressPool.Wait()
+
+ // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool.
+ defer copyGroup.Wait()
+
+ for i, srcLayer := range srcInfos {
+ err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1)
+ if err != nil {
+ // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
+ return fmt.Errorf("copying layer: %w", err)
+ }
+ copyGroup.Add(1)
+ go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference())
+ }
+
+ // A call to copyGroup.Wait() is done at this point by the defer above.
+ return nil
+ }(); err != nil {
+ return nil, err
+ }
+
+ compressionAlgos := set.New[string]()
+ destInfos := make([]types.BlobInfo, numLayers)
+ diffIDs := make([]digest.Digest, numLayers)
+ for i, cld := range data {
+ if cld.err != nil {
+ return nil, cld.err
+ }
+ if cld.destInfo.CompressionAlgorithm != nil {
+ compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name())
+ }
+ destInfos[i] = cld.destInfo
+ diffIDs[i] = cld.diffID
+ }
+
+ // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the
+ // OptimizeDestinationImageAlreadyExists short-circuit conditions
+ ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
+ if ic.diffIDsAreNeeded {
+ ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
+ }
+ if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
+ ic.manifestUpdates.LayerInfos = destInfos
+ }
+ algos, err := algorithmsByNames(compressionAlgos.Values())
+ if err != nil {
+ return nil, err
+ }
+ return algos, nil
+}
+
+// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possibly other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+ return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool {
+ return a.Digest == b.Digest
+ })
+}
+
+// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
+// stores the resulting config and manifest to the destination, and returns the stored manifest
+// and its digest.
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
+ var pendingImage types.Image = ic.src
+ if !ic.noPendingManifestUpdates() {
+ if ic.cannotModifyManifestReason != "" {
+ return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
+ }
+ if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
+ // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
+ // So, this can only happen if we are trying to upload using one of the other MIME type candidates.
+ // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
+ // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
+ // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
+ // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
+ return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+ }
+ pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
+ if err != nil {
+ return nil, "", fmt.Errorf("creating an updated image manifest: %w", err)
+ }
+ pendingImage = pi
+ }
+ man, _, err := pendingImage.Manifest(ctx)
+ if err != nil {
+ return nil, "", fmt.Errorf("reading manifest: %w", err)
+ }
+
+ if err := ic.copyConfig(ctx, pendingImage); err != nil {
+ return nil, "", err
+ }
+
+ ic.c.Printf("Writing manifest to image destination\n")
+ manifestDigest, err := manifest.Digest(man)
+ if err != nil {
+ return nil, "", err
+ }
+ if instanceDigest != nil {
+ instanceDigest = &manifestDigest
+ }
+ if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
+ logrus.Debugf("Error %v while writing manifest %q", err, string(man))
+ return nil, "", fmt.Errorf("writing manifest: %w", err)
+ }
+ return man, manifestDigest, nil
+}
+
+// copyConfig copies config.json, if any, from src to dest.
+func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
+ srcInfo := src.ConfigInfo()
+ if srcInfo.Digest != "" {
+ if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+ // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
+ return fmt.Errorf("copying config: %w", err)
+ }
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
+
+ destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+ progressPool := ic.c.newProgressPool()
+ defer progressPool.Wait()
+ bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+ defer bar.Abort(false)
+ ic.c.printCopyInfo("config", srcInfo)
+
+ configBlob, err := src.ConfigBlob(ctx)
+ if err != nil {
+ return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
+ }
+
+ destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+
+ bar.mark100PercentComplete()
+ return destInfo, nil
+ }()
+ if err != nil {
+ return err
+ }
+ if destInfo.Digest != srcInfo.Digest {
+ return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+ }
+ }
+ return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+ digest digest.Digest
+ err error
+}
+
+func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm {
+ // This MIME type → compression mapping belongs in manifest-specific code in our manifest
+ // package (but we should preferably replace/change UpdatedImage instead of productizing
+ // this workaround).
+ switch srcInfo.MediaType {
+ case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
+ return &compression.Gzip
+ case imgspecv1.MediaTypeImageLayerZstd:
+ return &compression.Zstd
+ }
+ return nil
+}
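+
+// For example (editorial note, not upstream documentation): a srcInfo with MediaType
+// imgspecv1.MediaTypeImageLayerZstd maps to &compression.Zstd, while an unrecognized or
+// empty MediaType yields nil, leaving srcInfo.CompressionAlgorithm unset in copyLayer below.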
+
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
+// and returns a complete blobInfo of the copied layer and a value for LayerDiffIDs if diffIDIsNeeded.
+// srcRef can be used as an additional hint to the destination when checking whether a layer can be reused; srcRef may be nil.
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
+ // If the srcInfo doesn't contain compression information, try to compute it from the
+ // MediaType, which was either read from a manifest by way of LayerInfos() or constructed
+ // by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
+ // the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
+ // which uses the compression information to compute the updated MediaType values.
+ // (Sadly UpdatedImage() is documented to not update MediaTypes from
+ // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
+ if srcInfo.CompressionAlgorithm == nil {
+ srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo)
+ }
+
+ ic.c.printCopyInfo("blob", srcInfo)
+
+ diffIDIsNeeded := false
+ var cachedDiffID digest.Digest = ""
+ if ic.diffIDsAreNeeded {
+ cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded = cachedDiffID == ""
+ }
+ // When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+ // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+ // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+ encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil)
+ canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+ // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+ if canAvoidProcessingCompleteLayer {
+ canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+ logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+ srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+ canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
+ // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
+ // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
+ // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
+ // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
+ // Fixing that will probably require passing more information to TryReusingBlob() than the current version of
+ // the ImageDestination interface lets us pass in.
+ var requiredCompression *compressiontypes.Algorithm
+ var originalCompression *compressiontypes.Algorithm
+ if ic.requireCompressionFormatMatch {
+ requiredCompression = ic.compressionFormat
+ originalCompression = srcInfo.CompressionAlgorithm
+ }
+ reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ CanSubstitute: canSubstitute,
+ EmptyLayer: emptyLayer,
+ LayerIndex: &layerIndex,
+ SrcRef: srcRef,
+ RequiredCompression: requiredCompression,
+ OriginalCompression: originalCompression,
+ })
+ if err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
+ }
+ if reused {
+ logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+ func() { // A scope for defer
+ bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
+ defer bar.Abort(false)
+ bar.mark100PercentComplete()
+ }()
+
+ // Throw an event that the layer has been skipped
+ if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
+ ic.c.options.Progress <- types.ProgressProperties{
+ Event: types.ProgressEventSkipped,
+ Artifact: srcInfo,
+ }
+ }
+
+ return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
+ }
+ }
+
+ // A partial pull is managed by the destination storage, which decides what portions
+ // of the source file are not yet known and must be fetched.
+ // Attempt a partial pull only when the source allows retrieving a blob partially and
+ // the destination supports it.
+ if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
+ if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
+ bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+ hideProgressBar := true
+ defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
+ bar.Abort(hideProgressBar)
+ }()
+
+ proxy := blobChunkAccessorProxy{
+ wrapped: ic.c.rawSource,
+ bar: bar,
+ }
+ uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
+ if err == nil {
+ if srcInfo.Size != -1 {
+ refill := srcInfo.Size - bar.Current()
+ bar.SetCurrent(srcInfo.Size)
+ bar.SetRefill(refill)
+ }
+ bar.mark100PercentComplete()
+ hideProgressBar = false
+ logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
+ return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob)
+ }
+ logrus.Debugf("Failed to retrieve partial blob: %v", err)
+ return false, types.BlobInfo{}
+ }(); reused {
+ return blobInfo, cachedDiffID, nil
+ }
+ }
+
+ // Fallback: copy the layer, computing the diffID if we need to do so
+ return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
+ bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+ defer bar.Abort(false)
+
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
+ if err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
+ }
+ defer srcStream.Close()
+
+ blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
+ if err != nil {
+ return types.BlobInfo{}, "", err
+ }
+
+ diffID := cachedDiffID
+ if diffIDIsNeeded {
+ select {
+ case <-ctx.Done():
+ return types.BlobInfo{}, "", ctx.Err()
+ case diffIDResult := <-diffIDChan:
+ if diffIDResult.err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
+ }
+ logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encryptingOrDecrypting {
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+ }
+ diffID = diffIDResult.digest
+ }
+ }
+
+ bar.mark100PercentComplete()
+ return blobInfo, diffID, nil
+ }()
+}
+
+// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo.
+func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo {
+ // The transport is only tasked with finding the blob, determining its size if necessary, and returning the right
+ // compression format if the blob was substituted.
+ // Handling of compression, encryption, and the related MIME types and the like are all the responsibility
+ // of the generic code in this package.
+ res := types.BlobInfo{
+ Digest: reusedBlob.Digest,
+ Size: reusedBlob.Size,
+ URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+ Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
+ MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+ CompressionOperation: reusedBlob.CompressionOperation,
+ CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
+ CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
+ }
+ // The transport is only expected to fill CompressionOperation and CompressionAlgorithm
+ // if the blob was substituted; otherwise, fill it in based
+ // on what we know from the srcInfos we were given.
+ if reusedBlob.Digest == inputInfo.Digest {
+ res.CompressionOperation = inputInfo.CompressionOperation
+ res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+ }
+ return res
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps (de/re/)compressing the stream,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+ diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+ var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
+ var diffIDChan chan diffIDResult
+
+ err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+ if diffIDIsNeeded {
+ diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+ pipeReader, pipeWriter := io.Pipe()
+ defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
+ // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+ // reading from the pipe has failed, we don’t really care.
+ // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+ // the return value includes an error indication, which we do check.
+ //
+ // If this is never called, pipeReader will not be used anywhere, but pipeWriter will only be
+ // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+ go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+ return pipeWriter
+ }
+ }
+
+ blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
+ return blobInfo, diffIDChan, err
+ // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
+ result := diffIDResult{
+ digest: "",
+ err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
+ }
+ defer func() { dest <- result }()
+ defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
+
+ result.digest, result.err = computeDiffID(layerStream, decompressor)
+}
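+
+// Illustrative sketch (editorial note, not upstream code): diffIDComputationGoroutine is
+// normally fed through an io.Pipe wired up in copyLayerFromStream above; a minimal
+// standalone use, reusing a fixture from copy/fixtures, would be
+//
+//	ch := make(chan diffIDResult, 1)
+//	f, err := os.Open("fixtures/Hello.uncompressed")
+//	if err != nil { /* handle error */ }
+//	go diffIDComputationGoroutine(ch, f, nil) // closes f when done
+//	res := <-ch                               // res.digest on success, res.err on failure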
+
+// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
+func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
+ if decompressor != nil {
+ s, err := decompressor(stream)
+ if err != nil {
+ return "", err
+ }
+ defer s.Close()
+ stream = s
+ }
+
+ return digest.Canonical.FromReader(stream)
+}
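+
+// Illustrative sketch (editorial note, not upstream code): computing a DiffID from a
+// gzip-compressed layer, mirroring the fixtures used by TestComputeDiffID:
+//
+//	f, err := os.Open("fixtures/Hello.gz")
+//	if err != nil { /* handle error */ }
+//	defer f.Close()
+//	diffID, err := computeDiffID(f, compression.GzipDecompressor)
+//	// diffID is the digest of the uncompressed content:
+//	// sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969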
+
+// algorithmsByNames returns slice of Algorithms from slice of Algorithm Names
+func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) {
+ result := []compressiontypes.Algorithm{}
+ for _, name := range names {
+ algo, err := compression.AlgorithmByName(name)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, algo)
+ }
+ return result, nil
+}
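+
+// For example (editorial note, not upstream code), algorithmsByNames([]string{"gzip", "zstd"})
+// returns the Gzip and Zstd algorithms in that order, while an unknown name makes
+// compression.AlgorithmByName fail and the whole call return that error.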
diff --git a/copy/single_test.go b/copy/single_test.go
new file mode 100644
index 0000000..144b5ed
--- /dev/null
+++ b/copy/single_test.go
@@ -0,0 +1,141 @@
+package copy
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUpdatedBlobInfoFromReuse(t *testing.T) {
+ srcInfo := types.BlobInfo{
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ URLs: []string{"https://layer.url"},
+ Annotations: map[string]string{"test-annotation-2": "two"},
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress, // Might be set by blobCacheSource.LayerInfosForCopy
+ CompressionAlgorithm: &compression.Gzip, // Set e.g. in copyLayer
+ // CryptoOperation is not set by LayerInfos()
+ }
+
+ for _, c := range []struct {
+ reused private.ReusedBlob
+ expected types.BlobInfo
+ }{
+ { // A straightforward reuse without substitution
+ reused: private.ReusedBlob{
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ // CompressionOperation not set
+ // CompressionAlgorithm not set
+ },
+ expected: types.BlobInfo{
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ URLs: nil,
+ Annotations: map[string]string{"test-annotation-2": "two"},
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress, // Might be set by blobCacheSource.LayerInfosForCopy
+ CompressionAlgorithm: &compression.Gzip, // Set e.g. in copyLayer
+ // CryptoOperation is set to the zero value
+ },
+ },
+ { // Reuse with substitution
+ reused: private.ReusedBlob{
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 513543640,
+ CompressionOperation: types.Decompress,
+ CompressionAlgorithm: nil,
+ },
+ expected: types.BlobInfo{
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 513543640,
+ URLs: nil,
+ Annotations: map[string]string{"test-annotation-2": "two"},
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Decompress,
+ CompressionAlgorithm: nil,
+ // CryptoOperation is set to the zero value
+ },
+ },
+ } {
+ res := updatedBlobInfoFromReuse(srcInfo, c.reused)
+ assert.Equal(t, c.expected, res, fmt.Sprintf("%#v", c.reused))
+ }
+}
+
+func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) *diffIDResult {
+ ch := make(chan diffIDResult)
+ go diffIDComputationGoroutine(ch, layerStream, decompressor)
+ timeout := time.After(time.Second)
+ select {
+ case res := <-ch:
+ return &res
+ case <-timeout:
+ return nil
+ }
+}
+
+func TestDiffIDComputationGoroutine(t *testing.T) {
+ stream, err := os.Open("fixtures/Hello.uncompressed")
+ require.NoError(t, err)
+ res := goDiffIDComputationGoroutineWithTimeout(stream, nil)
+ require.NotNil(t, res)
+ assert.NoError(t, res.err)
+ assert.Equal(t, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969", res.digest.String())
+
+ // Error reading input
+ reader, writer := io.Pipe()
+ err = writer.CloseWithError(errors.New("Expected error reading input in diffIDComputationGoroutine"))
+ require.NoError(t, err)
+ res = goDiffIDComputationGoroutineWithTimeout(reader, nil)
+ require.NotNil(t, res)
+ assert.Error(t, res.err)
+}
+
+func TestComputeDiffID(t *testing.T) {
+ for _, c := range []struct {
+ filename string
+ decompressor compressiontypes.DecompressorFunc
+ result digest.Digest
+ }{
+ {"fixtures/Hello.uncompressed", nil, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
+ {"fixtures/Hello.gz", nil, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"},
+ {"fixtures/Hello.gz", compression.GzipDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
+ {"fixtures/Hello.zst", nil, "sha256:361a8e0372ad438a0316eb39a290318364c10b60d0a7e55b40aa3eafafc55238"},
+ {"fixtures/Hello.zst", compression.ZstdDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
+ } {
+ stream, err := os.Open(c.filename)
+ require.NoError(t, err, c.filename)
+ defer stream.Close()
+
+ diffID, err := computeDiffID(stream, c.decompressor)
+ require.NoError(t, err, c.filename)
+ assert.Equal(t, c.result, diffID)
+ }
+
+ // Error initializing decompression
+ _, err := computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor)
+ assert.Error(t, err)
+
+ // Error reading input
+ reader, writer := io.Pipe()
+ defer reader.Close()
+ err = writer.CloseWithError(errors.New("Expected error reading input in computeDiffID"))
+ require.NoError(t, err)
+ _, err = computeDiffID(reader, nil)
+ assert.Error(t, err)
+}
diff --git a/directory/directory_dest.go b/directory/directory_dest.go
new file mode 100644
index 0000000..222723a
--- /dev/null
+++ b/directory/directory_dest.go
@@ -0,0 +1,284 @@
+package directory
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const version = "Directory Transport Version: 1.1\n"
+
+// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created
+// using the 'dir' transport
+var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
+
+type dirImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.AlwaysSupportsSignatures
+
+ ref dirReference
+}
+
+// newImageDestination returns an ImageDestination for writing to a directory.
+func newImageDestination(sys *types.SystemContext, ref dirReference) (private.ImageDestination, error) {
+ desiredLayerCompression := types.PreserveOriginal
+ if sys != nil {
+ if sys.DirForceCompress {
+ desiredLayerCompression = types.Compress
+
+ if sys.DirForceDecompress {
+ return nil, fmt.Errorf("Cannot compress and decompress at the same time")
+ }
+ }
+ if sys.DirForceDecompress {
+ desiredLayerCompression = types.Decompress
+ }
+ }
+
+ // If the directory exists, check whether it is empty;
+ // if it is not empty, check whether its contents match those of a container image directory and overwrite them;
+ // if the contents don't match, return an error.
+ dirExists, err := pathExists(ref.resolvedPath)
+ if err != nil {
+ return nil, fmt.Errorf("checking for path %q: %w", ref.resolvedPath, err)
+ }
+ if dirExists {
+ isEmpty, err := isDirEmpty(ref.resolvedPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isEmpty {
+ versionExists, err := pathExists(ref.versionPath())
+ if err != nil {
+ return nil, fmt.Errorf("checking if path exists %q: %w", ref.versionPath(), err)
+ }
+ if versionExists {
+ contents, err := os.ReadFile(ref.versionPath())
+ if err != nil {
+ return nil, err
+ }
+ // check whether the contents of the version file are what we expect
+ if string(contents) != version {
+ return nil, ErrNotContainerImageDir
+ }
+ } else {
+ return nil, ErrNotContainerImageDir
+ }
+ // delete directory contents so that only one image is in the directory at a time
+ if err = removeDirContents(ref.resolvedPath); err != nil {
+ return nil, fmt.Errorf("erasing contents in %q: %w", ref.resolvedPath, err)
+ }
+ logrus.Debugf("overwriting existing container image directory %q", ref.resolvedPath)
+ }
+ } else {
+ // create directory if it doesn't exist
+ if err := os.MkdirAll(ref.resolvedPath, 0755); err != nil {
+ return nil, fmt.Errorf("unable to create directory %q: %w", ref.resolvedPath, err)
+ }
+ }
+ // create version file
+ err = os.WriteFile(ref.versionPath(), []byte(version), 0644)
+ if err != nil {
+ return nil, fmt.Errorf("creating version file %q: %w", ref.versionPath(), err)
+ }
+
+ d := &dirImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: nil,
+ DesiredLayerCompression: desiredLayerCompression,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
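+
+// Illustrative sketch (editorial note, not upstream code): callers normally obtain a
+// destination through the transport rather than calling newImageDestination directly:
+//
+//	ref, err := NewReference("/tmp/some-image") // hypothetical path; or use Transport.ParseReference
+//	if err != nil { /* handle error */ }
+//	dest, err := ref.NewImageDestination(ctx, nil) // a *types.SystemContext may be passed instead of nil
+//	if err != nil { /* handle error */ }
+//	defer dest.Close()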
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dirImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dirImageDestination) Close() error {
+ return nil
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ succeeded := false
+ explicitClosed := false
+ defer func() {
+ if !explicitClosed {
+ blobFile.Close()
+ }
+ if !succeeded {
+ os.Remove(blobFile.Name())
+ }
+ }()
+
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(blobFile, stream)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ blobDigest := digester.Digest()
+ if inputInfo.Size != -1 && size != inputInfo.Size {
+ return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ }
+ if err := blobFile.Sync(); err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+ // On Windows, the “permissions of newly created files” argument to syscall.Open is
+ // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+ // always fails on Windows.
+ if runtime.GOOS != "windows" {
+ if err := blobFile.Chmod(0644); err != nil {
+ return private.UploadedBlob{}, err
+ }
+ }
+
+ blobPath := d.ref.layerPath(blobDigest)
+ // need to explicitly close the file, since a rename otherwise won't work on Windows
+ blobFile.Close()
+ explicitClosed = true
+ if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+ return private.UploadedBlob{}, err
+ }
+ succeeded = true
+ return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalBlobMatchesRequiredCompression(options) {
+ return false, private.ReusedBlob{}, nil
+ }
+ if info.Digest == "" {
+ return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
+ }
+ blobPath := d.ref.layerPath(info.Digest)
+ finfo, err := os.Stat(blobPath)
+ if err != nil && os.IsNotExist(err) {
+ return false, private.ReusedBlob{}, nil
+ }
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
+ return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ for i, sig := range signatures {
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error {
+ return nil
+}
+
+// returns true if path exists
+func pathExists(path string) (bool, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+// returns true if directory is empty
+func isDirEmpty(path string) (bool, error) {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return false, err
+ }
+ return len(files) == 0, nil
+}
+
+// deletes the contents of a directory
+func removeDirContents(path string) error {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ return err
+ }
+
+ for _, file := range files {
+ if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/directory/directory_src.go b/directory/directory_src.go
new file mode 100644
index 0000000..5fc83bb
--- /dev/null
+++ b/directory/directory_src.go
@@ -0,0 +1,102 @@
+package directory
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+type dirImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
+ ref dirReference
+}
+
+// newImageSource returns an ImageSource reading from an existing directory.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ref dirReference) private.ImageSource {
+ s := &dirImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dirImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dirImageSource) Close() error {
+ return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
+ if err != nil {
+ return nil, "", err
+ }
+ return m, manifest.GuessMIMEType(m), err
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ r, err := os.Open(s.ref.layerPath(info.Digest))
+ if err != nil {
+ return nil, -1, err
+ }
+ fi, err := r.Stat()
+ if err != nil {
+ return nil, -1, err
+ }
+ return r, fi.Size(), nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ signatures := []signature.Signature{}
+ for i := 0; ; i++ {
+ path := s.ref.signaturePath(i, instanceDigest)
+ sigBlob, err := os.ReadFile(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ break
+ }
+ return nil, err
+ }
+ signature, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+ }
+ signatures = append(signatures, signature)
+ }
+ return signatures, nil
+}
diff --git a/directory/directory_test.go b/directory/directory_test.go
new file mode 100644
index 0000000..8c80262
--- /dev/null
+++ b/directory/directory_test.go
@@ -0,0 +1,209 @@
+package directory
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var _ private.ImageSource = (*dirImageSource)(nil)
+var _ private.ImageDestination = (*dirImageDestination)(nil)
+
+func TestDestinationReference(t *testing.T) {
+ ref, tmpDir := refToTempDir(t)
+
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dest.Close()
+ ref2 := dest.Reference()
+ assert.Equal(t, tmpDir, ref2.StringWithinTransport())
+}
+
+func TestGetPutManifest(t *testing.T) {
+ ref, _ := refToTempDir(t)
+
+ man := []byte("test-manifest")
+ list := []byte("test-manifest-list")
+ md, err := manifest.Digest(man)
+ require.NoError(t, err)
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dest.Close()
+ err = dest.PutManifest(context.Background(), man, &md)
+ assert.NoError(t, err)
+ err = dest.PutManifest(context.Background(), list, nil)
+ assert.NoError(t, err)
+ err = dest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ src, err := ref.NewImageSource(context.Background(), nil)
+ require.NoError(t, err)
+ defer src.Close()
+ m, mt, err := src.GetManifest(context.Background(), nil)
+ assert.NoError(t, err)
+ assert.Equal(t, list, m)
+ assert.Equal(t, "", mt)
+
+ m, mt, err = src.GetManifest(context.Background(), &md)
+ assert.NoError(t, err)
+ assert.Equal(t, man, m)
+ assert.Equal(t, "", mt)
+}
+
+func TestGetPutBlob(t *testing.T) {
+ computedBlob := []byte("test-blob")
+ providedBlob := []byte("provided-blob")
+ providedDigest := digest.Digest("sha256:provided-test-digest")
+
+ ref, _ := refToTempDir(t)
+ cache := memory.New()
+
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dest.Close()
+ assert.Equal(t, types.PreserveOriginal, dest.DesiredLayerCompression())
+ // PutBlob with caller-provided data
+ providedInfo, err := dest.PutBlob(context.Background(), bytes.NewReader(providedBlob), types.BlobInfo{Digest: providedDigest, Size: int64(len(providedBlob))}, cache, false)
+ assert.NoError(t, err)
+ assert.Equal(t, int64(len(providedBlob)), providedInfo.Size)
+ assert.Equal(t, providedDigest, providedInfo.Digest)
+ // PutBlob with unknown data
+ computedInfo, err := dest.PutBlob(context.Background(), bytes.NewReader(computedBlob), types.BlobInfo{Digest: "", Size: int64(-1)}, cache, false)
+ assert.NoError(t, err)
+ assert.Equal(t, int64(len(computedBlob)), computedInfo.Size)
+ assert.Equal(t, digest.FromBytes(computedBlob), computedInfo.Digest)
+ err = dest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ src, err := ref.NewImageSource(context.Background(), nil)
+ require.NoError(t, err)
+ defer src.Close()
+ for digest, expectedBlob := range map[digest.Digest][]byte{
+ providedInfo.Digest: providedBlob,
+ computedInfo.Digest: computedBlob,
+ } {
+ rc, size, err := src.GetBlob(context.Background(), types.BlobInfo{Digest: digest, Size: int64(len(expectedBlob))}, cache)
+ assert.NoError(t, err)
+ defer rc.Close()
+ b, err := io.ReadAll(rc)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedBlob, b)
+ assert.Equal(t, int64(len(expectedBlob)), size)
+ }
+}
+
+// readerFromFunc allows implementing Reader by any function, e.g. a closure.
+type readerFromFunc func([]byte) (int, error)
+
+func (fn readerFromFunc) Read(p []byte) (int, error) {
+ return fn(p)
+}
+
+// TestPutBlobDigestFailure simulates behavior on digest verification failure.
+func TestPutBlobDigestFailure(t *testing.T) {
+ const digestErrorString = "Simulated digest error"
+ const blobDigest = digest.Digest("sha256:test-digest")
+
+ ref, _ := refToTempDir(t)
+ dirRef, ok := ref.(dirReference)
+ require.True(t, ok)
+ blobPath := dirRef.layerPath(blobDigest)
+ cache := memory.New()
+
+ firstRead := true
+ reader := readerFromFunc(func(p []byte) (int, error) {
+ _, err := os.Lstat(blobPath)
+ require.Error(t, err)
+ require.True(t, os.IsNotExist(err))
+ if firstRead {
+ if len(p) > 0 {
+ firstRead = false
+ }
+ for i := 0; i < len(p); i++ {
+ p[i] = 0xAA
+ }
+ return len(p), nil
+ }
+ return 0, errors.New(digestErrorString)
+ })
+
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dest.Close()
+ _, err = dest.PutBlob(context.Background(), reader, types.BlobInfo{Digest: blobDigest, Size: -1}, cache, false)
+ assert.ErrorContains(t, err, digestErrorString)
+ err = dest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ _, err = os.Lstat(blobPath)
+ require.Error(t, err)
+ require.True(t, os.IsNotExist(err))
+}
+
+func TestGetPutSignatures(t *testing.T) {
+ ref, _ := refToTempDir(t)
+
+ man := []byte("test-manifest")
+ list := []byte("test-manifest-list")
+ md, err := manifest.Digest(man)
+ require.NoError(t, err)
+ // These signatures are completely invalid; start with 0xA3 just to be minimally plausible to signature.FromBlob.
+ signatures := [][]byte{
+ []byte("\xA3sig1"),
+ []byte("\xA3sig2"),
+ }
+ listSignatures := [][]byte{
+ []byte("\xA3sig3"),
+ []byte("\xA3sig4"),
+ }
+
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dest.Close()
+ err = dest.SupportsSignatures(context.Background())
+ assert.NoError(t, err)
+
+ err = dest.PutManifest(context.Background(), man, &md)
+ require.NoError(t, err)
+ err = dest.PutManifest(context.Background(), list, nil)
+ require.NoError(t, err)
+
+ err = dest.PutSignatures(context.Background(), signatures, &md)
+ assert.NoError(t, err)
+ err = dest.PutSignatures(context.Background(), listSignatures, nil)
+ assert.NoError(t, err)
+ err = dest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ src, err := ref.NewImageSource(context.Background(), nil)
+ require.NoError(t, err)
+ defer src.Close()
+ sigs, err := src.GetSignatures(context.Background(), nil)
+ assert.NoError(t, err)
+ assert.Equal(t, listSignatures, sigs)
+
+ sigs, err = src.GetSignatures(context.Background(), &md)
+ assert.NoError(t, err)
+ assert.Equal(t, signatures, sigs)
+}
+
+func TestSourceReference(t *testing.T) {
+ ref, tmpDir := refToTempDir(t)
+
+ src, err := ref.NewImageSource(context.Background(), nil)
+ require.NoError(t, err)
+ defer src.Close()
+ ref2 := src.Reference()
+ assert.Equal(t, tmpDir, ref2.StringWithinTransport())
+}
diff --git a/directory/directory_transport.go b/directory/directory_transport.go
new file mode 100644
index 0000000..7e30686
--- /dev/null
+++ b/directory/directory_transport.go
@@ -0,0 +1,188 @@
+package directory
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for directory paths.
+var Transport = dirTransport{}
+
+type dirTransport struct{}
+
+func (t dirTransport) Name() string {
+ return "dir"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return NewReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
+ if !strings.HasPrefix(scope, "/") {
+ return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ }
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+ cleaned := filepath.Clean(scope)
+ if cleaned != scope {
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ }
+ return nil
+}
+
+// dirReference is an ImageReference for directory paths.
+type dirReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ path string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+}
+
+// There is no directory.ParseReference because it is rather pointless.
+// Callers who need a transport-independent interface will go through
+// dirTransport.ParseReference; callers who intentionally deal with directories
+// can use directory.NewReference.
+
+// NewReference returns a directory reference for a specified path.
+//
+// We do not expose an API supplying the resolvedPath; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedPath.
+func NewReference(path string) (types.ImageReference, error) {
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path)
+ if err != nil {
+ return nil, err
+ }
+ return dirReference{path: path, resolvedPath: resolved}, nil
+}
+
+func (ref dirReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref dirReference) StringWithinTransport() string {
+ return ref.path
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref dirReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref dirReference) PolicyConfigurationIdentity() string {
+ return ref.resolvedPath
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref dirReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedPath
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 || lastSlash == 0 {
+ break
+ }
+ path = path[:lastSlash]
+ res = append(res, path)
+ }
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by dirTransport.ValidatePolicyConfigurationScope above.
+ return res
+}
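+
+// For example (editorial note, not upstream documentation), a reference whose resolvedPath
+// is "/var/lib/images/foo" yields []string{"/var/lib/images", "/var/lib", "/var"}.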
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ref), nil
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.New("Deleting images not implemented for dir: images")
+}
+
+// manifestPath returns a path for the manifest within a directory using our conventions.
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+ if instanceDigest != nil {
+ return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+ }
+ return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest digest.Digest) string {
+ // FIXME: Should we keep the digest identification?
+ return filepath.Join(ref.path, digest.Encoded())
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+ if instanceDigest != nil {
+ return filepath.Join(ref.path, fmt.Sprintf("%s.signature-%d", instanceDigest.Encoded(), index+1))
+ }
+ return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+ return filepath.Join(ref.path, "version")
+}
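+
+// Illustrative summary (editorial note, not upstream documentation): with the path helpers
+// above, a directory written by this transport looks roughly like
+//
+//	<path>/version                       version marker file
+//	<path>/manifest.json                 top-level manifest
+//	<path>/<digest-hex>.manifest.json    per-instance manifest (instanceDigest != nil)
+//	<path>/<digest-hex>                  config and layer blobs, named by digest hex
+//	<path>/signature-1, signature-2, …   signatures for the top-level manifest
+//	<path>/<digest-hex>.signature-1, …   signatures for a specific instance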
diff --git a/directory/directory_transport_test.go b/directory/directory_transport_test.go
new file mode 100644
index 0000000..0ef96c4
--- /dev/null
+++ b/directory/directory_transport_test.go
@@ -0,0 +1,230 @@
+package directory
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ _ "github.com/containers/image/v5/internal/testing/explicitfilepath-tmpdir"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "dir", Transport.Name())
+}
+
+func TestTransportParseReference(t *testing.T) {
+ testNewReference(t, Transport.ParseReference)
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ for _, scope := range []string{
+ "/etc",
+ "/this/does/not/exist",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+
+ for _, scope := range []string{
+ "relative/path",
+ "/double//slashes",
+ "/has/./dot",
+ "/has/dot/../dot",
+ "/trailing/slash/",
+ "/",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.Error(t, err, scope)
+ }
+}
+
+func TestNewReference(t *testing.T) {
+ testNewReference(t, NewReference)
+}
+
+// testNewReference is a test shared for Transport.ParseReference and NewReference.
+func testNewReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
+ tmpDir := t.TempDir()
+
+ for _, path := range []string{
+ "/",
+ "/etc",
+ tmpDir,
+ "relativepath",
+ tmpDir + "/thisdoesnotexist",
+ } {
+ ref, err := fn(path)
+ require.NoError(t, err, path)
+ dirRef, ok := ref.(dirReference)
+ require.True(t, ok)
+ assert.Equal(t, path, dirRef.path, path)
+ }
+
+ _, err := fn(tmpDir + "/thisparentdoesnotexist/something")
+ assert.Error(t, err)
+}
+
+// refToTempDir creates a temporary directory and returns a reference to it.
+func refToTempDir(t *testing.T) (types.ImageReference, string) {
+ tmpDir := t.TempDir()
+ ref, err := NewReference(tmpDir)
+ require.NoError(t, err)
+ return ref, tmpDir
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, _ := refToTempDir(t)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ ref, tmpDir := refToTempDir(t)
+ assert.Equal(t, tmpDir, ref.StringWithinTransport())
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ ref, _ := refToTempDir(t)
+ assert.Nil(t, ref.DockerReference())
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ ref, tmpDir := refToTempDir(t)
+
+ assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity())
+ // A non-canonical path. Test just one, the various other cases are
+ // tested in explicitfilepath.ResolvePathToFullyExplicit.
+ ref, err := NewReference(tmpDir + "/.")
+ require.NoError(t, err)
+ assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity())
+
+ // "/" as a corner case.
+ ref, err = NewReference("/")
+ require.NoError(t, err)
+ assert.Equal(t, "/", ref.PolicyConfigurationIdentity())
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ ref, tmpDir := refToTempDir(t)
+ // We don't really know enough to make a full equality test here.
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns)
+ assert.NotEmpty(t, ns)
+ assert.Equal(t, filepath.Dir(tmpDir), ns[0])
+
+ // Test with a known path which should exist. Test just one non-canonical
+ // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
+ //
+ // It would be nice to test a deeper hierarchy, but it is not obvious what
+ // deeper path is always available in the various distros, AND is not likely
+ // to contain a symbolic link.
+ for _, path := range []string{"/usr/share", "/usr/share/./."} {
+ _, err := os.Lstat(path)
+ require.NoError(t, err)
+ ref, err := NewReference(path)
+ require.NoError(t, err)
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns)
+ assert.Equal(t, []string{"/usr"}, ns)
+ }
+
+ // "/" as a corner case.
+ ref, err := NewReference("/")
+ require.NoError(t, err)
+ assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces())
+}
+
+func TestReferenceNewImage(t *testing.T) {
+ ref, _ := refToTempDir(t)
+
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dest.Close()
+ mFixture, err := os.ReadFile("../manifest/fixtures/v2s1.manifest.json")
+ require.NoError(t, err)
+ err = dest.PutManifest(context.Background(), mFixture, nil)
+ assert.NoError(t, err)
+ err = dest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ img, err := ref.NewImage(context.Background(), nil)
+ assert.NoError(t, err)
+ defer img.Close()
+}
+
+func TestReferenceNewImageNoValidManifest(t *testing.T) {
+ ref, _ := refToTempDir(t)
+
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dest.Close()
+ err = dest.PutManifest(context.Background(), []byte(`{"schemaVersion":1}`), nil)
+ assert.NoError(t, err)
+ err = dest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ _, err = ref.NewImage(context.Background(), nil)
+ assert.Error(t, err)
+}
+
+func TestReferenceNewImageSource(t *testing.T) {
+ ref, _ := refToTempDir(t)
+ src, err := ref.NewImageSource(context.Background(), nil)
+ assert.NoError(t, err)
+ defer src.Close()
+}
+
+func TestReferenceNewImageDestination(t *testing.T) {
+ ref, _ := refToTempDir(t)
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ assert.NoError(t, err)
+ defer dest.Close()
+}
+
+func TestReferenceDeleteImage(t *testing.T) {
+ ref, _ := refToTempDir(t)
+ err := ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err)
+}
+
+func TestReferenceManifestPath(t *testing.T) {
+ dhex := digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
+
+ ref, tmpDir := refToTempDir(t)
+ dirRef, ok := ref.(dirReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir+"/manifest.json", dirRef.manifestPath(nil))
+ assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".manifest.json", dirRef.manifestPath(&dhex))
+}
+
+func TestReferenceLayerPath(t *testing.T) {
+ const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+
+ ref, tmpDir := refToTempDir(t)
+ dirRef, ok := ref.(dirReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir+"/"+hex, dirRef.layerPath("sha256:"+hex))
+}
+
+func TestReferenceSignaturePath(t *testing.T) {
+ dhex := digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
+
+ ref, tmpDir := refToTempDir(t)
+ dirRef, ok := ref.(dirReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir+"/signature-1", dirRef.signaturePath(0, nil))
+ assert.Equal(t, tmpDir+"/signature-10", dirRef.signaturePath(9, nil))
+ assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-1", dirRef.signaturePath(0, &dhex))
+ assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-10", dirRef.signaturePath(9, &dhex))
+}
+
+func TestReferenceVersionPath(t *testing.T) {
+ ref, tmpDir := refToTempDir(t)
+ dirRef, ok := ref.(dirReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir+"/version", dirRef.versionPath())
+}
diff --git a/directory/explicitfilepath/path.go b/directory/explicitfilepath/path.go
new file mode 100644
index 0000000..32ae1ae
--- /dev/null
+++ b/directory/explicitfilepath/path.go
@@ -0,0 +1,55 @@
+package explicitfilepath
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
+// To do so, all elements of the input path must exist; as a special case, the final component may be
+// a non-existent name (but not a symlink pointing to a non-existent name).
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+func ResolvePathToFullyExplicit(path string) (string, error) {
+ switch _, err := os.Lstat(path); {
+ case err == nil:
+ return resolveExistingPathToFullyExplicit(path)
+ case os.IsNotExist(err):
+ parent, file := filepath.Split(path)
+ resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
+ if err != nil {
+ return "", err
+ }
+ if file == "." || file == ".." {
+ // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
+ // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
+ // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
+ // in the resulting path, and especially not at the end.
+ return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path)
+ }
+ resolvedPath := filepath.Join(resolvedParent, file)
+ // As a sanity check, ensure that there are no "." or ".." components.
+ cleanedResolvedPath := filepath.Clean(resolvedPath)
+ if cleanedResolvedPath != resolvedPath {
+ // Coverage: This should never happen.
+ return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
+ }
+ return resolvedPath, nil
+ default: // err != nil, unrecognized
+ return "", err
+ }
+}
+
+// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit,
+// but without the special case for missing final component.
+func resolveExistingPathToFullyExplicit(path string) (string, error) {
+ resolved, err := filepath.Abs(path)
+ if err != nil {
+ return "", err // Coverage: This can fail only if os.Getwd() fails.
+ }
+ resolved, err = filepath.EvalSymlinks(resolved)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Clean(resolved), nil
+}
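A minimal usage sketch (editorial, not part of the patch), assuming /etc exists and is not behind a symlink on the host running it:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/directory/explicitfilepath"
    )

    func main() {
        // Relative components are resolved, and a missing final component is
        // tolerated; a missing parent directory would be reported as an error.
        p, err := explicitfilepath.ResolvePathToFullyExplicit("/etc/../etc/does-not-exist-yet")
        if err != nil {
            panic(err)
        }
        fmt.Println(p) // "/etc/does-not-exist-yet" on a typical system
    }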
diff --git a/directory/explicitfilepath/path_test.go b/directory/explicitfilepath/path_test.go
new file mode 100644
index 0000000..7a4c24d
--- /dev/null
+++ b/directory/explicitfilepath/path_test.go
@@ -0,0 +1,172 @@
+package explicitfilepath
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ _ "github.com/containers/image/v5/internal/testing/explicitfilepath-tmpdir"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type pathResolvingTestCase struct {
+ setup func(*testing.T, string) string
+ expected string
+}
+
+var testCases = []pathResolvingTestCase{
+ { // A straightforward subdirectory hierarchy
+ func(t *testing.T, top string) string {
+ err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755)
+ require.NoError(t, err)
+ return "dir1/dir2/dir3"
+ },
+ "dir1/dir2/dir3",
+ },
+ { // Missing component
+ func(t *testing.T, top string) string {
+ return "thisismissing/dir2"
+ },
+ "",
+ },
+ { // Symlink on the path
+ func(t *testing.T, top string) string {
+ err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755)
+ require.NoError(t, err)
+ err = os.Symlink("dir1", filepath.Join(top, "link1"))
+ require.NoError(t, err)
+ return "link1/dir2"
+ },
+ "dir1/dir2",
+ },
+ { // Trailing symlink
+ func(t *testing.T, top string) string {
+ err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755)
+ require.NoError(t, err)
+ err = os.Symlink("dir2", filepath.Join(top, "dir1/link2"))
+ require.NoError(t, err)
+ return "dir1/link2"
+ },
+ "dir1/dir2",
+ },
+ { // Symlink pointing nowhere, as a non-final component
+ func(t *testing.T, top string) string {
+ err := os.Symlink("thisismissing", filepath.Join(top, "link1"))
+ require.NoError(t, err)
+ return "link1/dir2"
+ },
+ "",
+ },
+ { // Trailing symlink pointing nowhere (but note that a missing non-symlink would be accepted)
+ func(t *testing.T, top string) string {
+ err := os.Symlink("thisismissing", filepath.Join(top, "link1"))
+ require.NoError(t, err)
+ return "link1"
+ },
+ "",
+ },
+ { // Relative components in a path
+ func(t *testing.T, top string) string {
+ err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755)
+ require.NoError(t, err)
+ return "dir1/./dir2/../dir2/dir3"
+ },
+ "dir1/dir2/dir3",
+ },
+ { // Trailing relative components
+ func(t *testing.T, top string) string {
+ err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755)
+ require.NoError(t, err)
+ return "dir1/dir2/.."
+ },
+ "dir1",
+ },
+ { // Relative components in symlink
+ func(t *testing.T, top string) string {
+ err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755)
+ require.NoError(t, err)
+ err = os.Symlink("../dir1/dir2", filepath.Join(top, "dir1/link2"))
+ require.NoError(t, err)
+ return "dir1/link2"
+ },
+ "dir1/dir2",
+ },
+ { // Relative component pointing "into" a symlink
+ func(t *testing.T, top string) string {
+ err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755)
+ require.NoError(t, err)
+ err = os.Symlink("dir3", filepath.Join(top, "dir1/dir2/link3"))
+ require.NoError(t, err)
+ return "dir1/dir2/link3/../.."
+ },
+ "dir1",
+ },
+ { // Unreadable directory
+ func(t *testing.T, top string) string {
+ err := os.MkdirAll(filepath.Join(top, "unreadable/dir2"), 0755)
+ require.NoError(t, err)
+ err = os.Chmod(filepath.Join(top, "unreadable"), 000)
+ require.NoError(t, err)
+ return "unreadable/dir2"
+ },
+ "",
+ },
+}
+
+func testPathsAreSameFile(t *testing.T, path1, path2, description string) {
+ fi1, err := os.Stat(path1)
+ require.NoError(t, err)
+ fi2, err := os.Stat(path2)
+ require.NoError(t, err)
+ assert.True(t, os.SameFile(fi1, fi2), description)
+}
+
+func runPathResolvingTestCase(t *testing.T, f func(string) (string, error), c pathResolvingTestCase, suffix string) {
+ topDir := t.TempDir()
+ defer func() {
+ // Clean up after the "Unreadable directory" case; os.RemoveAll just fails without this.
+ _ = os.Chmod(filepath.Join(topDir, "unreadable"), 0755) // Ignore errors, especially if this does not exist.
+ }()
+
+ input := c.setup(t, topDir) + suffix // Do not call filepath.Join() on input, it calls filepath.Clean() internally!
+ description := fmt.Sprintf("%s vs. %s%s", input, c.expected, suffix)
+
+ fullOutput, err := f(topDir + "/" + input)
+ if c.expected == "" {
+ assert.Error(t, err, description)
+ } else {
+ require.NoError(t, err, input)
+ fullExpected := topDir + "/" + c.expected + suffix
+ assert.Equal(t, fullExpected, fullOutput)
+
+ // Either the two paths resolve to the same existing file, or to the same name in the same existing parent.
+ if _, err := os.Lstat(fullExpected); err == nil {
+ testPathsAreSameFile(t, fullOutput, fullExpected, description)
+ } else {
+ require.True(t, os.IsNotExist(err))
+ _, err := os.Stat(fullOutput)
+ require.Error(t, err)
+ require.True(t, os.IsNotExist(err))
+
+ parentExpected, fileExpected := filepath.Split(fullExpected)
+ parentOutput, fileOutput := filepath.Split(fullOutput)
+ assert.Equal(t, fileExpected, fileOutput)
+ testPathsAreSameFile(t, parentOutput, parentExpected, description)
+ }
+ }
+}
+
+func TestResolvePathToFullyExplicit(t *testing.T) {
+ for _, c := range testCases {
+ runPathResolvingTestCase(t, ResolvePathToFullyExplicit, c, "")
+ runPathResolvingTestCase(t, ResolvePathToFullyExplicit, c, "/trailing")
+ }
+}
+
+func TestResolveExistingPathToFullyExplicit(t *testing.T) {
+ for _, c := range testCases {
+ runPathResolvingTestCase(t, resolveExistingPathToFullyExplicit, c, "")
+ }
+}
diff --git a/doc.go b/doc.go
new file mode 100644
index 0000000..7ed58be
--- /dev/null
+++ b/doc.go
@@ -0,0 +1,70 @@
+// The package image provides libraries and commands to interact with container images.
+//
+// package main
+//
+// import (
+// "context"
+// "fmt"
+//
+// "github.com/containers/image/v5/docker"
+// )
+//
+// func main() {
+// ref, err := docker.ParseReference("//fedora")
+// if err != nil {
+// panic(err)
+// }
+// ctx := context.Background()
+// img, err := ref.NewImage(ctx, nil)
+// if err != nil {
+// panic(err)
+// }
+// defer img.Close()
+// b, _, err := img.Manifest(ctx)
+// if err != nil {
+// panic(err)
+// }
+// fmt.Printf("%s", string(b))
+// }
+//
+// ## Notes on running in rootless mode
+//
+// If your application needs to access a containers/storage store in rootless
+// mode, then the following additional steps have to be performed at start-up of
+// your application:
+//
+// package main
+//
+// import (
+// "github.com/containers/storage/pkg/reexec"
+// "github.com/syndtr/gocapability/capability"
+// "github.com/containers/storage/pkg/unshare"
+// )
+//
+// var neededCapabilities = []capability.Cap{
+// capability.CAP_CHOWN,
+// capability.CAP_DAC_OVERRIDE,
+// capability.CAP_FOWNER,
+// capability.CAP_FSETID,
+// capability.CAP_MKNOD,
+// capability.CAP_SETFCAP,
+// }
+//
+// func main() {
+// reexec.Init()
+//
+// capabilities, err := capability.NewPid(0)
+// if err != nil {
+// panic(err)
+// }
+// for _, cap := range neededCapabilities {
+// if !capabilities.Get(capability.EFFECTIVE, cap) {
+// // We are missing a capability we need, so create a user namespace
+// unshare.MaybeReexecUsingUserNamespace(true)
+// }
+// }
+// // rest of your code follows here
+// }
+//
+// TODO(runcom)
+package image
diff --git a/docker/archive/dest.go b/docker/archive/dest.go
new file mode 100644
index 0000000..632ee7c
--- /dev/null
+++ b/docker/archive/dest.go
@@ -0,0 +1,82 @@
+package archive
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+type archiveImageDestination struct {
+ *tarfile.Destination // Implements most of types.ImageDestination
+ ref archiveReference
+ writer *Writer // Should be closed if closeWriter
+ closeWriter bool
+}
+
+func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) {
+ if ref.sourceIndex != -1 {
+ return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
+ }
+
+ var writer *Writer
+ var closeWriter bool
+ if ref.writer != nil {
+ writer = ref.writer
+ closeWriter = false
+ } else {
+ w, err := NewWriter(sys, ref.path)
+ if err != nil {
+ return nil, err
+ }
+ writer = w
+ closeWriter = true
+ }
+ tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref)
+ if sys != nil && sys.DockerArchiveAdditionalTags != nil {
+ tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
+ }
+ return &archiveImageDestination{
+ Destination: tarDest,
+ ref: ref,
+ writer: writer,
+ closeWriter: closeWriter,
+ }, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *archiveImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *archiveImageDestination) Close() error {
+ if d.closeWriter {
+ return d.writer.Close()
+ }
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ d.writer.imageCommitted()
+ if d.closeWriter {
+ // We could do this only in .Close(), but failures in .Close() are much more likely to be
+ // ignored by callers that use defer. So, in single-image destinations, try to complete
+ // the archive here.
+ // But if Commit() is never called, let .Close() clean up.
+ err := d.writer.Close()
+ d.closeWriter = false
+ return err
+ }
+ return nil
+}
diff --git a/docker/archive/dest_test.go b/docker/archive/dest_test.go
new file mode 100644
index 0000000..06b3909
--- /dev/null
+++ b/docker/archive/dest_test.go
@@ -0,0 +1,5 @@
+package archive
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageDestination = (*archiveImageDestination)(nil)
diff --git a/docker/archive/fixtures/almostempty.tar b/docker/archive/fixtures/almostempty.tar
new file mode 100644
index 0000000..ac37c1e
--- /dev/null
+++ b/docker/archive/fixtures/almostempty.tar
Binary files differ
diff --git a/docker/archive/reader.go b/docker/archive/reader.go
new file mode 100644
index 0000000..875a152
--- /dev/null
+++ b/docker/archive/reader.go
@@ -0,0 +1,121 @@
+package archive
+
+import (
+ "fmt"
+
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+// Reader manages a single Docker archive, allows listing its contents and accessing
+// individual images with less overhead than creating image references individually
+// (because the archive is, if necessary, copied or decompressed only once).
+type Reader struct {
+ path string // The original, user-specified path; not the maintained temporary file, if any
+ archive *tarfile.Reader
+}
+
+// NewReader returns a Reader for path.
+// The caller should call .Close() on the returned object.
+func NewReader(sys *types.SystemContext, path string) (*Reader, error) {
+ archive, err := tarfile.NewReaderFromFile(sys, path)
+ if err != nil {
+ return nil, err
+ }
+ return &Reader{
+ path: path,
+ archive: archive,
+ }, nil
+}
+
+// Close deletes temporary files associated with the Reader, if any.
+func (r *Reader) Close() error {
+ return r.archive.Close()
+}
+
+// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport,
+// and a variant of imageReference that points at the same image within the reader.
+// The caller should call .Close() on the returned Reader.
+func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
+ standalone, ok := ref.(archiveReference)
+ if !ok {
+ return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ }
+ if standalone.archiveReader != nil {
+ return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
+ }
+ reader, err := NewReader(sys, standalone.path)
+ if err != nil {
+ return nil, nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ reader.Close()
+ }
+ }()
+ readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ succeeded = true
+ return reader, readerRef, nil
+}
+
+// List returns a set of references for images in the Reader,
+// grouped by the image the references point to.
+// The references are valid only until the Reader is closed.
+func (r *Reader) List() ([][]types.ImageReference, error) {
+ res := [][]types.ImageReference{}
+ for imageIndex, image := range r.archive.Manifest {
+ refs := []types.ImageReference{}
+ for _, tag := range image.RepoTags {
+ parsedTag, err := reference.ParseNormalizedNamed(tag)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid tag %#v in manifest item @%d: %w", tag, imageIndex, err)
+ }
+ nt, ok := parsedTag.(reference.NamedTagged)
+ if !ok {
+ return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
+ }
+ ref, err := newReference(r.path, nt, -1, r.archive, nil)
+ if err != nil {
+ return nil, fmt.Errorf("creating a reference for tag %#v in manifest item @%d: %w", tag, imageIndex, err)
+ }
+ refs = append(refs, ref)
+ }
+ if len(refs) == 0 {
+ ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
+ if err != nil {
+ return nil, fmt.Errorf("creating a reference for manifest item @%d: %w", imageIndex, err)
+ }
+ refs = append(refs, ref)
+ }
+ res = append(res, refs)
+ }
+ return res, nil
+}
+
+// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings
+// (i.e. exposing the short names before normalization).
+// The function reports an error if ref does not identify a single image.
+// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned;
+// If ref contains a source index, or neither a NamedTagged nor a source index, all tags
+// matching the image are returned.
+// Almost all users should use List() or ImageReference.DockerReference() instead.
+func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
+ archiveRef, ok := ref.(archiveReference)
+ if !ok {
+ return nil, fmt.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+ }
+ manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
+ if err != nil {
+ return nil, err
+ }
+ if tagIndex != -1 {
+ return []string{manifestItem.RepoTags[tagIndex]}, nil
+ }
+ return manifestItem.RepoTags, nil
+}
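A minimal sketch of using the Reader API above (editorial, not part of the patch); the archive path is hypothetical:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/docker/archive"
        "github.com/containers/image/v5/transports"
    )

    func main() {
        reader, err := archive.NewReader(nil, "/tmp/images.tar") // hypothetical path
        if err != nil {
            panic(err)
        }
        defer reader.Close()

        images, err := reader.List() // one group of references per image in the archive
        if err != nil {
            panic(err)
        }
        for i, refs := range images {
            for _, ref := range refs {
                fmt.Printf("image %d: %s\n", i, transports.ImageName(ref))
            }
        }
    }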
diff --git a/docker/archive/src.go b/docker/archive/src.go
new file mode 100644
index 0000000..c4ab9a8
--- /dev/null
+++ b/docker/archive/src.go
@@ -0,0 +1,41 @@
+package archive
+
+import (
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+type archiveImageSource struct {
+ *tarfile.Source // Implements most of types.ImageSource
+ ref archiveReference
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
+ var archive *tarfile.Reader
+ var closeArchive bool
+ if ref.archiveReader != nil {
+ archive = ref.archiveReader
+ closeArchive = false
+ } else {
+ a, err := tarfile.NewReaderFromFile(sys, ref.path)
+ if err != nil {
+ return nil, err
+ }
+ archive = a
+ closeArchive = true
+ }
+ src := tarfile.NewSource(archive, closeArchive, ref.Transport().Name(), ref.ref, ref.sourceIndex)
+ return &archiveImageSource{
+ Source: src,
+ ref: ref,
+ }, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *archiveImageSource) Reference() types.ImageReference {
+ return s.ref
+}
diff --git a/docker/archive/src_test.go b/docker/archive/src_test.go
new file mode 100644
index 0000000..b98abea
--- /dev/null
+++ b/docker/archive/src_test.go
@@ -0,0 +1,5 @@
+package archive
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageSource = (*archiveImageSource)(nil)
diff --git a/docker/archive/transport.go b/docker/archive/transport.go
new file mode 100644
index 0000000..39e92ca
--- /dev/null
+++ b/docker/archive/transport.go
@@ -0,0 +1,206 @@
+package archive
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/docker/reference"
+ ctrImage "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+ return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // See the explanation in archiveReference.PolicyConfigurationIdentity.
+ return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
+type archiveReference struct {
+ path string
+ // May be nil to read the only image in an archive, or to create an untagged image.
+ ref reference.NamedTagged
+ // If not -1, a zero-based index of the image in the manifest. Valid only for sources.
+ // Must not be set if ref is set.
+ sourceIndex int
+ // If not nil, must have been created from path (but archiveReader.path may point at a temporary
+ // file, not necessarily path precisely).
+ archiveReader *tarfile.Reader
+ // If not nil, must have been created for path
+ writer *Writer
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ if refString == "" {
+ return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+ }
+
+ path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
+ var nt reference.NamedTagged
+ sourceIndex := -1
+
+ if gotTagOrIndex {
+ // A :tag or :@index was specified.
+ if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' {
+ i, err := strconv.Atoi(tagOrIndex[1:])
+ if err != nil {
+ return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err)
+ }
+ if i < 0 {
+ return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
+ }
+ sourceIndex = i
+ } else {
+ ref, err := reference.ParseNormalizedNamed(tagOrIndex)
+ if err != nil {
+ return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
+ }
+ ref = reference.TagNameOnly(ref)
+ refTagged, isTagged := ref.(reference.NamedTagged)
+ if !isTagged { // If ref contains a digest, TagNameOnly does not change it
+ return nil, fmt.Errorf("reference does not include a tag: %s", ref.String())
+ }
+ nt = refTagged
+ }
+ }
+
+ return newReference(path, nt, sourceIndex, nil, nil)
+}
+
+// NewReference returns a Docker archive reference for a path and an optional reference.
+func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) {
+ return newReference(path, ref, -1, nil, nil)
+}
+
+// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index.
+func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) {
+ return newReference(path, nil, sourceIndex, nil, nil)
+}
+
+// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
+// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
+func newReference(path string, ref reference.NamedTagged, sourceIndex int,
+ archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) {
+ if strings.Contains(path, ":") {
+ return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
+ }
+ if ref != nil && sourceIndex != -1 {
+ return nil, fmt.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
+ }
+ if _, isDigest := ref.(reference.Canonical); isDigest {
+ return nil, fmt.Errorf("docker-archive doesn't support digest references: %s", ref.String())
+ }
+ if sourceIndex != -1 && sourceIndex < 0 {
+ return nil, fmt.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
+ }
+ return archiveReference{
+ path: path,
+ ref: ref,
+ sourceIndex: sourceIndex,
+ archiveReader: archiveReader,
+ writer: writer,
+ }, nil
+}
+
+func (ref archiveReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref archiveReference) StringWithinTransport() string {
+ switch {
+ case ref.ref != nil:
+ return fmt.Sprintf("%s:%s", ref.path, ref.ref.String())
+ case ref.sourceIndex != -1:
+ return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex)
+ default:
+ return ref.path
+ }
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref archiveReference) DockerReference() reference.Named {
+ return ref.ref
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref archiveReference) PolicyConfigurationIdentity() string {
+ // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
+ return ""
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref archiveReference) PolicyConfigurationNamespaces() []string {
+ // TODO
+ return []string{}
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return ctrImage.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ // Not really supported, for safety reasons.
+ return errors.New("Deleting images not implemented for docker-archive: images")
+}
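To summarize the reference syntax implemented above (editorial note, not part of the patch), a docker-archive reference is a path optionally followed by either a tag or a zero-based source index:

    archive.ParseReference("/tmp/img.tar")                // whole archive; when read, it is expected to contain a single image
    archive.ParseReference("/tmp/img.tar:busybox:latest") // a named, tagged image within the archive
    archive.ParseReference("/tmp/img.tar:@0")             // the first manifest entry, by index (valid for sources only)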
diff --git a/docker/archive/transport_test.go b/docker/archive/transport_test.go
new file mode 100644
index 0000000..5993685
--- /dev/null
+++ b/docker/archive/transport_test.go
@@ -0,0 +1,284 @@
+package archive
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ sha256digest = "@sha256:" + sha256digestHex
+ tarFixture = "fixtures/almostempty.tar"
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "docker-archive", Transport.Name())
+}
+
+func TestTransportParseReference(t *testing.T) {
+ testParseReference(t, Transport.ParseReference)
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ for _, scope := range []string{ // A semi-representative assortment of values; everything is rejected.
+ "docker.io/library/busybox:notlatest",
+ "docker.io/library/busybox",
+ "docker.io/library",
+ "docker.io",
+ "",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.Error(t, err, scope)
+ }
+}
+
+func TestParseReference(t *testing.T) {
+ testParseReference(t, ParseReference)
+}
+
+// testParseReference is a test shared for Transport.ParseReference and ParseReference.
+func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
+ for _, c := range []struct {
+ input, expectedPath, expectedRef string
+ expectedSourceIndex int
+ }{
+ {"", "", "", -1}, // Empty input is explicitly rejected
+ {"/path", "/path", "", -1},
+ {"/path:busybox:notlatest", "/path", "docker.io/library/busybox:notlatest", -1}, // Explicit tag
+ {"/path:busybox" + sha256digest, "", "", -1}, // Digest references are forbidden
+ {"/path:busybox", "/path", "docker.io/library/busybox:latest", -1}, // Default tag
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ {"/path:busybox:latest" + sha256digest, "", "", -1}, // Both tag and digest is rejected
+ {"/path:docker.io/library/busybox:latest", "/path", "docker.io/library/busybox:latest", -1}, // All implied reference parts explicitly specified
+ {"/path:UPPERCASEISINVALID", "", "", -1}, // Invalid reference format
+ {"/path:@", "", "", -1}, // Missing source index
+ {"/path:@0", "/path", "", 0}, // Valid source index
+ {"/path:@999999", "/path", "", 999999}, // Valid source index
+ {"/path:@-2", "", "", -1}, // Negative source index
+ {"/path:@-1", "", "", -1}, // Negative source index, using the placeholder value
+ {"/path:busybox@0", "", "", -1}, // References and source indices can’t be combined.
+ {"/path:@0:busybox", "", "", -1}, // References and source indices can’t be combined.
+ } {
+ ref, err := fn(c.input)
+ if c.expectedPath == "" {
+ assert.Error(t, err, c.input)
+ } else {
+ require.NoError(t, err, c.input)
+ archiveRef, ok := ref.(archiveReference)
+ require.True(t, ok, c.input)
+ assert.Equal(t, c.expectedPath, archiveRef.path, c.input)
+ if c.expectedRef == "" {
+ assert.Nil(t, archiveRef.ref, c.input)
+ } else {
+ require.NotNil(t, archiveRef.ref, c.input)
+ assert.Equal(t, c.expectedRef, archiveRef.ref.String(), c.input)
+ }
+ assert.Equal(t, c.expectedSourceIndex, archiveRef.sourceIndex, c.input)
+ }
+ }
+}
+
+// namedTaggedRef returns a reference.NamedTagged for input
+func namedTaggedRef(t *testing.T, input string) reference.NamedTagged {
+ named, err := reference.ParseNormalizedNamed(input)
+ require.NoError(t, err, input)
+ nt, ok := named.(reference.NamedTagged)
+ require.True(t, ok, input)
+ return nt
+}
+
+func TestNewReference(t *testing.T) {
+ for _, path := range []string{"relative", "/absolute"} {
+ for _, c := range []struct {
+ ref string
+ ok bool
+ }{
+ {"busybox:notlatest", true},
+ {"busybox:notlatest" + sha256digest, false},
+ {"", true},
+ } {
+ var ntRef reference.NamedTagged = nil
+ if c.ref != "" {
+ ntRef = namedTaggedRef(t, c.ref)
+ }
+
+ res, err := NewReference(path, ntRef)
+ if !c.ok {
+ assert.Error(t, err, c.ref)
+ } else {
+ require.NoError(t, err, c.ref)
+ archiveRef, ok := res.(archiveReference)
+ require.True(t, ok, c.ref)
+ assert.Equal(t, path, archiveRef.path)
+ if c.ref == "" {
+ assert.Nil(t, archiveRef.ref, c.ref)
+ } else {
+ require.NotNil(t, archiveRef.ref, c.ref)
+ assert.Equal(t, ntRef.String(), archiveRef.ref.String(), c.ref)
+ }
+ assert.Equal(t, -1, archiveRef.sourceIndex, c.ref)
+ }
+ }
+ }
+ _, err := NewReference("with:colon", nil)
+ assert.Error(t, err)
+
+ // Complete coverage testing of the private newReference here as well
+ ntRef := namedTaggedRef(t, "busybox:latest")
+ _, err = newReference("path", ntRef, 0, nil, nil)
+ assert.Error(t, err)
+}
+
+func TestNewIndexReference(t *testing.T) {
+ for _, path := range []string{"relative", "/absolute"} {
+ for _, c := range []struct {
+ index int
+ ok bool
+ }{
+ {0, true},
+ {9999990, true},
+ {-1, true},
+ {-2, false},
+ } {
+ res, err := NewIndexReference(path, c.index)
+ if !c.ok {
+ assert.Error(t, err, c.index)
+ } else {
+ require.NoError(t, err, c.index)
+ archiveRef, ok := res.(archiveReference)
+ require.True(t, ok, c.index)
+ assert.Equal(t, path, archiveRef.path)
+ assert.Nil(t, archiveRef.ref, c.index)
+ assert.Equal(t, c.index, archiveRef.sourceIndex)
+ }
+ }
+ }
+ _, err := NewReference("with:colon", nil)
+ assert.Error(t, err)
+}
+
+// A common list of reference formats to test for the various ImageReference methods.
+var validReferenceTestCases = []struct {
+ input, dockerRef string
+ sourceIndex int
+ stringWithinTransport string
+}{
+ {"/pathonly", "", -1, "/pathonly"},
+ {"/path:busybox:notlatest", "docker.io/library/busybox:notlatest", -1, "/path:docker.io/library/busybox:notlatest"}, // Explicit tag
+ {"/path:docker.io/library/busybox:latest", "docker.io/library/busybox:latest", -1, "/path:docker.io/library/busybox:latest"}, // All implied reference part explicitly specified
+ {"/path:example.com/ns/foo:bar", "example.com/ns/foo:bar", -1, "/path:example.com/ns/foo:bar"}, // All values explicitly specified
+ {"/path:@0", "", 0, "/path:@0"},
+ {"/path:@999999", "", 999999, "/path:@999999"},
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, err := ParseReference("/tmp/archive.tar")
+ require.NoError(t, err)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ for _, c := range validReferenceTestCases {
+ ref, err := ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ stringRef := ref.StringWithinTransport()
+ assert.Equal(t, c.stringWithinTransport, stringRef, c.input)
+ // Do one more round to verify that the output can be parsed, to an equal value.
+ ref2, err := Transport.ParseReference(stringRef)
+ require.NoError(t, err, c.input)
+ stringRef2 := ref2.StringWithinTransport()
+ assert.Equal(t, stringRef, stringRef2, c.input)
+ }
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ for _, c := range validReferenceTestCases {
+ ref, err := ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ dockerRef := ref.DockerReference()
+ if c.dockerRef != "" {
+ require.NotNil(t, dockerRef, c.input)
+ assert.Equal(t, c.dockerRef, dockerRef.String(), c.input)
+ } else {
+ require.Nil(t, dockerRef, c.input)
+ }
+ }
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ for _, c := range validReferenceTestCases {
+ ref, err := ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ assert.Equal(t, "", ref.PolicyConfigurationIdentity(), c.input)
+ }
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ for _, c := range validReferenceTestCases {
+ ref, err := ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ assert.Empty(t, "", ref.PolicyConfigurationNamespaces(), c.input)
+ }
+}
+
+func TestReferenceNewImage(t *testing.T) {
+ for _, suffix := range []string{"", ":emptyimage:latest", ":@0"} {
+ ref, err := ParseReference(tarFixture + suffix)
+ require.NoError(t, err, suffix)
+ img, err := ref.NewImage(context.Background(), nil)
+ require.NoError(t, err, suffix)
+ defer img.Close()
+ }
+}
+
+func TestReferenceNewImageSource(t *testing.T) {
+ for _, suffix := range []string{"", ":emptyimage:latest", ":@0"} {
+ ref, err := ParseReference(tarFixture + suffix)
+ require.NoError(t, err, suffix)
+ src, err := ref.NewImageSource(context.Background(), nil)
+ require.NoError(t, err, suffix)
+ defer src.Close()
+ }
+}
+
+func TestReferenceNewImageDestination(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ ref, err := ParseReference(filepath.Join(tmpDir, "no-reference"))
+ require.NoError(t, err)
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ assert.NoError(t, err)
+ dest.Close()
+
+ ref, err = ParseReference(filepath.Join(tmpDir, "with-reference") + "busybox:latest")
+ require.NoError(t, err)
+ dest, err = ref.NewImageDestination(context.Background(), nil)
+ assert.NoError(t, err)
+ defer dest.Close()
+}
+
+func TestReferenceDeleteImage(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for i, suffix := range []string{"", ":some-reference", ":@0"} {
+ testFile := filepath.Join(tmpDir, fmt.Sprintf("file%d.tar", i))
+ err := os.WriteFile(testFile, []byte("nonempty"), 0644)
+ require.NoError(t, err, suffix)
+
+ ref, err := ParseReference(testFile + suffix)
+ require.NoError(t, err, suffix)
+ err = ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err, suffix)
+
+ _, err = os.Lstat(testFile)
+ assert.NoError(t, err, suffix)
+ }
+}
diff --git a/docker/archive/writer.go b/docker/archive/writer.go
new file mode 100644
index 0000000..11f797c
--- /dev/null
+++ b/docker/archive/writer.go
@@ -0,0 +1,103 @@
+package archive
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+)
+
+// Writer manages a single in-progress Docker archive and allows adding images to it.
+type Writer struct {
+ path string // The original, user-specified path; not the maintained temporary file, if any
+ regularFile bool // path refers to a regular file (e.g. not a pipe)
+ archive *tarfile.Writer
+ writer io.Closer
+
+ // The following state can only be accessed with the mutex held.
+ mutex sync.Mutex
+ hadCommit bool // At least one successful commit has happened
+}
+
+// NewWriter returns a Writer for path.
+// The caller should call .Close() on the returned object.
+func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
+ // path can be either a pipe or a regular file
+ // in the case of a pipe, we require that we can open it for write
+ // in the case of a regular file, we don't want to overwrite any pre-existing file
+ // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
+ // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
+ fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ fh.Close()
+ }
+ }()
+
+ fhStat, err := fh.Stat()
+ if err != nil {
+ return nil, fmt.Errorf("statting file %q: %w", path, err)
+ }
+ regularFile := fhStat.Mode().IsRegular()
+ if regularFile && fhStat.Size() != 0 {
+ return nil, errors.New("docker-archive doesn't support modifying existing images")
+ }
+
+ archive := tarfile.NewWriter(fh)
+
+ succeeded = true
+ return &Writer{
+ path: path,
+ regularFile: regularFile,
+ archive: archive,
+ writer: fh,
+ hadCommit: false,
+ }, nil
+}
+
+// imageCommitted notifies the Writer that at least one image was successfully committed to the stream.
+func (w *Writer) imageCommitted() {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ w.hadCommit = true
+}
+
+// Close writes all outstanding data about images to the archive, and
+// releases state associated with the Writer, if any.
+// No more images can be added after this is called.
+func (w *Writer) Close() error {
+ err := w.archive.Close()
+ if err2 := w.writer.Close(); err2 != nil && err == nil {
+ err = err2
+ }
+ if err == nil && w.regularFile && !w.hadCommit {
+ // Writing to the destination never had a success; delete the destination if we created it.
+ // This is done primarily because we don’t implement adding another image to a pre-existing image, so if we
+ // left a partial archive around (notably because reading from the _source_ has failed), we couldn’t retry without
+ // the caller manually deleting the partial archive. So, delete it instead.
+ //
+ // Archives with at least one successfully created image are left around; they might still be valuable.
+ //
+ // Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it.
+ // Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success.
+ if err2 := os.Remove(w.path); err2 != nil {
+ err = err2
+ }
+ }
+ return err
+}
+
+// NewReference returns an ImageReference that allows adding an image to Writer,
+// with an optional reference.
+func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
+ return newReference(w.path, destinationRef, -1, nil, w)
+}
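A minimal sketch of the multi-image Writer flow above (editorial, not part of the patch); the actual copy step lives in the copy package and is only indicated by a comment:

    package main

    import (
        "github.com/containers/image/v5/docker/archive"
    )

    func main() {
        w, err := archive.NewWriter(nil, "/tmp/out.tar") // hypothetical output path
        if err != nil {
            panic(err)
        }
        defer w.Close() // finalizes the tar stream; removes it if no image was committed

        destRef, err := w.NewReference(nil) // a nil NamedTagged produces an untagged image
        if err != nil {
            panic(err)
        }
        _ = destRef // use destRef as the destination of copy.Image(...), once per image to add
    }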
diff --git a/docker/body_reader.go b/docker/body_reader.go
new file mode 100644
index 0000000..7d66ef6
--- /dev/null
+++ b/docker/body_reader.go
@@ -0,0 +1,253 @@
+package docker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // bodyReaderMinimumProgress is the minimum progress we consider a good reason to retry
+ bodyReaderMinimumProgress = 1 * 1024 * 1024
+ // bodyReaderMSSinceLastRetry is the minimum time since a last retry we consider a good reason to retry
+ bodyReaderMSSinceLastRetry = 60 * 1_000
+)
+
+// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob,
+// which can transparently resume some (very limited) kinds of aborted connections.
+type bodyReader struct {
+ ctx context.Context
+ c *dockerClient
+ path string // path to pass to makeRequest to retry
+ logURL *url.URL // a string to use in error messages
+ firstConnectionTime time.Time
+
+ body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close.
+ lastRetryOffset int64 // -1 if N/A
+ lastRetryTime time.Time // time.Time{} if N/A
+ offset int64 // Current offset within the blob
+ lastSuccessTime time.Time // time.Time{} if N/A
+}
+
+// newBodyReader creates a bodyReader for request path in c.
+// firstBody is an already correctly opened body for the blob, returning the full blob from the start.
+// If reading from firstBody fails, bodyReader may heuristically decide to resume.
+func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) {
+ logURL, err := c.resolveRequestURL(path)
+ if err != nil {
+ return nil, err
+ }
+ res := &bodyReader{
+ ctx: ctx,
+ c: c,
+ path: path,
+ logURL: logURL,
+ firstConnectionTime: time.Now(),
+
+ body: firstBody,
+ lastRetryOffset: -1,
+ lastRetryTime: time.Time{},
+ offset: 0,
+ lastSuccessTime: time.Time{},
+ }
+ return res, nil
+}
+
+// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number.
+func parseDecimalInString(s string, start int) (int64, int, error) {
+ i := start
+ for i < len(s) && s[i] >= '0' && s[i] <= '9' {
+ i++
+ }
+ if i == start {
+ return -1, -1, errors.New("missing decimal number")
+ }
+ v, err := strconv.ParseInt(s[start:i], 10, 64)
+ if err != nil {
+ return -1, -1, fmt.Errorf("parsing number: %w", err)
+ }
+ return v, i, nil
+}
+
+// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it.
+func parseExpectedChar(s string, pos int, expected byte) (int, error) {
+ if pos == len(s) || s[pos] != expected {
+ return -1, fmt.Errorf("missing expected %q", expected)
+ }
+ return pos + 1, nil
+}
+
+// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. Size can be -1.
+func parseContentRange(res *http.Response) (int64, int64, int64, error) {
+ hdrs := res.Header.Values("Content-Range")
+ switch len(hdrs) {
+ case 0:
+ return -1, -1, -1, errors.New("missing Content-Range: header")
+ case 1:
+ break
+ default:
+ return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs))
+ }
+ hdr := hdrs[0]
+ expectedPrefix := "bytes "
+ if !strings.HasPrefix(hdr, expectedPrefix) {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix)
+ }
+ first, pos, err := parseDecimalInString(hdr, len(expectedPrefix))
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err)
+ }
+ pos, err = parseExpectedChar(hdr, pos, '-')
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
+ }
+ last, pos, err := parseDecimalInString(hdr, pos)
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err)
+ }
+ pos, err = parseExpectedChar(hdr, pos, '/')
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
+ }
+ completeLength := int64(-1)
+ if pos < len(hdr) && hdr[pos] == '*' {
+ pos++
+ } else {
+ completeLength, pos, err = parseDecimalInString(hdr, pos)
+ if err != nil {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err)
+ }
+ }
+ if pos < len(hdr) {
+ return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr)
+ }
+ return first, last, completeLength, nil
+}
+
+// Read implements io.ReadCloser
+func (br *bodyReader) Read(p []byte) (int, error) {
+ if br.body == nil {
+ return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted())
+ }
+ n, err := br.body.Read(p)
+ br.offset += int64(n)
+ switch {
+ case err == nil || err == io.EOF:
+ br.lastSuccessTime = time.Now()
+ return n, err // Unlike the default: case, don’t log anything.
+
+ case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET):
+ originalErr := err
+ redactedURL := br.logURL.Redacted()
+ if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil {
+ return n, err
+ }
+
+ if err := br.body.Close(); err != nil {
+ logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
+ }
+ br.body = nil
+ time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
+
+ headers := map[string][]string{
+ "Range": {fmt.Sprintf("bytes=%d-", br.offset)},
+ }
+ res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err)
+ }
+ consumedBody := false
+ defer func() {
+ if !consumedBody {
+ res.Body.Close()
+ }
+ }()
+ switch res.StatusCode {
+ case http.StatusPartialContent: // OK
+ // A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed.
+ // The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation.
+ first, last, completeLength, err := parseContentRange(res)
+ if err != nil {
+ return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err)
+ }
+ // We don’t handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob.
+ if first != br.offset || (completeLength != -1 && last+1 != completeLength) {
+ return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength)
+ }
+ // Continue below
+ case http.StatusOK:
+ return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK)
+ default:
+ err := registryHTTPResponseToError(res)
+ return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err)
+ }
+
+ logrus.Debugf("Successfully reconnected to %s", redactedURL)
+ consumedBody = true
+ br.body = res.Body
+ br.lastRetryOffset = br.offset
+ br.lastRetryTime = time.Now()
+ return n, nil
+
+ default:
+ logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err)
+ return n, err
+ }
+}
+
+// millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value.
+// If tm is time.Time{}, it returns math.NaN()
+func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 {
+ if tm == (time.Time{}) {
+ return math.NaN()
+ }
+ return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0
+}
+
+// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil,
+// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic)
+func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error {
+ currentTime := time.Now()
+ msSinceFirstConnection := millisecondsSinceOptional(currentTime, br.firstConnectionTime)
+ msSinceLastRetry := millisecondsSinceOptional(currentTime, br.lastRetryTime)
+ msSinceLastSuccess := millisecondsSinceOptional(currentTime, br.lastSuccessTime)
+ logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: total %d @%.3f ms, last retry %d @%.3f ms, last progress @%.3f ms",
+ redactedURL, originalErr, br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess)
+ progress := br.offset - br.lastRetryOffset
+ if progress >= bodyReaderMinimumProgress {
+ logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress)
+ return nil
+ }
+ if br.lastRetryTime == (time.Time{}) {
+ logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr)
+ return nil
+ }
+ if msSinceLastRetry >= bodyReaderMSSinceLastRetry {
+ logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry)
+ return nil
+ }
+ logrus.Debugf("Not reconnecting to %s: insufficient progress %d / time since last retry %.3f ms", redactedURL, progress, msSinceLastRetry)
+ return fmt.Errorf("(heuristic tuning data: total %d @%.3f ms, last retry %d @%.3f ms, last progress @ %.3f ms): %w",
+ br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess, originalErr)
+}
+
+// Close implements io.ReadCloser
+func (br *bodyReader) Close() error {
+ if br.body == nil {
+ return nil
+ }
+ err := br.body.Close()
+ br.body = nil
+ return err
+}
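
The Read method above recovers from mid-stream failures by re-requesting the rest of the blob with an HTTP Range header and insisting on a 206 Partial Content reply before resuming. A minimal, self-contained sketch of that resume step, independent of the bodyReader type and using only the standard library (the URL is a placeholder):

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// resumeFrom requests everything from offset onwards and refuses to continue
// unless the server honored the Range header with 206 Partial Content.
func resumeFrom(url string, offset int64) (io.ReadCloser, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	if res.StatusCode != http.StatusPartialContent {
		res.Body.Close()
		return nil, fmt.Errorf("server ignored the Range header: %s", res.Status)
	}
	// A production implementation would also validate Content-Range against
	// the requested offset, as parseContentRange and the 206 branch above do.
	return res.Body, nil
}

func main() {
	body, err := resumeFrom("https://registry.example.com/v2/some/blob", 4096) // placeholder URL
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer body.Close()
	n, _ := io.Copy(io.Discard, body)
	fmt.Println("resumed and read", n, "bytes")
}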
diff --git a/docker/body_reader_test.go b/docker/body_reader_test.go
new file mode 100644
index 0000000..0011582
--- /dev/null
+++ b/docker/body_reader_test.go
@@ -0,0 +1,196 @@
+package docker
+
+import (
+ "errors"
+ "math"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseDecimalInString(t *testing.T) {
+ for _, prefix := range []string{"", "text", "0"} {
+ for _, suffix := range []string{"", "text"} {
+ for _, c := range []struct {
+ s string
+ v int64
+ }{
+ {"0", 0},
+ {"1", 1},
+ {"0700", 700}, // not octal
+ } {
+ input := prefix + c.s + suffix
+ res, pos, err := parseDecimalInString(input, len(prefix))
+ require.NoError(t, err, input)
+ assert.Equal(t, c.v, res, input)
+ assert.Equal(t, len(prefix)+len(c.s), pos, input)
+ }
+ for _, c := range []string{
+ "-1",
+ "xA",
+ "&",
+ "",
+ "999999999999999999999999999999999999999999999999999999999999999999",
+ } {
+ input := prefix + c + suffix
+ _, _, err := parseDecimalInString(input, len(prefix))
+ assert.Error(t, err, c)
+ }
+ }
+ }
+}
+
+func TestParseExpectedChar(t *testing.T) {
+ for _, prefix := range []string{"", "text", "0"} {
+ for _, suffix := range []string{"", "text"} {
+ input := prefix + "+" + suffix
+ pos, err := parseExpectedChar(input, len(prefix), '+')
+ require.NoError(t, err, input)
+ assert.Equal(t, len(prefix)+1, pos, input)
+
+ _, err = parseExpectedChar(input, len(prefix), '-')
+ assert.Error(t, err, input)
+ }
+ }
+}
+
+func TestParseContentRange(t *testing.T) {
+ for _, c := range []struct {
+ in string
+ first, last, completeLength int64
+ }{
+ {"bytes 0-0/1", 0, 0, 1},
+ {"bytes 010-020/030", 10, 20, 30},
+ {"bytes 1000-1010/*", 1000, 1010, -1},
+ } {
+ first, last, completeLength, err := parseContentRange(&http.Response{
+ Header: http.Header{
+ http.CanonicalHeaderKey("Content-Range"): []string{c.in},
+ },
+ })
+ require.NoError(t, err, c.in)
+ assert.Equal(t, c.first, first, c.in)
+ assert.Equal(t, c.last, last, c.in)
+ assert.Equal(t, c.completeLength, completeLength, c.in)
+ }
+
+ for _, hdr := range []http.Header{
+ nil,
+ {http.CanonicalHeaderKey("Content-Range"): []string{}},
+ {http.CanonicalHeaderKey("Content-Range"): []string{"bytes 1-2/3", "bytes 1-2/3"}},
+ } {
+ _, _, _, err := parseContentRange(&http.Response{
+ Header: hdr,
+ })
+ assert.Error(t, err)
+ }
+
+ for _, c := range []string{
+ "",
+ "notbytes 1-2/3",
+ "bytes ",
+ "bytes x-2/3",
+ "bytes 1*2/3",
+ "bytes 1",
+ "bytes 1-",
+ "bytes 1-x/3",
+ "bytes 1-2",
+ "bytes 1-2@3",
+ "bytes 1-2/",
+ "bytes 1-2/*a",
+ "bytes 1-2/3a",
+ } {
+ _, _, _, err := parseContentRange(&http.Response{
+ Header: http.Header{
+ http.CanonicalHeaderKey("Content-Range"): []string{c},
+ },
+ })
+ assert.Error(t, err, c, c)
+ }
+}
+
+func TestMillisecondsSinceOptional(t *testing.T) {
+ current := time.Date(2023, 2, 9, 8, 7, 6, 5, time.UTC)
+ res := millisecondsSinceOptional(current, time.Time{})
+ assert.True(t, math.IsNaN(res))
+ tm := current.Add(-60 * time.Second) // 60 seconds _before_ current
+ res = millisecondsSinceOptional(current, tm)
+ assert.Equal(t, res, 60_000.0)
+}
+
+func TestBodyReaderErrorIfNotReconnecting(t *testing.T) {
+ // Silence logrus.Info logs in the tested method
+ prevLevel := logrus.StandardLogger().Level
+ logrus.StandardLogger().SetLevel(logrus.WarnLevel)
+ t.Cleanup(func() {
+ logrus.StandardLogger().SetLevel(prevLevel)
+ })
+
+ for _, c := range []struct {
+ name string
+ previousRetry bool
+ currentOffset int64
+ currentTime int // milliseconds
+ expectReconnect bool
+ }{
+ {
+ name: "A lot of progress, after a long time, second retry",
+ previousRetry: true,
+ currentOffset: 2 * bodyReaderMinimumProgress,
+ currentTime: 2 * bodyReaderMSSinceLastRetry,
+ expectReconnect: true,
+ },
+ {
+ name: "A lot of progress, after little time, second retry",
+ previousRetry: true,
+ currentOffset: 2 * bodyReaderMinimumProgress,
+ currentTime: 1,
+ expectReconnect: true,
+ },
+ {
+ name: "Little progress, after a long time, second retry",
+ previousRetry: true,
+ currentOffset: 1,
+ currentTime: 2 * bodyReaderMSSinceLastRetry,
+ expectReconnect: true,
+ },
+ {
+ name: "Little progress, after little time, second retry",
+ previousRetry: true,
+ currentOffset: 1,
+ currentTime: 1,
+ expectReconnect: false,
+ },
+ {
+ name: "Little progress, after little time, first retry",
+ previousRetry: false,
+ currentOffset: 1,
+ currentTime: bodyReaderMSSinceLastRetry / 2,
+ expectReconnect: true,
+ },
+ } {
+ tm := time.Now()
+ br := bodyReader{}
+ if c.previousRetry {
+ br.lastRetryOffset = 2 * bodyReaderMinimumProgress
+ br.offset = br.lastRetryOffset + c.currentOffset
+ br.firstConnectionTime = tm.Add(-time.Duration(c.currentTime+2*bodyReaderMSSinceLastRetry) * time.Millisecond)
+ br.lastRetryTime = tm.Add(-time.Duration(c.currentTime) * time.Millisecond)
+ } else {
+ br.lastRetryOffset = -1
+ br.lastRetryTime = time.Time{}
+ br.offset = c.currentOffset
+ br.firstConnectionTime = tm.Add(-time.Duration(c.currentTime) * time.Millisecond)
+ }
+ err := br.errorIfNotReconnecting(errors.New("some error for error text only"), "URL for error text only")
+ if c.expectReconnect {
+ assert.NoError(t, err, c.name, br)
+ } else {
+ assert.Error(t, err, c.name, br)
+ }
+ }
+}
diff --git a/docker/cache.go b/docker/cache.go
new file mode 100644
index 0000000..728d32d
--- /dev/null
+++ b/docker/cache.go
@@ -0,0 +1,23 @@
+package docker
+
+import (
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+)
+
+// bicTransportScope returns a BICTransportScope appropriate for ref.
+func bicTransportScope(ref dockerReference) types.BICTransportScope {
+ // Blobs can be reused across the whole registry.
+ return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
+}
+
+// newBICLocationReference returns a BICLocationReference appropriate for ref.
+func newBICLocationReference(ref dockerReference) types.BICLocationReference {
+ // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
+ return types.BICLocationReference{Opaque: ref.ref.Name()}
+}
+
+// parseBICLocationReference returns a repository for encoded lr.
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+ return reference.ParseNormalizedNamed(lr.Opaque)
+}
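
The two scopes above are intentionally different: blob presence is recorded per registry (blobs can be reused or mounted across repositories on the same registry), while known blob locations are recorded per repository. A small sketch, using a hypothetical image name, of the values these helpers derive:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// Hypothetical example name; mirrors what bicTransportScope and
	// newBICLocationReference extract from a dockerReference.
	ref, err := reference.ParseNormalizedNamed("quay.io/ns/app:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println("transport scope (registry):", reference.Domain(ref)) // quay.io
	fmt.Println("location (repository):", ref.Name())                 // quay.io/ns/app
}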
diff --git a/docker/daemon/client.go b/docker/daemon/client.go
new file mode 100644
index 0000000..354af21
--- /dev/null
+++ b/docker/daemon/client.go
@@ -0,0 +1,96 @@
+package daemon
+
+import (
+ "net/http"
+ "path/filepath"
+
+ "github.com/containers/image/v5/types"
+ dockerclient "github.com/docker/docker/client"
+ "github.com/docker/go-connections/tlsconfig"
+)
+
+// newDockerClient initializes a new API client based on the passed SystemContext.
+func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
+ host := dockerclient.DefaultDockerHost
+ if sys != nil && sys.DockerDaemonHost != "" {
+ host = sys.DockerDaemonHost
+ }
+
+ opts := []dockerclient.Opt{
+ dockerclient.WithHost(host),
+ dockerclient.WithAPIVersionNegotiation(),
+ }
+
+	// We build the TLS configuration only for TLS-capable sockets:
+ //
+ // The dockerclient.Client implementation differentiates between
+	// - Client.proto, which is ~how the connection is established (IP / AF_UNIX/Windows)
+ // - Client.scheme, which is what is sent over the connection (HTTP with/without TLS).
+ //
+ // Only Client.proto is set from the URL in dockerclient.WithHost(),
+ // Client.scheme is detected based on a http.Client.TLSClientConfig presence;
+ // dockerclient.WithHTTPClient with a client that has TLSClientConfig set
+ // will, by default, trigger an attempt to use TLS.
+ //
+ // So, don’t use WithHTTPClient for unix:// sockets at all.
+ //
+ // Similarly, if we want to communicate over plain HTTP on a TCP socket (http://),
+ // we also should not set TLSClientConfig. We continue to use WithHTTPClient
+ // with our slightly non-default settings to avoid a behavior change on updates of c/image.
+ //
+ // Alternatively we could use dockerclient.WithScheme to drive the TLS/non-TLS logic
+ // explicitly, but we would still want to set WithHTTPClient (differently) for https:// and http:// ;
+ // so that would not be any simpler.
+ serverURL, err := dockerclient.ParseHostURL(host)
+ if err != nil {
+ return nil, err
+ }
+ switch serverURL.Scheme {
+ case "unix": // Nothing
+ case "http":
+ hc := httpConfig()
+ opts = append(opts, dockerclient.WithHTTPClient(hc))
+ default:
+ hc, err := tlsConfig(sys)
+ if err != nil {
+ return nil, err
+ }
+ opts = append(opts, dockerclient.WithHTTPClient(hc))
+ }
+
+ return dockerclient.NewClientWithOpts(opts...)
+}
+
+func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
+ options := tlsconfig.Options{}
+ if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify {
+ options.InsecureSkipVerify = true
+ }
+
+ if sys != nil && sys.DockerDaemonCertPath != "" {
+ options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem")
+ options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem")
+ options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem")
+ }
+
+ tlsc, err := tlsconfig.Client(options)
+ if err != nil {
+ return nil, err
+ }
+
+ return &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsc,
+ },
+ CheckRedirect: dockerclient.CheckRedirect,
+ }, nil
+}
+
+func httpConfig() *http.Client {
+ return &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: nil,
+ },
+ CheckRedirect: dockerclient.CheckRedirect,
+ }
+}
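
The dispatch above is driven entirely by the scheme that dockerclient.ParseHostURL extracts from the daemon host string. A minimal sketch of the same decision for a few illustrative host values (the addresses are examples, not defaults):

package main

import (
	"fmt"

	dockerclient "github.com/docker/docker/client"
)

func main() {
	for _, host := range []string{
		"unix:///var/run/docker.sock", // local socket: keep the default HTTP client
		"http://127.0.0.1:2375",       // plain HTTP over TCP: custom client, no TLS config
		"tcp://127.0.0.1:2376",        // anything else: custom client with TLS config
	} {
		u, err := dockerclient.ParseHostURL(host)
		if err != nil {
			fmt.Println(host, "->", err)
			continue
		}
		switch u.Scheme {
		case "unix":
			fmt.Println(host, "-> default HTTP client")
		case "http":
			fmt.Println(host, "-> httpConfig()")
		default:
			fmt.Println(host, "-> tlsConfig(sys)")
		}
	}
}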
diff --git a/docker/daemon/client_test.go b/docker/daemon/client_test.go
new file mode 100644
index 0000000..021e24d
--- /dev/null
+++ b/docker/daemon/client_test.go
@@ -0,0 +1,108 @@
+package daemon
+
+import (
+ "net/http"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ dockerclient "github.com/docker/docker/client"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDockerClientFromNilSystemContext(t *testing.T) {
+ client, err := newDockerClient(nil)
+
+ assert.Nil(t, err, "There should be no error creating the Docker client")
+ assert.NotNil(t, client, "A Docker client reference should have been returned")
+
+ assert.Equal(t, dockerclient.DefaultDockerHost, client.DaemonHost(), "The default docker host should have been used")
+
+ assert.NoError(t, client.Close())
+}
+
+func TestDockerClientFromCertContext(t *testing.T) {
+ testDir := testDir(t)
+
+ host := "tcp://127.0.0.1:2376"
+ systemCtx := &types.SystemContext{
+ DockerDaemonCertPath: filepath.Join(testDir, "testdata", "certs"),
+ DockerDaemonHost: host,
+ DockerDaemonInsecureSkipTLSVerify: true,
+ }
+
+ client, err := newDockerClient(systemCtx)
+
+ assert.Nil(t, err, "There should be no error creating the Docker client")
+ assert.NotNil(t, client, "A Docker client reference should have been returned")
+
+ assert.Equal(t, host, client.DaemonHost())
+
+ assert.NoError(t, client.Close())
+}
+
+func TestTlsConfigFromInvalidCertPath(t *testing.T) {
+ ctx := &types.SystemContext{
+ DockerDaemonCertPath: "/foo/bar",
+ }
+
+ _, err := tlsConfig(ctx)
+ assert.ErrorContains(t, err, "could not read CA certificate")
+}
+
+func TestTlsConfigFromCertPath(t *testing.T) {
+ testDir := testDir(t)
+
+ ctx := &types.SystemContext{
+ DockerDaemonCertPath: filepath.Join(testDir, "testdata", "certs"),
+ DockerDaemonInsecureSkipTLSVerify: true,
+ }
+
+ httpClient, err := tlsConfig(ctx)
+
+ assert.NoError(t, err, "There should be no error creating the HTTP client")
+
+ tlsConfig := httpClient.Transport.(*http.Transport).TLSClientConfig
+ assert.True(t, tlsConfig.InsecureSkipVerify, "TLS verification should be skipped")
+ assert.Len(t, tlsConfig.Certificates, 1, "There should be one certificate")
+}
+
+func TestSkipTLSVerifyOnly(t *testing.T) {
+
+ ctx := &types.SystemContext{
+ DockerDaemonInsecureSkipTLSVerify: true,
+ }
+
+ httpClient, err := tlsConfig(ctx)
+
+ assert.NoError(t, err, "There should be no error creating the HTTP client")
+
+ tlsConfig := httpClient.Transport.(*http.Transport).TLSClientConfig
+ assert.True(t, tlsConfig.InsecureSkipVerify, "TLS verification should be skipped")
+ assert.Len(t, tlsConfig.Certificates, 0, "There should be no certificate")
+}
+
+func TestSpecifyPlainHTTPViaHostScheme(t *testing.T) {
+ host := "http://127.0.0.1:2376"
+ ctx := &types.SystemContext{
+ DockerDaemonHost: host,
+ }
+
+ client, err := newDockerClient(ctx)
+
+ assert.Nil(t, err, "There should be no error creating the Docker client")
+ assert.NotNil(t, client, "A Docker client reference should have been returned")
+
+ assert.Equal(t, host, client.DaemonHost())
+ assert.NoError(t, client.Close())
+}
+
+func testDir(t *testing.T) string {
+ testDir, err := os.Getwd()
+ if err != nil {
+ t.Fatal("Unable to determine the current test directory")
+ }
+ return testDir
+}
diff --git a/docker/daemon/daemon_dest.go b/docker/daemon/daemon_dest.go
new file mode 100644
index 0000000..55431db
--- /dev/null
+++ b/docker/daemon/daemon_dest.go
@@ -0,0 +1,186 @@
+package daemon
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+ "github.com/docker/docker/client"
+ "github.com/sirupsen/logrus"
+)
+
+type daemonImageDestination struct {
+ ref daemonReference
+ mustMatchRuntimeOS bool
+ *tarfile.Destination // Implements most of types.ImageDestination
+ archive *tarfile.Writer
+ // For talking to imageLoadGoroutine
+ goroutineCancel context.CancelFunc
+ statusChannel <-chan error
+ writer *io.PipeWriter
+ // Other state
+ committed bool // writer has been closed
+}
+
+// newImageDestination returns a types.ImageDestination for the specified image reference.
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageDestination, error) {
+ if ref.ref == nil {
+ return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ }
+ namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
+ if !ok {
+ return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+ }
+
+ var mustMatchRuntimeOS = true
+ if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost {
+ mustMatchRuntimeOS = false
+ }
+
+ c, err := newDockerClient(sys)
+ if err != nil {
+ return nil, fmt.Errorf("initializing docker engine client: %w", err)
+ }
+
+ reader, writer := io.Pipe()
+ archive := tarfile.NewWriter(writer)
+ // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
+ statusChannel := make(chan error, 1)
+
+ goroutineContext, goroutineCancel := context.WithCancel(ctx)
+ go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
+
+ return &daemonImageDestination{
+ ref: ref,
+ mustMatchRuntimeOS: mustMatchRuntimeOS,
+ Destination: tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef),
+ archive: archive,
+ goroutineCancel: goroutineCancel,
+ statusChannel: statusChannel,
+ writer: writer,
+ committed: false,
+ }, nil
+}
+
+// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
+func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) {
+ defer c.Close()
+ err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
+ defer func() {
+ logrus.Debugf("docker-daemon: sending done, status %v", err)
+ statusChannel <- err
+ }()
+ defer func() {
+ if err == nil {
+ reader.Close()
+ } else {
+ if err := reader.CloseWithError(err); err != nil {
+ logrus.Debugf("imageLoadGoroutine: Error during reader.CloseWithError: %v", err)
+ }
+ }
+ }()
+
+ err = imageLoad(ctx, c, reader)
+}
+
+// imageLoad accepts tar stream on reader and sends it to c
+func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error {
+ resp, err := c.ImageLoad(ctx, reader, true)
+ if err != nil {
+ return fmt.Errorf("starting a load operation in docker engine: %w", err)
+ }
+ defer resp.Body.Close()
+
+ // jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage,
+ // copied here to minimize dependencies.
+ type jsonError struct {
+ Message string `json:"message,omitempty"`
+ }
+ type jsonMessage struct {
+ Error *jsonError `json:"errorDetail,omitempty"`
+ }
+
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var msg jsonMessage
+ if err := dec.Decode(&msg); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return fmt.Errorf("parsing docker load progress: %w", err)
+ }
+ if msg.Error != nil {
+ return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
+ }
+ }
+ return nil // No error reported = success
+}
+
+// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
+func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression {
+ return types.PreserveOriginal
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
+ return d.mustMatchRuntimeOS
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *daemonImageDestination) Close() error {
+ if !d.committed {
+ logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
+ // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
+ // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
+ // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
+ // net/http version with native Context support in Go 1.7) do not always actually immediately cancel
+ // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
+ // return early if the context is canceled without terminating the goroutine at all.
+ // So we need this CloseWithError to terminate sending the HTTP request Body
+		// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
+ // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
+ // Whether that works or not, closing the PipeWriter seems desirable in any case.
+ if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil {
+ return err
+ }
+ }
+ d.goroutineCancel()
+
+ return nil
+}
+
+func (d *daemonImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ logrus.Debugf("docker-daemon: Closing tar stream")
+ if err := d.archive.Close(); err != nil {
+ return err
+ }
+ if err := d.writer.Close(); err != nil {
+ return err
+ }
+ d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
+
+ logrus.Debugf("docker-daemon: Waiting for status")
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case err := <-d.statusChannel:
+ return err
+ }
+}
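
newImageDestination above wires the tar writer to an io.Pipe, hands the read end to a goroutine, and lets Commit() wait on a buffered status channel; the buffering is what allows the goroutine to finish even if Commit() is never called. A stripped-down sketch of that pattern with a stand-in consumer in place of the docker engine:

package main

import (
	"fmt"
	"io"
)

// consume stands in for the docker-engine load call; it just drains the stream
// and reports the outcome exactly once on the buffered channel.
func consume(r *io.PipeReader, status chan<- error) {
	_, err := io.Copy(io.Discard, r)
	if err != nil {
		r.CloseWithError(err)
	} else {
		r.Close()
	}
	status <- err // buffered: never blocks, even if nobody ever reads it
}

func main() {
	reader, writer := io.Pipe()
	status := make(chan error, 1)
	go consume(reader, status)

	// "Upload" some data, then commit by closing the writer and waiting for the result.
	if _, err := writer.Write([]byte("payload")); err != nil {
		fmt.Println("write:", err)
		return
	}
	if err := writer.Close(); err != nil {
		fmt.Println("close:", err)
		return
	}
	fmt.Println("status:", <-status)
}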
diff --git a/docker/daemon/daemon_dest_test.go b/docker/daemon/daemon_dest_test.go
new file mode 100644
index 0000000..51e5d09
--- /dev/null
+++ b/docker/daemon/daemon_dest_test.go
@@ -0,0 +1,5 @@
+package daemon
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageDestination = (*daemonImageDestination)(nil)
diff --git a/docker/daemon/daemon_src.go b/docker/daemon/daemon_src.go
new file mode 100644
index 0000000..10923c2
--- /dev/null
+++ b/docker/daemon/daemon_src.go
@@ -0,0 +1,56 @@
+package daemon
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+type daemonImageSource struct {
+ ref daemonReference
+ *tarfile.Source // Implements most of types.ImageSource
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+//
+// It would be great if we were able to stream the input tar as it is being
+// sent; but Docker sends the top-level manifest, which determines which paths
+// to look for, at the end, so we will need to seek back and re-read, several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
+// is the config, and that the following len(RootFS) files are the layers, but that feels
+// way too brittle.)
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageSource, error) {
+ c, err := newDockerClient(sys)
+ if err != nil {
+ return nil, fmt.Errorf("initializing docker engine client: %w", err)
+ }
+ defer c.Close()
+
+ // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
+ // Either way ImageSave should create a tarball with exactly one image.
+ inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
+ if err != nil {
+ return nil, fmt.Errorf("loading image from docker engine: %w", err)
+ }
+ defer inputStream.Close()
+
+ archive, err := tarfile.NewReaderFromStream(sys, inputStream)
+ if err != nil {
+ return nil, err
+ }
+ src := tarfile.NewSource(archive, true, ref.Transport().Name(), nil, -1)
+ return &daemonImageSource{
+ ref: ref,
+ Source: src,
+ }, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *daemonImageSource) Reference() types.ImageReference {
+ return s.ref
+}
diff --git a/docker/daemon/daemon_src_test.go b/docker/daemon/daemon_src_test.go
new file mode 100644
index 0000000..7214ce7
--- /dev/null
+++ b/docker/daemon/daemon_src_test.go
@@ -0,0 +1,5 @@
+package daemon
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageSource = (*daemonImageSource)(nil)
diff --git a/docker/daemon/daemon_transport.go b/docker/daemon/daemon_transport.go
new file mode 100644
index 0000000..9eedff7
--- /dev/null
+++ b/docker/daemon/daemon_transport.go
@@ -0,0 +1,219 @@
+package daemon
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for images managed by a local Docker daemon.
+var Transport = daemonTransport{}
+
+type daemonTransport struct{}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (t daemonTransport) Name() string {
+ return "docker-daemon"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // ID values cannot be effectively namespaced, and are clearly invalid host:port values.
+ if _, err := digest.Parse(scope); err == nil {
+ return fmt.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
+ }
+
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// daemonReference is an ImageReference for images managed by a local Docker daemon
+// Exactly one of id and ref can be set.
+// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
+// For daemonImageDestination, it must be a ref, which is NamedTagged.
+// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
+// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
+type daemonReference struct {
+ id digest.Digest
+ ref reference.Named // !reference.IsNameOnly
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases.
+ // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars).
+
+ // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag).
+ // reference.ParseAnyReference interprets such strings as digests.
+ if dgst, err := digest.Parse(refString); err == nil {
+ // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
+ // Other digest references are ambiguous, so refuse them.
+ if dgst.Algorithm() != digest.Canonical {
+ return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+ }
+ return NewReference(dgst, nil)
+ }
+
+ ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values
+ if err != nil {
+ return nil, err
+ }
+ if reference.FamiliarName(ref) == digest.Canonical.String() {
+ return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+ }
+ return NewReference("", ref)
+}
+
+// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly)
+func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) {
+ if id != "" && ref != nil {
+ return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time")
+ }
+ if ref != nil {
+ if reference.IsNameOnly(ref) {
+ return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ }
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input.
+ // This MAY be accepted in the future.
+ // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
+ // the tag or the digest first?)
+ _, isTagged := ref.(reference.NamedTagged)
+ _, isDigested := ref.(reference.Canonical)
+ if isTagged && isDigested {
+ return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+ }
+ }
+ return daemonReference{
+ id: id,
+ ref: ref,
+ }, nil
+}
+
+func (ref daemonReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+// instead, see transports.ImageName().
+func (ref daemonReference) StringWithinTransport() string {
+ switch {
+ case ref.id != "":
+ return ref.id.String()
+ case ref.ref != nil:
+ return reference.FamiliarString(ref.ref)
+ default: // Coverage: Should never happen, NewReference above should refuse such values.
+ panic("Internal inconsistency: daemonReference has empty id and nil ref")
+ }
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref daemonReference) DockerReference() reference.Named {
+ return ref.ref // May be nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref daemonReference) PolicyConfigurationIdentity() string {
+ // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible.
+ // But the existence of image IDs means that we can’t truly well namespace the input:
+ // a single image can be namespaced either using the name or the ID depending on how it is named.
+ //
+ // That’s fairly unexpected, but we have to cope somehow.
+ //
+ // So, use the ordinary docker/policyconfiguration namespacing for named images.
+ // image IDs all fall into the root namespace.
+ // Users can set up the root namespace to be either untrusted or rejected,
+ // and to set up specific trust for named namespaces. This allows verifying image
+ // identity when a name is known, and unnamed images would be untrusted or rejected.
+ switch {
+ case ref.id != "":
+ return "" // This still allows using the default "" scope to define a global policy for ID-identified images.
+ case ref.ref != nil:
+ res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
+ if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+ }
+ return res
+ default: // Coverage: Should never happen, NewReference above should refuse such values.
+ panic("Internal inconsistency: daemonReference has empty id and nil ref")
+ }
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref daemonReference) PolicyConfigurationNamespaces() []string {
+ // See the explanation in daemonReference.PolicyConfigurationIdentity.
+ switch {
+ case ref.id != "":
+ return []string{}
+ case ref.ref != nil:
+ return policyconfiguration.DockerReferenceNamespaces(ref.ref)
+ default: // Coverage: Should never happen, NewReference above should refuse such values.
+ panic("Internal inconsistency: daemonReference has empty id and nil ref")
+ }
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ // Should this just untag the image? Should this stop running containers?
+ // The semantics is not quite as clear as for remote repositories.
+ // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
+ return errors.New("Deleting images not implemented for docker-daemon: images")
+}
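
A hedged usage sketch of the exported ParseReference entry point above, classifying a few example reference strings (the names are illustrative only); transports.ImageName adds the docker-daemon: prefix back for display:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/daemon"
	"github.com/containers/image/v5/transports"
)

func main() {
	for _, s := range []string{
		"busybox:latest", // named reference with a tag
		"sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", // image ID
		"busybox", // rejected: neither a tag nor a digest
	} {
		ref, err := daemon.ParseReference(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", transports.ImageName(ref))
	}
}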
diff --git a/docker/daemon/daemon_transport_test.go b/docker/daemon/daemon_transport_test.go
new file mode 100644
index 0000000..6a08d54
--- /dev/null
+++ b/docker/daemon/daemon_transport_test.go
@@ -0,0 +1,242 @@
+package daemon
+
+import (
+ "context"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ sha256digest = "sha256:" + sha256digestHex
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "docker-daemon", Transport.Name())
+}
+
+func TestTransportParseReference(t *testing.T) {
+ testParseReference(t, Transport.ParseReference)
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ // docker/policyconfiguration-accepted identities and scopes are accepted
+ for _, scope := range []string{
+ "registry.example.com/ns/stream" + sha256digest,
+ "registry.example.com/ns/stream:notlatest",
+ "registry.example.com/ns/stream",
+ "registry.example.com/ns",
+ "registry.example.com",
+ "*.example.com",
+ "*.com",
+ sha256digestHex, // Accept also unqualified hexdigest values, they are in principle possible host names.
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+
+ // Hexadecimal IDs are rejected. algo:hexdigest is clearly an invalid host:port value.
+ err := Transport.ValidatePolicyConfigurationScope(sha256digest)
+ assert.Error(t, err)
+}
+
+func TestParseReference(t *testing.T) {
+ testParseReference(t, ParseReference)
+}
+
+// testParseReference is a test shared for Transport.ParseReference and ParseReference.
+func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
+ for _, c := range []struct{ input, expectedID, expectedRef string }{
+ {sha256digest, sha256digest, ""}, // Valid digest format
+ {"sha512:" + sha256digestHex + sha256digestHex, "", ""}, // Non-digest.Canonical digest
+ {"sha256:ab", "", ""}, // Invalid digest value (too short)
+ {sha256digest + "ab", "", ""}, // Invalid digest value (too long)
+ {"sha256:XX23456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "", ""}, // Invalid digest value
+ {"UPPERCASEISINVALID", "", ""}, // Invalid reference input
+ {"busybox", "", ""}, // Missing tag or digest
+ {"busybox:latest", "", "docker.io/library/busybox:latest"}, // Explicit tag
+ {"busybox@" + sha256digest, "", "docker.io/library/busybox@" + sha256digest}, // Explicit digest
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ // Most versions of docker/reference do not handle that (ignoring the tag), so we reject such input.
+ {"busybox:latest@" + sha256digest, "", ""}, // Both tag and digest
+ {"docker.io/library/busybox:latest", "", "docker.io/library/busybox:latest"}, // All implied values explicitly specified
+ } {
+ ref, err := fn(c.input)
+ if c.expectedID == "" && c.expectedRef == "" {
+ assert.Error(t, err, c.input)
+ } else {
+ require.NoError(t, err, c.input)
+ daemonRef, ok := ref.(daemonReference)
+ require.True(t, ok, c.input)
+ // If we don't reject the input, the interpretation must be consistent with reference.ParseAnyReference
+ dockerRef, err := reference.ParseAnyReference(c.input)
+ require.NoError(t, err, c.input)
+
+ if c.expectedRef == "" {
+ assert.Equal(t, c.expectedID, daemonRef.id.String(), c.input)
+ assert.Nil(t, daemonRef.ref, c.input)
+
+ _, ok := dockerRef.(reference.Digested)
+ require.True(t, ok, c.input)
+ assert.Equal(t, c.expectedID, dockerRef.String(), c.input)
+ } else {
+ assert.Equal(t, "", daemonRef.id.String(), c.input)
+ require.NotNil(t, daemonRef.ref, c.input)
+ assert.Equal(t, c.expectedRef, daemonRef.ref.String(), c.input)
+
+ _, ok := dockerRef.(reference.Named)
+ require.True(t, ok, c.input)
+ assert.Equal(t, c.expectedRef, dockerRef.String(), c.input)
+ }
+ }
+ }
+}
+
+// A common list of reference formats to test for the various ImageReference methods.
+// (For IDs it is much simpler, we simply use them unmodified)
+var validNamedReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{
+ {"busybox:notlatest", "docker.io/library/busybox:notlatest", "busybox:notlatest"}, // Explicit tag
+ {"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "busybox" + sha256digest}, // Explicit digest
+ {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "busybox:latest"}, // All implied values explicitly specified
+ {"example.com/ns/foo:bar", "example.com/ns/foo:bar", "example.com/ns/foo:bar"}, // All values explicitly specified
+}
+
+func TestNewReference(t *testing.T) {
+ // An ID reference.
+ id, err := digest.Parse(sha256digest)
+ require.NoError(t, err)
+ ref, err := NewReference(id, nil)
+ require.NoError(t, err)
+ daemonRef, ok := ref.(daemonReference)
+ require.True(t, ok)
+ assert.Equal(t, id, daemonRef.id)
+ assert.Nil(t, daemonRef.ref)
+
+ // Named references
+ for _, c := range validNamedReferenceTestCases {
+ parsed, err := reference.ParseNormalizedNamed(c.input)
+ require.NoError(t, err)
+ ref, err := NewReference("", parsed)
+ require.NoError(t, err, c.input)
+ daemonRef, ok := ref.(daemonReference)
+ require.True(t, ok, c.input)
+ assert.Equal(t, "", daemonRef.id.String())
+ require.NotNil(t, daemonRef.ref)
+ assert.Equal(t, c.dockerRef, daemonRef.ref.String(), c.input)
+ }
+
+ // Both an ID and a named reference provided
+ parsed, err := reference.ParseNormalizedNamed("busybox:latest")
+ require.NoError(t, err)
+ _, err = NewReference(id, parsed)
+ assert.Error(t, err)
+
+ // A reference with neither a tag nor digest
+ parsed, err = reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ _, err = NewReference("", parsed)
+ assert.Error(t, err)
+
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ parsed, err = reference.ParseNormalizedNamed("busybox:notlatest@" + sha256digest)
+ require.NoError(t, err)
+ _, ok = parsed.(reference.Canonical)
+ require.True(t, ok)
+ _, ok = parsed.(reference.NamedTagged)
+ require.True(t, ok)
+ _, err = NewReference("", parsed)
+ assert.Error(t, err)
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, err := ParseReference(sha256digest)
+ require.NoError(t, err)
+ assert.Equal(t, Transport, ref.Transport())
+
+ ref, err = ParseReference("busybox:latest")
+ require.NoError(t, err)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ ref, err := ParseReference(sha256digest)
+ require.NoError(t, err)
+ assert.Equal(t, sha256digest, ref.StringWithinTransport())
+
+ for _, c := range validNamedReferenceTestCases {
+ ref, err := ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ stringRef := ref.StringWithinTransport()
+ assert.Equal(t, c.stringWithinTransport, stringRef, c.input)
+ // Do one more round to verify that the output can be parsed, to an equal value.
+ ref2, err := Transport.ParseReference(stringRef)
+ require.NoError(t, err, c.input)
+ stringRef2 := ref2.StringWithinTransport()
+ assert.Equal(t, stringRef, stringRef2, c.input)
+ }
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ ref, err := ParseReference(sha256digest)
+ require.NoError(t, err)
+ assert.Nil(t, ref.DockerReference())
+
+ for _, c := range validNamedReferenceTestCases {
+ ref, err := ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ dockerRef := ref.DockerReference()
+ require.NotNil(t, dockerRef, c.input)
+ assert.Equal(t, c.dockerRef, dockerRef.String(), c.input)
+ }
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ // id-only references have no identity.
+ ref, err := ParseReference(sha256digest)
+ require.NoError(t, err)
+ assert.Equal(t, "", ref.PolicyConfigurationIdentity())
+
+ // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
+ ref, err = ParseReference("busybox:notlatest")
+ require.NoError(t, err)
+ assert.Equal(t, "docker.io/library/busybox:notlatest", ref.PolicyConfigurationIdentity())
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ // id-only references have no identity.
+ ref, err := ParseReference(sha256digest)
+ require.NoError(t, err)
+ assert.Empty(t, ref.PolicyConfigurationNamespaces())
+
+ // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
+ ref, err = ParseReference("busybox:notlatest")
+ require.NoError(t, err)
+ assert.Equal(t, []string{
+ "docker.io/library/busybox",
+ "docker.io/library",
+ "docker.io",
+ "*.io",
+ }, ref.PolicyConfigurationNamespaces())
+}
+
+// daemonReference.NewImage, daemonReference.NewImageSource, and daemonReference.NewImageDestination are
+// untested because merely creating the objects immediately connects to the daemon.
+
+func TestReferenceDeleteImage(t *testing.T) {
+ ref, err := ParseReference(sha256digest)
+ require.NoError(t, err)
+ err = ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err)
+
+ for _, c := range validNamedReferenceTestCases {
+ ref, err := ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ err = ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err, c.input)
+ }
+}
diff --git a/docker/daemon/testdata/certs/ca.pem b/docker/daemon/testdata/certs/ca.pem
new file mode 100644
index 0000000..3fc3820
--- /dev/null
+++ b/docker/daemon/testdata/certs/ca.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIICzjCCAbagAwIBAgIRAIGgYBNZse0EqRVzxe7aQGIwDQYJKoZIhvcNAQELBQAw
+EDEOMAwGA1UEChMFaGFyZHkwHhcNMTcxMDA0MDgzNDAwWhcNMjAwOTE4MDgzNDAw
+WjAQMQ4wDAYDVQQKEwVoYXJkeTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAMlrdtoXWlZMPFwgeKZHrGxjVe4KXkQy5MFBUfO48htyIe2OlZAd3HGyap41
+7L4YciFhw0bp7wHnYtSTiCHQrnA4SLzNuaU2NM5nJw+E4c5kNrkvhLJqpTNCaYCy
+Xbh3H8REW+5UJIgnyeKLx//kvlDm6p4O55+OLlGgzxNaTIgldKLPmx543VVt6VDT
+qgFlaYsRz8hZ12+qAqu5am/Wpfal2+Df7Pmmn5M90UBTUwY8CLc/ZiWbv6hihDWV
+I28JoM0onEqAx7phRd0SwwK4mYfEe/u614r3bZaI36e9ojU9/St4nbMoMeyZP96t
+DOdX9A1SMbsqLOYKXBKM+jXPEaECAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKsMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBALah7CjpwbEY6yjA2KDv
+VaAHEgz4Xd8USW/L2292EiQLmFdIaEJiiWRjtKFiF427TXfAPXvxHA2q9OElDW4d
+G6XAcBJg5mcBh8WRTHwfLQ8llfj7dH1/sfazSUZeat6lTIyhQfkF99LAJTqlfYAF
+aNqIQio7FAjGyJqIPYLa1FKmfLdZr9azb9IjTZLhBGBWdLF0+JOn+JBsl7g9BvUp
+ArCI0Wib/vsr368xkzWzKjij1exZdfw0TmsieNYvViFoFJGNCB5XLPo0bHrmMVVe
+25EGam+xPkG/JQP5Eb3iikSEn8y5SIeJ0nS0EQE6uXPv+lQj1LmVv8OYzjXqpoJT
+n6g=
+-----END CERTIFICATE-----
diff --git a/docker/daemon/testdata/certs/cert.pem b/docker/daemon/testdata/certs/cert.pem
new file mode 100644
index 0000000..f4d8ede
--- /dev/null
+++ b/docker/daemon/testdata/certs/cert.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC6zCCAdOgAwIBAgIQEh1UsPL20u9KnyOByuhYWDANBgkqhkiG9w0BAQsFADAQ
+MQ4wDAYDVQQKEwVoYXJkeTAeFw0xNzEwMDQwODM0MDBaFw0yMDA5MTgwODM0MDBa
+MBwxGjAYBgNVBAoMEWhhcmR5Ljxib290c3RyYXA+MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAyJm29vB/urzreEwF012iAAWW3fgE1VEeNLTP/sZTYV3z
+UNGKao5x7dUIiah8rptkK3+FN4TID8Z2c1DpKzMTisdpRF3UoRWmjm1UTbxEENhk
+EptkFwGFM6BcZSyiLlyCBVM+wGsqzHAASe833S/yiu8miNc2S+jd0FIluKWe0yzG
+u2oaJfA28dBfqWyn9hh6msqBVYK6sDle9t0ditNubCyD+vrnoK8825LOIPV6QafL
+kVyW0/mj4GJutPOVop37HyQMcuQnDWBA+934l3tpeaJ93d3u8XjU7dXuOobKMohw
++33/pTALu9P0WtDbEeo/xcEICgimqpir92KMSXxUbwIDAQABozUwMzAOBgNVHQ8B
+Af8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADANBgkq
+hkiG9w0BAQsFAAOCAQEAnYffv9ipGQVW/t3sFxKu9LXQ7ZkhUSgoxPIA51goaYop
+YM9QR3ZBf2tMJwjKXuOLEkxxweNjP3dMKh2gykFory+jv6OQYIiLf9M82ty8rOPi
+mWLMDAIWWagkj5Yy6b+/aLkpXQ+lEsxLyi6po+D+I+AwRUYvfSc74a7XxkJk77JF
+/0SVgNdDtL08zVNOGDgepP/95e1pKMKgsOiCDnFCOAY+l6HcvizwBH+EI+XtdLVb
+qBmOAYiwYObBaRuyhVbbDKqKRGFUNkmmDv6vCQoTL1C9wrBnAiJe2khbLm1ix9Re
+3MW15CLuipneSgRAWXSdMbDIv9+KQE8fo2TWqikrCw==
+-----END CERTIFICATE-----
diff --git a/docker/daemon/testdata/certs/key.pem b/docker/daemon/testdata/certs/key.pem
new file mode 100644
index 0000000..ede7d2e
--- /dev/null
+++ b/docker/daemon/testdata/certs/key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAyJm29vB/urzreEwF012iAAWW3fgE1VEeNLTP/sZTYV3zUNGK
+ao5x7dUIiah8rptkK3+FN4TID8Z2c1DpKzMTisdpRF3UoRWmjm1UTbxEENhkEptk
+FwGFM6BcZSyiLlyCBVM+wGsqzHAASe833S/yiu8miNc2S+jd0FIluKWe0yzGu2oa
+JfA28dBfqWyn9hh6msqBVYK6sDle9t0ditNubCyD+vrnoK8825LOIPV6QafLkVyW
+0/mj4GJutPOVop37HyQMcuQnDWBA+934l3tpeaJ93d3u8XjU7dXuOobKMohw+33/
+pTALu9P0WtDbEeo/xcEICgimqpir92KMSXxUbwIDAQABAoIBAQCyuKjXR5w1Ll4I
+FotWLmTH6jLo3jDIMPZddP6e+emNpRvD1HyixPhiMdvicXdsRUuwqXNx7F4mF+au
+hNbIwz/U9CcoXwSy48w5ttRWUba+31wBa+p3yMX5IhVPmr1/2rGItwsAejpuXBcV
+yAiYi0BnYfyODFf2t6jwElBDO2POtdEoYVYwgtMTMy5pmDA2QA3mKkjCcJviectZ
+9yFb8DFiwIYkryErWrGWaKls/oYV2O0A0mCaIqgw3HfhIl6F1pk+9oYnmsq6IzF5
+wSIg2evd4GMm/L2sqlVFqb4Kj54fbyfdOFK0bQso6VQZvB5tZ6NLHfv2f3BBFHVu
+jO+On/ixAoGBAOJkPHavnAb/lLDnMJjjXYNUQqyxxSlwOwNifG4evf/KAezIIalJ
+kC7jZoFsUkARVbRKQag0T2Xvxw/dDqmNastR1NxsBkhOWjYiQbALYP3u2f06Nhf8
+YlX6hyEje/3bb838//sH5jnaN8GcZnDBrAoPzW+V87pQoCyVrjs2t8qXAoGBAOLV
++PviAUWFjUO//dYk9H9IWotr6rdkzmpLbrj+NoLNSGeoZbByPmT5BuNswXvNyk+9
+smOQ8yqBiMpjxKwR4WQnS6Ydh6HTT33IWLLVazDFMf7ACmXWoScFhCAW6qGfdrYQ
+hkCSbwgun8jbL2D477jJl6ZyQG48lVnnZDjkFbfpAoGAUOqCsekSW23+Nzxqojqh
+sc7sBc2EKstyTENnNfTG9CW/imH9pgQlBJ1Chf+xZjTL7SSdUwFfX4/UFldsZi2l
+fgZBjocNt8pJdA/KaqGmiRxVzayAqRIME673nWCRcKp9y6Ih3Bd2sjbMtuavtp2C
+YBZF1xxBgNZQaZ8WJxPnnQECgYEAzLgGJPWc5iyZCJsesQTbMICRTyEPTYKKFD6N
+6CFt+vDgNsUxOWRx0Vk6kUhW+rAItZzjgZ6RBzyuwtH17sGYZHZefMZL4Y2/QSru
+ej/IpNRjwaF6AN0KxhfhXcCw8zrivX/+WgqOcJj7lh/TC7a/S0uNNSgJ5DODKwd9
+WSboPvkCgYEAzqdWfetko7hEI4076pufJrHPnnCJSHkkQ1QnfVl71mq7UmKXLDxD
+L5oWtU53+dswzvxGrzkOWsRJC5nN30BYJuYlwKzo3+MCKlUzJSuIMVTbTPlwKudh
+AF19s4GFZVo29FlgIQhA5dfIkZgFXAlVxYcGTLUixEmPwrc6yguULPs=
+-----END RSA PRIVATE KEY-----
diff --git a/docker/distribution_error.go b/docker/distribution_error.go
new file mode 100644
index 0000000..11b42c6
--- /dev/null
+++ b/docker/distribution_error.go
@@ -0,0 +1,148 @@
+// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go
+// Copyright 2022 github.com/distribution/distribution authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
+ "golang.org/x/exp/slices"
+)
+
+// errNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var errNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// unexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type unexpectedHTTPStatusError struct {
+ Status string
+}
+
+func (e *unexpectedHTTPStatusError) Error() string {
+ return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// unexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the content was unexpected and failed to be parsed.
+type unexpectedHTTPResponseError struct {
+ ParseErr error
+ StatusCode int
+ Response []byte
+}
+
+func (e *unexpectedHTTPResponseError) Error() string {
+ return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+ var errors errcode.Errors
+ body, err := io.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ // For backward compatibility, handle irregularly formatted
+ // messages that contain a "details" field.
+ var detailsErr struct {
+ Details string `json:"details"`
+ }
+ err = json.Unmarshal(body, &detailsErr)
+ if err == nil && detailsErr.Details != "" {
+ switch statusCode {
+ case http.StatusUnauthorized:
+ return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+ case http.StatusTooManyRequests:
+ return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+ default:
+ return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+ }
+ }
+
+ if err := json.Unmarshal(body, &errors); err != nil {
+ return &unexpectedHTTPResponseError{
+ ParseErr: err,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ if len(errors) == 0 {
+ // If there was no error specified in the body, return
+ // UnexpectedHTTPResponseError.
+ return &unexpectedHTTPResponseError{
+ ParseErr: errNoErrorsInBody,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ return errors
+}
+
+func makeErrorList(err error) []error {
+ if errL, ok := err.(errcode.Errors); ok {
+ return []error(errL)
+ }
+ return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+ return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...))
+}
+
+// handleErrorResponse returns the error parsed from an HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// unexpectedHTTPStatusError is returned for response codes outside of the
+// expected range.
+func handleErrorResponse(resp *http.Response) error {
+ if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+ // Check for OAuth errors within the `WWW-Authenticate` header first
+ // See https://tools.ietf.org/html/rfc6750#section-3
+ for _, c := range dockerChallenge.ResponseChallenges(resp) {
+ if c.Scheme == "bearer" {
+ var err errcode.Error
+ // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+ switch c.Parameters["error"] {
+ case "invalid_token":
+ err.Code = errcode.ErrorCodeUnauthorized
+ case "insufficient_scope":
+ err.Code = errcode.ErrorCodeDenied
+ default:
+ continue
+ }
+ if description := c.Parameters["error_description"]; description != "" {
+ err.Message = description
+ } else {
+ err.Message = err.Code.Message()
+ }
+
+ return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+ }
+ }
+ err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+ if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+ return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
+ }
+ return err
+ }
+ return &unexpectedHTTPStatusError{Status: resp.Status}
+}
diff --git a/docker/distribution_error_test.go b/docker/distribution_error_test.go
new file mode 100644
index 0000000..c77dccf
--- /dev/null
+++ b/docker/distribution_error_test.go
@@ -0,0 +1,113 @@
+// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go
+// Copyright 2022 github.com/distribution/distribution authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package docker
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+ "strings"
+ "testing"
+)
+
+func TestHandleErrorResponse401ValidBody(t *testing.T) {
+ json := []byte("{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}")
+ response := &http.Response{
+ Status: "401 Unauthorized",
+ StatusCode: 401,
+ Body: io.NopCloser(bytes.NewReader(json)),
+ }
+ err := handleErrorResponse(response)
+
+ expectedMsg := "unauthorized: action requires authentication"
+ if !strings.Contains(err.Error(), expectedMsg) {
+ t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
+ }
+}
+
+func TestHandleErrorResponse401WithInvalidBody(t *testing.T) {
+ json := []byte("{invalid json}")
+ response := &http.Response{
+ Status: "401 Unauthorized",
+ StatusCode: 401,
+ Body: io.NopCloser(bytes.NewReader(json)),
+ }
+ err := handleErrorResponse(response)
+
+ expectedMsg := "unauthorized: authentication required"
+ if !strings.Contains(err.Error(), expectedMsg) {
+ t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
+ }
+}
+
+func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) {
+ json := []byte("{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}")
+ response := &http.Response{
+ Status: "400 Bad Request",
+ StatusCode: 400,
+ Body: io.NopCloser(bytes.NewReader(json)),
+ }
+ err := handleErrorResponse(response)
+
+ expectedMsg := "digest invalid: provided digest does not match"
+ if !strings.Contains(err.Error(), expectedMsg) {
+ t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
+ }
+}
+
+func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) {
+ json := []byte(`{"randomkey": "randomvalue"}`)
+ response := &http.Response{
+ Status: "404 Not Found",
+ StatusCode: 404,
+ Body: io.NopCloser(bytes.NewReader(json)),
+ }
+ err := handleErrorResponse(response)
+
+ expectedMsg := `error parsing HTTP 404 response body: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"`
+ if !strings.Contains(err.Error(), expectedMsg) {
+ t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
+ }
+}
+
+func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) {
+ json := []byte("{invalid json}")
+ response := &http.Response{
+ Status: "404 Not Found",
+ StatusCode: 404,
+ Body: io.NopCloser(bytes.NewReader(json)),
+ }
+ err := handleErrorResponse(response)
+
+ expectedMsg := "error parsing HTTP 404 response body: invalid character 'i' looking for beginning of object key string: \"{invalid json}\""
+ if !strings.Contains(err.Error(), expectedMsg) {
+ t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
+ }
+}
+
+func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) {
+ response := &http.Response{
+ Status: "501 Not Implemented",
+ StatusCode: 501,
+ Body: io.NopCloser(bytes.NewReader([]byte("{\"Error Encountered\" : \"Function not implemented.\"}"))),
+ }
+ err := handleErrorResponse(response)
+
+ expectedMsg := "received unexpected HTTP status: 501 Not Implemented"
+ if !strings.Contains(err.Error(), expectedMsg) {
+ t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error())
+ }
+}
diff --git a/docker/docker_client.go b/docker/docker_client.go
new file mode 100644
index 0000000..6ce8f70
--- /dev/null
+++ b/docker/docker_client.go
@@ -0,0 +1,1166 @@
+package docker
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/internal/useragent"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/docker/config"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/pkg/tlsclientconfig"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/docker/distribution/registry/api/errcode"
+ v2 "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/go-connections/tlsconfig"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ dockerHostname = "docker.io"
+ dockerV1Hostname = "index.docker.io"
+ dockerRegistry = "registry-1.docker.io"
+
+ resolvedPingV2URL = "%s://%s/v2/"
+ resolvedPingV1URL = "%s://%s/v1/_ping"
+ tagsPath = "/v2/%s/tags/list"
+ manifestPath = "/v2/%s/manifests/%s"
+ blobsPath = "/v2/%s/blobs/%s"
+ blobUploadPath = "/v2/%s/blobs/uploads/"
+ extensionsSignaturePath = "/extensions/v2/%s/signatures/%s"
+
+ minimumTokenLifetimeSeconds = 60
+
+ extensionSignatureSchemaVersion = 2 // extensionSignature.Version
+ extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type
+
+ backoffNumIterations = 5
+ backoffInitialDelay = 2 * time.Second
+ backoffMaxDelay = 60 * time.Second
+)
+
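+// certPath is a candidate directory containing per-host certificate subdirectories;
+// absolute indicates whether the path is absolute (and thus subject to
+// SystemContext.RootForImplicitAbsolutePaths) rather than relative to the user's home.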
+type certPath struct {
+ path string
+ absolute bool
+}
+
+var (
+ homeCertDir = filepath.FromSlash(".config/containers/certs.d")
+ perHostCertDirs = []certPath{
+ {path: etcDir + "/containers/certs.d", absolute: true},
+ {path: etcDir + "/docker/certs.d", absolute: true},
+ }
+)
+
+// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
+// extensionSignature represents a Docker image signature.
+type extensionSignature struct {
+ Version int `json:"schemaVersion"` // Version specifies the schema version
+ Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format
+ Type string `json:"type"` // Type is optional, if not set it will default to "AtomicImageV1"
+ Content []byte `json:"content"` // Content contains the signature
+}
+
+// extensionSignatureList represents a list of Docker image signatures.
+type extensionSignatureList struct {
+ Signatures []extensionSignature `json:"signatures"`
+}
+
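+// bearerToken records a token returned by a registry's authentication service,
+// together with the expiration time computed by newBearerTokenFromJSONBlob.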
+type bearerToken struct {
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ expirationTime time.Time
+}
+
+// dockerClient is configuration for dealing with a single container registry.
+type dockerClient struct {
+ // The following members are set by newDockerClient and do not change afterwards.
+ sys *types.SystemContext
+ registry string
+ userAgent string
+
+ // tlsClientConfig is set up by newDockerClient and will be used and updated
+ // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
+ tlsClientConfig *tls.Config
+ // The following members are not set by newDockerClient and must be set by callers if needed.
+ auth types.DockerAuthConfig
+ registryToken string
+ signatureBase lookasideStorageBase
+ useSigstoreAttachments bool
+ scope authScope
+
+ // The following members are detected registry properties:
+ // They are set after a successful detectProperties(), and never change afterwards.
+ client *http.Client
+ scheme string
+ challenges []challenge
+ supportsSignatures bool
+
+ // Private state for setupRequestAuth (key: string, value: bearerToken)
+ tokenCache sync.Map
+ // Private state for detectProperties:
+ detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
+ detectPropertiesError error // detectPropertiesError caches the initial error.
+ // Private state for logResponseWarnings
+ reportedWarningsLock sync.Mutex
+ reportedWarnings *set.Set[string]
+}
+
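+// authScope specifies a token scope to request: a resource type (e.g. "repository"),
+// the resource's remote name, and the actions (e.g. "pull"); it is formatted as
+// "resourceType:remoteName:actions" when requesting a bearer token.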
+type authScope struct {
+ resourceType string
+ remoteName string
+ actions string
+}
+
+// sendAuth determines whether we need authentication for v2 or v1 endpoint.
+type sendAuth int
+
+const (
+ // v2 endpoint with authentication.
+ v2Auth sendAuth = iota
+ // v1 endpoint with authentication.
+ // TODO: Get v1Auth working
+ // v1Auth
+ // no authentication, works for both v1 and v2.
+ noAuth
+)
+
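+// newBearerTokenFromJSONBlob parses blob as a bearer token response, falling back to
+// access_token if token is empty, enforcing a minimum lifetime, and computing expirationTime.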
+func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
+ token := new(bearerToken)
+ if err := json.Unmarshal(blob, &token); err != nil {
+ return nil, err
+ }
+ if token.Token == "" {
+ token.Token = token.AccessToken
+ }
+ if token.ExpiresIn < minimumTokenLifetimeSeconds {
+ token.ExpiresIn = minimumTokenLifetimeSeconds
+ logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+ }
+ if token.IssuedAt.IsZero() {
+ token.IssuedAt = time.Now().UTC()
+ }
+ token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ return token, nil
+}
+
+// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
+func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
+ if sys != nil && sys.DockerCertPath != "" {
+ return sys.DockerCertPath, nil
+ }
+ if sys != nil && sys.DockerPerHostCertDirPath != "" {
+ return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
+ }
+
+ var (
+ hostCertDir string
+ fullCertDirPath string
+ )
+
+ for _, perHostCertDir := range append([]certPath{{path: filepath.Join(homedir.Get(), homeCertDir), absolute: false}}, perHostCertDirs...) {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" && perHostCertDir.absolute {
+ hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, perHostCertDir.path)
+ } else {
+ hostCertDir = perHostCertDir.path
+ }
+
+ fullCertDirPath = filepath.Join(hostCertDir, hostPort)
+ _, err := os.Stat(fullCertDirPath)
+ if err == nil {
+ break
+ }
+ if os.IsNotExist(err) {
+ continue
+ }
+ if os.IsPermission(err) {
+ logrus.Debugf("error accessing certs directory due to permissions: %v", err)
+ continue
+ }
+ return "", err
+ }
+ return fullCertDirPath, nil
+}
+
+// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
+// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
+// signatureBase is always set in the return value
+// The caller must call .Close() on the returned client when done.
+func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
+ auth, err := config.GetCredentialsForRef(sys, ref.ref)
+ if err != nil {
+ return nil, fmt.Errorf("getting username and password: %w", err)
+ }
+
+ sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write)
+ if err != nil {
+ return nil, err
+ }
+
+ registry := reference.Domain(ref.ref)
+ client, err := newDockerClient(sys, registry, ref.ref.Name())
+ if err != nil {
+ return nil, err
+ }
+ client.auth = auth
+ if sys != nil {
+ client.registryToken = sys.DockerBearerRegistryToken
+ }
+ client.signatureBase = sigBase
+ client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
+ client.scope.resourceType = "repository"
+ client.scope.actions = actions
+ client.scope.remoteName = reference.Path(ref.ref)
+ return client, nil
+}
+
+// newDockerClient returns a new dockerClient instance for the given registry
+// and reference. The reference is used to query the registry configuration
+// and can be either a registry (e.g., "registry.com[:5000]") or a repository
+// (e.g., "registry.com[:5000][/some/namespace]/repo").
+// Please note that newDockerClient does not set all members of dockerClient
+// (e.g., username and password); those must be set by callers if necessary.
+// The caller must call .Close() on the returned client when done.
+func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
+ hostName := registry
+ if registry == dockerHostname {
+ registry = dockerRegistry
+ }
+ tlsClientConfig := &tls.Config{
+ CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
+ }
+
+ // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
+ // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
+ // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
+ // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
+ // undocumented and may change if docker/docker changes.
+ certDir, err := dockerCertDir(sys, hostName)
+ if err != nil {
+ return nil, err
+ }
+ if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
+ return nil, err
+ }
+
+ // Check if TLS verification shall be skipped (default=false) which can
+ // be specified in the sysregistriesv2 configuration.
+ skipVerify := false
+ reg, err := sysregistriesv2.FindRegistry(sys, reference)
+ if err != nil {
+ return nil, fmt.Errorf("loading registries: %w", err)
+ }
+ if reg != nil {
+ if reg.Blocked {
+ return nil, fmt.Errorf("registry %s is blocked in %s or %s", reg.Prefix, sysregistriesv2.ConfigPath(sys), sysregistriesv2.ConfigDirPath(sys))
+ }
+ skipVerify = reg.Insecure
+ }
+ tlsClientConfig.InsecureSkipVerify = skipVerify
+
+ userAgent := useragent.DefaultUserAgent
+ if sys != nil && sys.DockerRegistryUserAgent != "" {
+ userAgent = sys.DockerRegistryUserAgent
+ }
+
+ return &dockerClient{
+ sys: sys,
+ registry: registry,
+ userAgent: userAgent,
+ tlsClientConfig: tlsClientConfig,
+ reportedWarnings: set.New[string](),
+ }, nil
+}
+
+// CheckAuth validates the credentials by attempting to log into the registry.
+// It returns an error if the HTTP request fails or if the status code received was 401.
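+//
+// A minimal usage sketch (the registry name and credentials are placeholders):
+//
+//	err := docker.CheckAuth(context.Background(), nil, "username", "password", "registry.example.com")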
+func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
+ client, err := newDockerClient(sys, registry, registry)
+ if err != nil {
+ return fmt.Errorf("creating new docker client: %w", err)
+ }
+ defer client.Close()
+ client.auth = types.DockerAuthConfig{
+ Username: username,
+ Password: password,
+ }
+
+ resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ err := registryHTTPResponseToError(resp)
+ if resp.StatusCode == http.StatusUnauthorized {
+ err = ErrUnauthorizedForCredentials{Err: err}
+ }
+ return err
+ }
+ return nil
+}
+
+// SearchResult holds the information of each matching image
+// It matches the output returned by the v1 endpoint
+type SearchResult struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ // StarCount states the number of stars the image has
+ StarCount int `json:"star_count"`
+ IsTrusted bool `json:"is_trusted"`
+ // IsAutomated states whether the image is an automated build
+ IsAutomated bool `json:"is_automated"`
+ // IsOfficial states whether the image is an official build
+ IsOfficial bool `json:"is_official"`
+}
+
+// SearchRegistry queries a registry for images that contain "image" in their name
+// The limit is the max number of results desired
+// Note: The limit value doesn't work with all registries;
+// for example, registry.access.redhat.com returns all the results without limiting them to the limit value.
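+//
+// A minimal usage sketch (the registry and search term are placeholders):
+//
+//	results, err := docker.SearchRegistry(context.Background(), nil, "registry.example.com", "busybox", 10)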
+func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
+ type V2Results struct {
+ // Repositories holds the results returned by the /v2/_catalog endpoint
+ Repositories []string `json:"repositories"`
+ }
+ type V1Results struct {
+ // Results holds the results returned by the /v1/search endpoint
+ Results []SearchResult `json:"results"`
+ }
+ v1Res := &V1Results{}
+
+ // Get credentials from authfile for the underlying hostname
+ // We can't use GetCredentialsForRef here because we want to search the whole registry.
+ auth, err := config.GetCredentials(sys, registry)
+ if err != nil {
+ return nil, fmt.Errorf("getting username and password: %w", err)
+ }
+
+ // The /v2/_catalog endpoint has been disabled for docker.io, therefore
+ // any call made to that endpoint will fail. So we use the v1 hostname
+ // for docker.io, for simplicity of implementation and because it
+ // returns search results.
+ hostname := registry
+ if registry == dockerHostname {
+ hostname = dockerV1Hostname
+ // A search term of library/foo does not find the library/foo image on the docker.io servers,
+ // which is surprising - but Docker modifies the search term client-side in this same way,
+ // so it seems convenient to do the same thing.
+ // Read more here: https://github.com/containers/image/pull/2133#issue-1928524334
+ image = strings.TrimPrefix(image, "library/")
+ }
+
+ client, err := newDockerClient(sys, hostname, registry)
+ if err != nil {
+ return nil, fmt.Errorf("creating new docker client: %w", err)
+ }
+ defer client.Close()
+ client.auth = auth
+ if sys != nil {
+ client.registryToken = sys.DockerBearerRegistryToken
+ }
+
+ // Only try the v1 search endpoint if the search query is not empty. If it is
+ // empty skip to the v2 endpoint.
+ if image != "" {
+ // set up the query values for the v1 endpoint
+ u := url.URL{
+ Path: "/v1/search",
+ }
+ q := u.Query()
+ q.Set("q", image)
+ q.Set("n", strconv.Itoa(limit))
+ u.RawQuery = q.Encode()
+
+ logrus.Debugf("trying to talk to v1 search endpoint")
+ resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil)
+ if err != nil {
+ logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
+ } else {
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp, ""))
+ } else {
+ if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
+ return nil, err
+ }
+ return v1Res.Results, nil
+ }
+ }
+ }
+
+ logrus.Debugf("trying to talk to v2 search endpoint")
+ searchRes := []SearchResult{}
+ path := "/v2/_catalog"
+ for len(searchRes) < limit {
+ resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
+ if err != nil {
+ logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
+ return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ err := registryHTTPResponseToError(resp)
+ logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
+ return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
+ }
+ v2Res := &V2Results{}
+ if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
+ return nil, err
+ }
+
+ for _, repo := range v2Res.Repositories {
+ if len(searchRes) == limit {
+ break
+ }
+ if strings.Contains(repo, image) {
+ res := SearchResult{
+ Name: repo,
+ }
+ // bugzilla.redhat.com/show_bug.cgi?id=1976283
+ // If we have a full match, make sure it's listed as the first result.
+ // (Note there might be a full match we never see if we reach the result limit first.)
+ if repo == image {
+ searchRes = append([]SearchResult{res}, searchRes...)
+ } else {
+ searchRes = append(searchRes, res)
+ }
+ }
+ }
+
+ link := resp.Header.Get("Link")
+ if link == "" {
+ break
+ }
+ linkURLPart, _, _ := strings.Cut(link, ";")
+ linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
+ if err != nil {
+ return searchRes, err
+ }
+
+ // can be relative or absolute, but we only want the path (and I
+ // guess we're in trouble if it forwards to a new place...)
+ path = linkURL.Path
+ if linkURL.RawQuery != "" {
+ path += "?"
+ path += linkURL.RawQuery
+ }
+ }
+ return searchRes, nil
+}
+
+// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+ if err := c.detectProperties(ctx); err != nil {
+ return nil, err
+ }
+
+ requestURL, err := c.resolveRequestURL(path)
+ if err != nil {
+ return nil, err
+ }
+ return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope)
+}
+
+// resolveRequestURL turns a path for c.makeRequest into a full URL.
+// Most users should call makeRequest directly, this exists basically to make the URL available for debug logs.
+func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
+ urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+ res, err := url.Parse(urlString)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// needsRetryWithUpdatedScope checks if the auth headers in the response indicate that
+// authorization failed because of an "insufficient_scope" error. If that's the case,
+// it returns the required scope to be used for fetching a new token.
+func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
+ if err == nil && res.StatusCode == http.StatusUnauthorized {
+ challenges := parseAuthHeader(res.Header)
+ for _, challenge := range challenges {
+ if challenge.Scheme == "bearer" {
+ if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
+ if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
+ if newScope, err := parseAuthScope(scope); err == nil {
+ return true, newScope
+ } else {
+ logrus.WithFields(logrus.Fields{
+ "error": err,
+ "scope": scope,
+ "challenge": challenge,
+ }).Error("Failed to parse the authentication scope from the given challenge")
+ }
+ }
+ }
+ }
+ }
+ }
+ return false, nil
+}
+
+// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
+// silently falling back to fallbackDelay if the header is missing or invalid.
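+// Per RFC 7231 §7.1.3, the header value may be either a number of seconds or an HTTP-date.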
+func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {
+ after := res.Header.Get("Retry-After")
+ if after == "" {
+ return fallbackDelay
+ }
+ logrus.Debugf("Detected 'Retry-After' header %q", after)
+ // First, check if we have a numerical value.
+ if num, err := strconv.ParseInt(after, 10, 64); err == nil {
+ return time.Duration(num) * time.Second
+ }
+ // Second, check if we have an HTTP date.
+ if t, err := http.ParseTime(after); err == nil {
+ // If the delta between the date and now is positive, use it.
+ delta := time.Until(t)
+ if delta > 0 {
+ return delta
+ }
+ logrus.Debugf("Retry-After date in the past, ignoring it")
+ return fallbackDelay
+ }
+ logrus.Debugf("Invalid Retry-After format, ignoring it")
+ return fallbackDelay
+}
+
+// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
+// In case of an HTTP 429 status code in the response, it may automatically retry a few times.
+// TODO(runcom): too many arguments here, use a struct
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, requestURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+ delay := backoffInitialDelay
+ attempts := 0
+ for {
+ res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope)
+ attempts++
+
+ // By default we use pre-defined scopes per operation. In
+ // certain cases, this can fail when our authentication is
+ // insufficient, then we might be getting an error back with a
+ // Www-Authenticate Header indicating an insufficient scope.
+ //
+ // Check for that and update the client challenges to retry after
+ // requesting a new token
+ //
+ // We only try this on the first attempt, to not overload an
+ // already struggling server.
+ // We also cannot retry with a body (stream != nil) as stream
+ // was already read
+ if attempts == 1 && stream == nil && auth != noAuth {
+ if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
+ logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
+ // Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
+ // expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
+ // for more than one extra scope.
+ res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope)
+ extraScope = newScope
+ }
+ }
+ if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
+ stream != nil || // We can't retry with a body (which is not restartable in the general case)
+ attempts == backoffNumIterations {
+ return res, err
+ }
+ // close response body before retry or context done
+ res.Body.Close()
+
+ delay = parseRetryAfter(res, delay)
+ if delay > backoffMaxDelay {
+ delay = backoffMaxDelay
+ }
+ logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds())
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-time.After(delay):
+ // Nothing
+ }
+ delay *= 2 // If the registry does not specify a delay, back off exponentially.
+ }
+}
+
+// makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
+// Note that no exponential back off is performed when receiving an http 429 status code.
+func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, resolvedURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+ req, err := http.NewRequestWithContext(ctx, method, resolvedURL.String(), stream)
+ if err != nil {
+ return nil, err
+ }
+ if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it.
+ req.ContentLength = streamLen
+ }
+ req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
+ for n, h := range headers {
+ for _, hh := range h {
+ req.Header.Add(n, hh)
+ }
+ }
+ req.Header.Add("User-Agent", c.userAgent)
+ if auth == v2Auth {
+ if err := c.setupRequestAuth(req, extraScope); err != nil {
+ return nil, err
+ }
+ }
+ logrus.Debugf("%s %s", method, resolvedURL.Redacted())
+ res, err := c.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if warnings := res.Header.Values("Warning"); len(warnings) != 0 {
+ c.logResponseWarnings(res, warnings)
+ }
+ return res, nil
+}
+
+// logResponseWarnings logs warningHeaders from res, if any.
+func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) {
+ c.reportedWarningsLock.Lock()
+ defer c.reportedWarningsLock.Unlock()
+
+ for _, header := range warningHeaders {
+ warningString := parseRegistryWarningHeader(header)
+ if warningString == "" {
+ logrus.Debugf("Ignored Warning: header from registry: %q", header)
+ } else {
+ if !c.reportedWarnings.Contains(warningString) {
+ c.reportedWarnings.Add(warningString)
+ // Note that reportedWarnings is based only on warningString, so that we don’t
+ // repeat the same warning for every request - but the warning includes the URL;
+ // so it may not be specific to that URL.
+ logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString)
+ } else {
+ logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString)
+ }
+ }
+ }
+}
+
+// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning
+// values allowed by opencontainers/distribution-spec.
+// It returns the warning string if the header has the expected format, or "" otherwise.
+func parseRegistryWarningHeader(header string) string {
+ const expectedPrefix = `299 - "`
+ const expectedSuffix = `"`
+
+ // warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
+ // distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
+ if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) {
+ return ""
+ }
+ header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)]
+
+ // ”Recipients that process the value of a quoted-string MUST handle a quoted-pair
+ // as if it were replaced by the octet following the backslash.”, so let’s do that…
+ res := strings.Builder{}
+ afterBackslash := false
+ for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points
+ switch {
+ case c == 0x7F || (c < ' ' && c != '\t'):
+ return "" // Control characters are forbidden
+ case afterBackslash:
+ res.WriteByte(c)
+ afterBackslash = false
+ case c == '"':
+ // This terminates the warn-text; either a warn-date (forbidden by distribution-spec) follows,
+ // or the input is completely invalid.
+ return ""
+ case c == '\\':
+ afterBackslash = true
+ default:
+ res.WriteByte(c)
+ }
+ }
+ if afterBackslash {
+ return ""
+ }
+ return res.String()
+}
+
+// we're using the challenges from the /v2/ ping response and not the one from the destination
+// URL in this request because:
+//
+// 1) docker does that as well
+// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
+//
+// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
+func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
+ if len(c.challenges) == 0 {
+ return nil
+ }
+ schemeNames := make([]string, 0, len(c.challenges))
+ for _, challenge := range c.challenges {
+ schemeNames = append(schemeNames, challenge.Scheme)
+ switch challenge.Scheme {
+ case "basic":
+ req.SetBasicAuth(c.auth.Username, c.auth.Password)
+ return nil
+ case "bearer":
+ registryToken := c.registryToken
+ if registryToken == "" {
+ cacheKey := ""
+ scopes := []authScope{c.scope}
+ if extraScope != nil {
+ // Using ':' as a separator here is unambiguous because getBearerToken below
+ // uses the same separator when formatting a remote request (and because
+ // repository names that we create can't contain colons, and extraScope values
+ // coming from a server come from `parseAuthScope`, which also splits on colons).
+ cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions)
+ if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 {
+ return fmt.Errorf(
+ "Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d",
+ cacheKey,
+ colonCount,
+ )
+ }
+ scopes = append(scopes, *extraScope)
+ }
+ var token bearerToken
+ t, inCache := c.tokenCache.Load(cacheKey)
+ if inCache {
+ token = t.(bearerToken)
+ }
+ if !inCache || time.Now().After(token.expirationTime) {
+ var (
+ t *bearerToken
+ err error
+ )
+ if c.auth.IdentityToken != "" {
+ t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes)
+ } else {
+ t, err = c.getBearerToken(req.Context(), challenge, scopes)
+ }
+ if err != nil {
+ return err
+ }
+
+ token = *t
+ c.tokenCache.Store(cacheKey, token)
+ }
+ registryToken = token.Token
+ }
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken))
+ return nil
+ default:
+ logrus.Debugf("no handler for %s authentication", challenge.Scheme)
+ }
+ }
+ logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
+ return nil
+}
+
+func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge,
+ scopes []authScope) (*bearerToken, error) {
+ realm, ok := challenge.Parameters["realm"]
+ if !ok {
+ return nil, errors.New("missing realm in bearer auth challenge")
+ }
+
+ authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build the form data required for OAuth2 authentication.
+ // More details here: https://docs.docker.com/registry/spec/auth/oauth/
+ params := authReq.URL.Query()
+ if service, ok := challenge.Parameters["service"]; ok && service != "" {
+ params.Add("service", service)
+ }
+
+ for _, scope := range scopes {
+ if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
+ params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
+ }
+ }
+ params.Add("grant_type", "refresh_token")
+ params.Add("refresh_token", c.auth.IdentityToken)
+ params.Add("client_id", "containers/image")
+
+ authReq.Body = io.NopCloser(strings.NewReader(params.Encode()))
+ authReq.Header.Add("User-Agent", c.userAgent)
+ authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+ logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
+ res, err := c.client.Do(authReq)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if err := httpResponseToError(res, "Trying to obtain access token"); err != nil {
+ return nil, err
+ }
+
+ tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
+ if err != nil {
+ return nil, err
+ }
+
+ return newBearerTokenFromJSONBlob(tokenBlob)
+}
+
+func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
+ scopes []authScope) (*bearerToken, error) {
+ realm, ok := challenge.Parameters["realm"]
+ if !ok {
+ return nil, errors.New("missing realm in bearer auth challenge")
+ }
+
+ authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ params := authReq.URL.Query()
+ if c.auth.Username != "" {
+ params.Add("account", c.auth.Username)
+ }
+
+ if service, ok := challenge.Parameters["service"]; ok && service != "" {
+ params.Add("service", service)
+ }
+
+ for _, scope := range scopes {
+ if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
+ params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
+ }
+ }
+
+ authReq.URL.RawQuery = params.Encode()
+
+ if c.auth.Username != "" && c.auth.Password != "" {
+ authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
+ }
+ authReq.Header.Add("User-Agent", c.userAgent)
+
+ logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
+ res, err := c.client.Do(authReq)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if err := httpResponseToError(res, "Requesting bearer token"); err != nil {
+ return nil, err
+ }
+ tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
+ if err != nil {
+ return nil, err
+ }
+
+ return newBearerTokenFromJSONBlob(tokenBlob)
+}
+
+// detectPropertiesHelper performs the work of detectProperties which executes
+// it at most once.
+func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
+ // We overwrite the TLS clients `InsecureSkipVerify` only if explicitly
+ // specified by the system context
+ if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
+ }
+ tr := tlsclientconfig.NewTransport()
+ tr.TLSClientConfig = c.tlsClientConfig
+ c.client = &http.Client{Transport: tr}
+
+ ping := func(scheme string) error {
+ pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry))
+ if err != nil {
+ return err
+ }
+ resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
+ if err != nil {
+ logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
+ return err
+ }
+ defer resp.Body.Close()
+ logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+ return registryHTTPResponseToError(resp)
+ }
+ c.challenges = parseAuthHeader(resp.Header)
+ c.scheme = scheme
+ c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
+ return nil
+ }
+ err := ping("https")
+ if err != nil && c.tlsClientConfig.InsecureSkipVerify {
+ err = ping("http")
+ }
+ if err != nil {
+ err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
+ if c.sys != nil && c.sys.DockerDisableV1Ping {
+ return err
+ }
+ // best effort to understand if we're talking to a V1 registry
+ pingV1 := func(scheme string) bool {
+ pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
+ if err != nil {
+ return false
+ }
+ resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
+ if err != nil {
+ logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
+ return false
+ }
+ defer resp.Body.Close()
+ logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+ return false
+ }
+ return true
+ }
+ isV1 := pingV1("https")
+ if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
+ isV1 = pingV1("http")
+ }
+ if isV1 {
+ err = ErrV1NotSupported
+ }
+ }
+ return err
+}
+
+// detectProperties detects various properties of the registry.
+// See the dockerClient documentation for members which are affected by this.
+func (c *dockerClient) detectProperties(ctx context.Context) error {
+ c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
+ return c.detectPropertiesError
+}
+
+func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
+ path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
+ res, err := c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res))
+ }
+
+ manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
+ if err != nil {
+ return nil, "", err
+ }
+ return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+}
+
+// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
+// This function can return a nil reader when none of the URLs is supported by this function. In this case, the caller
+// should fall back to fetching the non-external blob (i.e. pull from the registry).
+func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
+ var (
+ resp *http.Response
+ err error
+ )
+ if len(urls) == 0 {
+ return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+ }
+ for _, u := range urls {
+ blobURL, err := url.Parse(u)
+ if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") {
+ continue // unsupported URL, skip it.
+ }
+ // NOTE: we must not authenticate on additional URLs as those
+ // can be abused to leak credentials or tokens. Please
+ // refer to CVE-2020-15157 for more information.
+ resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
+ if err == nil {
+ if resp.StatusCode != http.StatusOK {
+ err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+ logrus.Debug(err)
+ resp.Body.Close()
+ continue
+ }
+ break
+ }
+ }
+ if resp == nil && err == nil {
+ return nil, 0, nil // fallback to non-external blob
+ }
+ if err != nil {
+ return nil, 0, err
+ }
+ return resp.Body, getBlobSize(resp), nil
+}
+
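+// getBlobSize returns the size from the Content-Length header of resp, or -1 if it is missing or unparsable.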
+func getBlobSize(resp *http.Response) int64 {
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ size = -1
+ }
+ return size
+}
+
+// getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ r, s, err := c.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, s, nil
+ }
+ }
+
+ path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if res.StatusCode != http.StatusOK {
+ err := registryHTTPResponseToError(res)
+ res.Body.Close()
+ return nil, 0, fmt.Errorf("fetching blob: %w", err)
+ }
+ cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
+ blobSize := getBlobSize(res)
+
+ reconnectingReader, err := newBodyReader(ctx, c, path, res.Body)
+ if err != nil {
+ res.Body.Close()
+ return nil, 0, err
+ }
+ return reconnectingReader, blobSize, nil
+}
+
+// getOCIDescriptorContents returns the contents of the blob specified by desc in ref, which must fit within maxSize.
+func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+ payload, err := iolimits.ReadAtMost(reader, maxSize)
+ if err != nil {
+ return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
+ }
+ return payload, nil
+}
+
+// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
+func isManifestUnknownError(err error) bool {
+ // docker/distribution, and as defined in the spec
+ var ec errcode.ErrorCoder
+ if errors.As(err, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown {
+ return true
+ }
+ // registry.redhat.io as of October 2022
+ var e errcode.Error
+ if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
+ return true
+ }
+ // opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
+ // but specifies that the HTTP status must be 404.
+ var unexpected *unexpectedHTTPResponseError
+ if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound {
+ return true
+ }
+ return false
+}
+
+// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for
+// digest in ref.
+// It returns (nil, nil) if the manifest does not exist.
+func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
+ tag := sigstoreAttachmentTag(digest)
+ sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("Looking for sigstore attachments in %s", sigstoreRef.String())
+ manifestBlob, mimeType, err := c.fetchManifest(ctx, ref, tag)
+ if err != nil {
+ // FIXME: Are we going to need better heuristics??
+ // This alone is probably a good enough reason for sigstore to be opt-in only,
+ // otherwise we would just break ordinary copies.
+ if isManifestUnknownError(err) {
+ logrus.Debugf("Fetching sigstore attachment manifest failed, assuming it does not exist: %v", err)
+ return nil, nil
+ }
+ logrus.Debugf("Fetching sigstore attachment manifest failed: %v", err)
+ return nil, err
+ }
+ if mimeType != imgspecv1.MediaTypeImageManifest {
+ // FIXME: Try anyway??
+ return nil, fmt.Errorf("unexpected MIME type for sigstore attachment manifest %s: %q",
+ sigstoreRef.String(), mimeType)
+ }
+ res, err := manifest.OCI1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing manifest %s: %w", sigstoreRef.String(), err)
+ }
+ return res, nil
+}
+
+// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
+// using the original data structures.
+func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), registryHTTPResponseToError(res))
+ }
+
+ body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
+ if err != nil {
+ return nil, err
+ }
+
+ var parsedBody extensionSignatureList
+ if err := json.Unmarshal(body, &parsedBody); err != nil {
+ return nil, fmt.Errorf("decoding signature list: %w", err)
+ }
+ return &parsedBody, nil
+}
+
+// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest.
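+// For example, "sha256:<hex>" becomes "sha256-<hex>.sig".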
+func sigstoreAttachmentTag(d digest.Digest) string {
+ return strings.Replace(d.String(), ":", "-", 1) + ".sig"
+}
+
+// Close removes resources associated with an initialized dockerClient, if any.
+func (c *dockerClient) Close() error {
+ if c.client != nil {
+ c.client.CloseIdleConnections()
+ }
+ return nil
+}
diff --git a/docker/docker_client_test.go b/docker/docker_client_test.go
new file mode 100644
index 0000000..67e3876
--- /dev/null
+++ b/docker/docker_client_test.go
@@ -0,0 +1,412 @@
+package docker
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/internal/useragent"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDockerCertDir(t *testing.T) {
+ const nondefaultFullPath = "/this/is/not/the/default/full/path"
+ const nondefaultPerHostDir = "/this/is/not/the/default/certs.d"
+ const variableReference = "$HOME"
+ const rootPrefix = "/root/prefix"
+ const registryHostPort = "thishostdefinitelydoesnotexist:5000"
+
+ systemPerHostResult := filepath.Join(perHostCertDirs[len(perHostCertDirs)-1].path, registryHostPort)
+ for _, c := range []struct {
+ sys *types.SystemContext
+ expected string
+ }{
+ // The common case
+ {nil, systemPerHostResult},
+ // There is a context, but it does not override the path.
+ {&types.SystemContext{}, systemPerHostResult},
+ // Full path overridden
+ {&types.SystemContext{DockerCertPath: nondefaultFullPath}, nondefaultFullPath},
+ // Per-host path overridden
+ {
+ &types.SystemContext{DockerPerHostCertDirPath: nondefaultPerHostDir},
+ filepath.Join(nondefaultPerHostDir, registryHostPort),
+ },
+ // Both overridden
+ {
+ &types.SystemContext{
+ DockerCertPath: nondefaultFullPath,
+ DockerPerHostCertDirPath: nondefaultPerHostDir,
+ },
+ nondefaultFullPath,
+ },
+ // Root overridden
+ {
+ &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix},
+ filepath.Join(rootPrefix, systemPerHostResult),
+ },
+ // Root and path overrides present simultaneously,
+ {
+ &types.SystemContext{
+ DockerCertPath: nondefaultFullPath,
+ RootForImplicitAbsolutePaths: rootPrefix,
+ },
+ nondefaultFullPath,
+ },
+ {
+ &types.SystemContext{
+ DockerPerHostCertDirPath: nondefaultPerHostDir,
+ RootForImplicitAbsolutePaths: rootPrefix,
+ },
+ filepath.Join(nondefaultPerHostDir, registryHostPort),
+ },
+ // … and everything at once
+ {
+ &types.SystemContext{
+ DockerCertPath: nondefaultFullPath,
+ DockerPerHostCertDirPath: nondefaultPerHostDir,
+ RootForImplicitAbsolutePaths: rootPrefix,
+ },
+ nondefaultFullPath,
+ },
+ // No environment expansion happens in the overridden paths
+ {&types.SystemContext{DockerCertPath: variableReference}, variableReference},
+ {
+ &types.SystemContext{DockerPerHostCertDirPath: variableReference},
+ filepath.Join(variableReference, registryHostPort),
+ },
+ } {
+ path, err := dockerCertDir(c.sys, registryHostPort)
+ require.Equal(t, nil, err)
+ assert.Equal(t, c.expected, path)
+ }
+}
+
+func TestNewBearerTokenFromJsonBlob(t *testing.T) {
+ expected := &bearerToken{Token: "IAmAToken", ExpiresIn: 100, IssuedAt: time.Unix(1514800802, 0)}
+ tokenBlob := []byte(`{"token":"IAmAToken","expires_in":100,"issued_at":"2018-01-01T10:00:02+00:00"}`)
+ token, err := newBearerTokenFromJSONBlob(tokenBlob)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ assertBearerTokensEqual(t, expected, token)
+}
+
+func TestNewBearerAccessTokenFromJsonBlob(t *testing.T) {
+ expected := &bearerToken{Token: "IAmAToken", ExpiresIn: 100, IssuedAt: time.Unix(1514800802, 0)}
+ tokenBlob := []byte(`{"access_token":"IAmAToken","expires_in":100,"issued_at":"2018-01-01T10:00:02+00:00"}`)
+ token, err := newBearerTokenFromJSONBlob(tokenBlob)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ assertBearerTokensEqual(t, expected, token)
+}
+
+func TestNewBearerTokenFromInvalidJsonBlob(t *testing.T) {
+ tokenBlob := []byte("IAmNotJson")
+ _, err := newBearerTokenFromJSONBlob(tokenBlob)
+ if err == nil {
+ t.Fatalf("unexpected an error unmarshaling JSON")
+ }
+}
+
+func TestNewBearerTokenSmallExpiryFromJsonBlob(t *testing.T) {
+ expected := &bearerToken{Token: "IAmAToken", ExpiresIn: 60, IssuedAt: time.Unix(1514800802, 0)}
+ tokenBlob := []byte(`{"token":"IAmAToken","expires_in":1,"issued_at":"2018-01-01T10:00:02+00:00"}`)
+ token, err := newBearerTokenFromJSONBlob(tokenBlob)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ assertBearerTokensEqual(t, expected, token)
+}
+
+func TestNewBearerTokenIssuedAtZeroFromJsonBlob(t *testing.T) {
+ zeroTime := time.Time{}.Format(time.RFC3339)
+ now := time.Now()
+ tokenBlob := []byte(fmt.Sprintf(`{"token":"IAmAToken","expires_in":100,"issued_at":"%s"}`, zeroTime))
+ token, err := newBearerTokenFromJSONBlob(tokenBlob)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ if token.IssuedAt.Before(now) {
+ t.Fatalf("expected [%s] not to be before [%s]", token.IssuedAt, now)
+ }
+}
+
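+// assertBearerTokensEqual fails the test if the Token, ExpiresIn, or IssuedAt fields of the two tokens differ.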
+func assertBearerTokensEqual(t *testing.T, expected, subject *bearerToken) {
+ if expected.Token != subject.Token {
+ t.Fatalf("expected [%s] to equal [%s], it did not", subject.Token, expected.Token)
+ }
+ if expected.ExpiresIn != subject.ExpiresIn {
+ t.Fatalf("expected [%d] to equal [%d], it did not", subject.ExpiresIn, expected.ExpiresIn)
+ }
+ if !expected.IssuedAt.Equal(subject.IssuedAt) {
+ t.Fatalf("expected [%s] to equal [%s], it did not", subject.IssuedAt, expected.IssuedAt)
+ }
+}
+
+func TestUserAgent(t *testing.T) {
+ const sentinelUA = "sentinel/1.0"
+
+ var expectedUA string
+ s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ got := r.Header.Get("User-Agent")
+ assert.Equal(t, expectedUA, got)
+ w.WriteHeader(http.StatusOK)
+ }))
+ defer s.Close()
+
+ for _, tc := range []struct {
+ sys *types.SystemContext
+ expected string
+ }{
+ // Can't both test nil and set DockerInsecureSkipTLSVerify :(
+ // {nil, defaultUA},
+ {&types.SystemContext{}, useragent.DefaultUserAgent},
+ {&types.SystemContext{DockerRegistryUserAgent: sentinelUA}, sentinelUA},
+ } {
+ // For this test against localhost, we don't care.
+ tc.sys.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+
+ registry := strings.TrimPrefix(s.URL, "http://")
+
+ expectedUA = tc.expected
+ if err := CheckAuth(context.Background(), tc.sys, "", "", registry); err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ }
+}
+
+func TestNeedsRetryOnError(t *testing.T) {
+ needsRetry, _ := needsRetryWithUpdatedScope(errors.New("generic"), nil)
+ if needsRetry {
+ t.Fatal("Got needRetry for a connection that included an error")
+ }
+}
+
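+// registrySuseComResp is a 401 response template (modeled on registry.suse.com); individual
+// tests set or remove the Www-Authenticate header before calling needsRetryWithUpdatedScope.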
+var registrySuseComResp = http.Response{
+ Status: "401 Unauthorized",
+ StatusCode: http.StatusUnauthorized,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: map[string][]string{
+ "Content-Length": {"145"},
+ "Content-Type": {"application/json"},
+ "Date": {"Fri, 26 Aug 2022 08:03:13 GMT"},
+ "Docker-Distribution-Api-Version": {"registry/2.0"},
+ // "Www-Authenticate": {`Bearer realm="https://registry.suse.com/auth",service="SUSE Linux Docker Registry",scope="registry:catalog:*",error="insufficient_scope"`},
+ "X-Content-Type-Options": {"nosniff"},
+ },
+ Request: nil,
+}
+
+func TestNeedsRetryOnInsuficientScope(t *testing.T) {
+ resp := registrySuseComResp
+ resp.Header["Www-Authenticate"] = []string{
+ `Bearer realm="https://registry.suse.com/auth",service="SUSE Linux Docker Registry",scope="registry:catalog:*",error="insufficient_scope"`,
+ }
+ expectedScope := authScope{
+ resourceType: "registry",
+ remoteName: "catalog",
+ actions: "*",
+ }
+
+ needsRetry, scope := needsRetryWithUpdatedScope(nil, &resp)
+
+ if !needsRetry {
+ t.Fatal("Expected needing to retry")
+ }
+
+ if expectedScope != *scope {
+ t.Fatalf("Got an invalid scope, expected '%q' but got '%q'", expectedScope, *scope)
+ }
+}
+
+func TestNeedsRetryNoRetryWhenNoAuthHeader(t *testing.T) {
+ resp := registrySuseComResp
+ delete(resp.Header, "Www-Authenticate")
+
+ needsRetry, _ := needsRetryWithUpdatedScope(nil, &resp)
+
+ if needsRetry {
+ t.Fatal("Expected no need to retry, as no Authentication headers are present")
+ }
+}
+
+func TestNeedsRetryNoRetryWhenNoBearerAuthHeader(t *testing.T) {
+ resp := registrySuseComResp
+ resp.Header["Www-Authenticate"] = []string{
+ `OAuth2 realm="https://registry.suse.com/auth",service="SUSE Linux Docker Registry",scope="registry:catalog:*"`,
+ }
+
+ needsRetry, _ := needsRetryWithUpdatedScope(nil, &resp)
+
+ if needsRetry {
+ t.Fatal("Expected no need to retry, as no bearer authentication header is present")
+ }
+}
+
+func TestNeedsRetryNoRetryWhenNoErrorInBearer(t *testing.T) {
+ resp := registrySuseComResp
+ resp.Header["Www-Authenticate"] = []string{
+ `Bearer realm="https://registry.suse.com/auth",service="SUSE Linux Docker Registry",scope="registry:catalog:*"`,
+ }
+
+ needsRetry, _ := needsRetryWithUpdatedScope(nil, &resp)
+
+ if needsRetry {
+ t.Fatal("Expected no need to retry, as no insufficient error is present in the authentication header")
+ }
+}
+
+func TestNeedsRetryNoRetryWhenInvalidErrorInBearer(t *testing.T) {
+ resp := registrySuseComResp
+ resp.Header["Www-Authenticate"] = []string{
+ `Bearer realm="https://registry.suse.com/auth",service="SUSE Linux Docker Registry",scope="registry:catalog:*,error="random_error"`,
+ }
+
+ needsRetry, _ := needsRetryWithUpdatedScope(nil, &resp)
+
+ if needsRetry {
+ t.Fatal("Expected no need to retry, as no insufficient_error is present in the authentication header")
+ }
+}
+
+func TestNeedsRetryNoRetryWhenInvalidScope(t *testing.T) {
+ resp := registrySuseComResp
+ resp.Header["Www-Authenticate"] = []string{
+ `Bearer realm="https://registry.suse.com/auth",service="SUSE Linux Docker Registry",scope="foo:bar",error="insufficient_scope"`,
+ }
+
+ needsRetry, _ := needsRetryWithUpdatedScope(nil, &resp)
+
+ if needsRetry {
+ t.Fatal("Expected no need to retry, as no insufficient_error is present in the authentication header")
+ }
+}
+
+func TestNeedsNoRetry(t *testing.T) {
+ resp := http.Response{
+ Status: "200 OK",
+ StatusCode: http.StatusOK,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: map[string][]string{"Apptime": {"D=49722"},
+ "Content-Length": {"1683"},
+ "Content-Type": {"application/json; charset=utf-8"},
+ "Date": {"Fri, 26 Aug 2022 09:00:21 GMT"},
+ "Docker-Distribution-Api-Version": {"registry/2.0"},
+ "Link": {`</v2/_catalog?last=f35%2Fs2i-base&n=100>; rel="next"`},
+ "Referrer-Policy": {"same-origin"},
+ "Server": {"Apache"},
+ "Strict-Transport-Security": {"max-age=31536000; includeSubDomains; preload"},
+ "Vary": {"Accept"},
+ "X-Content-Type-Options": {"nosniff"},
+ "X-Fedora-Proxyserver": {"proxy10.iad2.fedoraproject.org"},
+ "X-Fedora-Requestid": {"YwiLpHEhLsbSTugJblBF8QAAAEI"},
+ "X-Frame-Options": {"SAMEORIGIN"},
+ "X-Xss-Protection": {"1; mode=block"},
+ },
+ }
+
+ needsRetry, _ := needsRetryWithUpdatedScope(nil, &resp)
+ if needsRetry {
+ t.Fatal("Got the need to retry, but none should be required")
+ }
+}
+
+func TestParseRegistryWarningHeader(t *testing.T) {
+ for _, c := range []struct{ header, expected string }{
+ {"completely invalid", ""},
+ {`299 - "trivial"`, "trivial"},
+ {`100 - "not-299"`, ""},
+ {`299 localhost "warn-agent set"`, ""},
+ {`299 - "no-terminating-quote`, ""},
+ {"299 - \"\x01 control\"", ""},
+ {"299 - \"\\\x01 escaped control\"", ""},
+ {"299 - \"e\\scaped\"", "escaped"},
+ {"299 - \"non-UTF8 \xA1\xA2\"", "non-UTF8 \xA1\xA2"},
+ {"299 - \"non-UTF8 escaped \\\xA1\\\xA2\"", "non-UTF8 escaped \xA1\xA2"},
+ {"299 - \"UTF8 žluťoučký\"", "UTF8 žluťoučký"},
+ {"299 - \"UTF8 \\\xC5\\\xBEluťoučký\"", "UTF8 žluťoučký"},
+ {`299 - "unterminated`, ""},
+ {`299 - "warning" "some-date"`, ""},
+ } {
+ res := parseRegistryWarningHeader(c.header)
+ assert.Equal(t, c.expected, res, c.header)
+ }
+}
+
+func TestIsManifestUnknownError(t *testing.T) {
+ // Mostly a smoke test; we can add more registries here if they need special handling.
+
+ for _, c := range []struct{ name, response string }{
+ {
+ name: "docker.io when a tag in an _existing repo_ is not found",
+ response: "HTTP/1.1 404 Not Found\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 109\r\n" +
+ "Content-Type: application/json\r\n" +
+ "Date: Thu, 12 Aug 2021 20:51:32 GMT\r\n" +
+ "Docker-Distribution-Api-Version: registry/2.0\r\n" +
+ "Ratelimit-Limit: 100;w=21600\r\n" +
+ "Ratelimit-Remaining: 100;w=21600\r\n" +
+ "Strict-Transport-Security: max-age=31536000\r\n" +
+ "\r\n" +
+ "{\"errors\":[{\"code\":\"MANIFEST_UNKNOWN\",\"message\":\"manifest unknown\",\"detail\":{\"Tag\":\"this-does-not-exist\"}}]}\n",
+ },
+ {
+ name: "registry.redhat.io/v2/this-does-not-exist/manifests/latest",
+ response: "HTTP/1.1 404 Not Found\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 53\r\n" +
+ "Cache-Control: max-age=0, no-cache, no-store\r\n" +
+ "Content-Type: application/json\r\n" +
+ "Date: Thu, 13 Oct 2022 18:15:15 GMT\r\n" +
+ "Expires: Thu, 13 Oct 2022 18:15:15 GMT\r\n" +
+ "Pragma: no-cache\r\n" +
+ "Server: Apache\r\n" +
+ "Strict-Transport-Security: max-age=63072000; includeSubdomains; preload\r\n" +
+ "X-Hostname: crane-tbr06.cran-001.prod.iad2.dc.redhat.com\r\n" +
+ "\r\n" +
+ "{\"errors\": [{\"code\": \"404\", \"message\": \"Not Found\"}]}\r\n",
+ },
+ {
+ name: "registry.redhat.io/v2/rhosp15-rhel8/openstack-cron/manifests/sha256-8df5e60c42668706ac108b59c559b9187fa2de7e4e262e2967e3e9da35d5a8d7.sig",
+ response: "HTTP/1.1 404 Not Found\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 10\r\n" +
+ "Accept-Ranges: bytes\r\n" +
+ "Date: Thu, 13 Oct 2022 18:13:53 GMT\r\n" +
+ "Server: AkamaiNetStorage\r\n" +
+ "X-Docker-Size: -1\r\n" +
+ "\r\n" +
+ "Not found\r\n",
+ },
+ } {
+ resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader([]byte(c.response))), nil)
+ require.NoError(t, err, c.name)
+ defer resp.Body.Close()
+ err = fmt.Errorf("wrapped: %w", registryHTTPResponseToError(resp))
+
+ res := isManifestUnknownError(err)
+ assert.True(t, res, "%s: %#v", c.name, err)
+ }
+}
diff --git a/docker/docker_image.go b/docker/docker_image.go
new file mode 100644
index 0000000..9316048
--- /dev/null
+++ b/docker/docker_image.go
@@ -0,0 +1,166 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
+// which are specific to Docker.
+type Image struct {
+ types.ImageCloser
+ src *dockerImageSource
+}
+
+// newImage returns a new Image interface type after setting up
+// a client to the registry hosting the given image.
+// The caller must call .Close() on the returned Image.
+func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
+ s, err := newImageSource(ctx, sys, ref)
+ if err != nil {
+ return nil, err
+ }
+ img, err := image.FromSource(ctx, sys, s)
+ if err != nil {
+ return nil, err
+ }
+ return &Image{ImageCloser: img, src: s}, nil
+}
+
+// SourceRefFullName returns a fully expanded name for the repository this image is in.
+func (i *Image) SourceRefFullName() string {
+ return i.src.logicalRef.ref.Name()
+}
+
+// GetRepositoryTags lists all tags available in the repository. The tag
+// provided inside the ImageReference will be ignored. (This is a
+// backward-compatible shim method which calls the module-level
+// GetRepositoryTags)
+func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
+ return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef)
+}
+
+// GetRepositoryTags lists all tags available in the repository. The tag
+// provided inside the ImageReference will be ignored.
+func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) {
+ dr, ok := ref.(dockerReference)
+ if !ok {
+ return nil, errors.New("ref must be a dockerReference")
+ }
+
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
+ path := fmt.Sprintf(tagsPath, reference.Path(dr.ref))
+ client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
+ if err != nil {
+ return nil, fmt.Errorf("failed to create client: %w", err)
+ }
+ defer client.Close()
+
+ tags := make([]string, 0)
+
+ for {
+ res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("fetching tags list: %w", registryHTTPResponseToError(res))
+ }
+
+ var tagsHolder struct {
+ Tags []string
+ }
+ if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil {
+ return nil, err
+ }
+ tags = append(tags, tagsHolder.Tags...)
+
+ link := res.Header.Get("Link")
+ if link == "" {
+ break
+ }
+
+ linkURLPart, _, _ := strings.Cut(link, ";")
+ linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
+ if err != nil {
+ return tags, err
+ }
+
+ // can be relative or absolute, but we only want the path (and I
+ // guess we're in trouble if it forwards to a new place...)
+ path = linkURL.Path
+ if linkURL.RawQuery != "" {
+ path += "?"
+ path += linkURL.RawQuery
+ }
+ }
+ return tags, nil
+}
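+
+// Hypothetical usage sketch (the registry name below is made up, not part of this
+// package): a caller that already has a context and a *types.SystemContext can
+// list tags roughly like this:
+//
+//	ref, err := ParseReference("//registry.example.com/project/app")
+//	if err != nil { /* handle error */ }
+//	tags, err := GetRepositoryTags(ctx, sys, ref)
+//	if err != nil { /* handle error */ }
+//	for _, tag := range tags { fmt.Println(tag) }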
+
+// GetDigest returns the image's digest.
+// Use this to optimize and avoid use of an ImageSource based on the returned digest;
+// if you are going to use an ImageSource anyway, it’s more efficient to create it first
+// and compute the digest from the value returned by GetManifest.
+// NOTE: Implemented to avoid Docker Hub API limits; mirror configuration may be
+// ignored (but support may be added in the future).
+func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) {
+ dr, ok := ref.(dockerReference)
+ if !ok {
+ return "", errors.New("ref must be a dockerReference")
+ }
+ if dr.isUnknownDigest {
+ return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport())
+ }
+
+ tagOrDigest, err := dr.tagOrDigest()
+ if err != nil {
+ return "", err
+ }
+
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return "", err
+ }
+ client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
+ if err != nil {
+ return "", fmt.Errorf("failed to create client: %w", err)
+ }
+ defer client.Close()
+
+ path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
+
+ res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return "", err
+ }
+
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res))
+ }
+
+ dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
+ if err != nil {
+ return "", err
+ }
+
+ return dig, nil
+}
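+
+// Hypothetical usage sketch (the reference below is made up): because GetDigest
+// only issues a HEAD request, it is the cheap way to resolve a tag to a digest
+// when no ImageSource is needed afterwards:
+//
+//	ref, _ := ParseReference("//registry.example.com/project/app:latest")
+//	dgst, err := GetDigest(ctx, sys, ref)
+//	if err != nil { /* handle error */ }
+//	fmt.Println(dgst) // e.g. a sha256:<hex> digest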
diff --git a/docker/docker_image_dest.go b/docker/docker_image_dest.go
new file mode 100644
index 0000000..a9a36f0
--- /dev/null
+++ b/docker/docker_image_dest.go
@@ -0,0 +1,919 @@
+package docker
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/streamdigest"
+ "github.com/containers/image/v5/internal/uploadreader"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/docker/distribution/registry/api/errcode"
+ v2 "github.com/docker/distribution/registry/api/v2"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+)
+
+type dockerImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+
+ ref dockerReference
+ c *dockerClient
+ // State
+ manifestDigest digest.Digest // or "" if not yet known.
+}
+
+// newImageDestination creates a new ImageDestination for the specified image reference.
+func newImageDestination(sys *types.SystemContext, ref dockerReference) (private.ImageDestination, error) {
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
+ c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "pull,push")
+ if err != nil {
+ return nil, err
+ }
+ mimeTypes := []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ imgspecv1.MediaTypeImageIndex,
+ manifest.DockerV2ListMediaType,
+ }
+ if c.sys == nil || !c.sys.DockerDisableDestSchema1MIMETypes {
+ mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
+ }
+
+ dest := &dockerImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: mimeTypes,
+ DesiredLayerCompression: types.Compress,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
+ c: c,
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dockerImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dockerImageDestination) Close() error {
+ return d.c.Close()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
+ if err := d.c.detectProperties(ctx); err != nil {
+ return err
+ }
+ switch {
+ case d.c.supportsSignatures:
+ return nil
+ case d.c.signatureBase != nil:
+ return nil
+ default:
+ return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// sizeCounter is an io.Writer which only counts the total size of its input.
+type sizeCounter struct{ size int64 }
+
+func (c *sizeCounter) Write(p []byte) (n int, err error) {
+ c.size += int64(len(p))
+ return len(p), nil
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
+ // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
+ // the source blob is uncompressed, and the destination blob is being compressed "on the fly".
+ if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests {
+ logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
+ streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ defer cleanup()
+ stream = streamCopy
+ }
+
+ if inputInfo.Digest != "" {
+ // This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
+ // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
+ haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ if haveBlob {
+ return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
+ }
+ }
+
+ // FIXME? Chunked upload, progress reporting, etc.
+ uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
+ logrus.Debugf("Uploading %s", uploadPath)
+ res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusAccepted {
+ logrus.Debugf("Error initiating layer upload, response %#v", *res)
+ return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
+ }
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
+ }
+
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
+ sizeCounter := &sizeCounter{}
+ stream = io.TeeReader(stream, sizeCounter)
+
+ uploadLocation, err = func() (*url.URL, error) { // A scope for defer
+ uploadReader := uploadreader.NewUploadReader(stream)
+ // This error text should never be user-visible; we terminate only after makeRequestToResolvedURL
+ // returns, so there isn’t a way for the error text to be provided to any of our callers.
+ defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+ if err != nil {
+ logrus.Debugf("Error uploading layer chunked %v", err)
+ return nil, err
+ }
+ defer res.Body.Close()
+ if !successStatus(res.StatusCode) {
+ return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res))
+ }
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return nil, fmt.Errorf("determining upload URL: %w", err)
+ }
+ return uploadLocation, nil
+ }()
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ blobDigest := digester.Digest()
+
+ // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
+
+ locationQuery := uploadLocation.Query()
+ locationQuery.Set("digest", blobDigest.String())
+ uploadLocation.RawQuery = locationQuery.Encode()
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ logrus.Debugf("Error uploading layer, response %#v", *res)
+ return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
+ }
+
+ logrus.Debugf("Upload of layer %s complete", blobDigest)
+ options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
+ return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil
+}
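+
+// A sketch of the upload sequence performed above (the monolithic flow of the
+// registry HTTP API, with endpoints paraphrased from blobUploadPath):
+//
+//	POST  /v2/<name>/blobs/uploads/          -> 202 Accepted + Location
+//	PATCH <Location> (blob bytes)            -> 202 Accepted + new Location
+//	PUT   <Location>?digest=<blob digest>    -> 201 Created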
+
+// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
+// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+ checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
+ logrus.Debugf("Checking %s", checkPath)
+ res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
+ if err != nil {
+ return false, -1, err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusOK:
+ logrus.Debugf("... already exists")
+ return true, getBlobSize(res), nil
+ case http.StatusUnauthorized:
+ logrus.Debugf("... not authorized")
+ return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))
+ case http.StatusNotFound:
+ logrus.Debugf("... not present")
+ return false, -1, nil
+ default:
+ return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))
+ }
+}
+
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
+ u := url.URL{
+ Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+ RawQuery: url.Values{
+ "mount": {srcDigest.String()},
+ "from": {reference.Path(srcRepo)},
+ }.Encode(),
+ }
+ logrus.Debugf("Trying to mount %s", u.Redacted())
+ res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusCreated:
+ logrus.Debugf("... mount OK")
+ return nil
+ case http.StatusAccepted:
+ // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+ // Abort, and let the ultimate caller do an upload when it's ready, instead.
+ // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return fmt.Errorf("determining upload URL after a mount attempt: %w", err)
+ }
+ logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted())
+ res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope)
+ if err != nil {
+ logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+ } else {
+ defer res2.Body.Close()
+ if res2.StatusCode != http.StatusNoContent {
+ logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
+ }
+ }
+ // Anyway, if canceling the upload fails, ignore it and return the more important error:
+ return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ default:
+ logrus.Debugf("Error mounting, response %#v", *res)
+ return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res))
+ }
+}
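+
+// A sketch of the request built above: a cross-repository mount is a single
+//
+//	POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<source repository>
+//
+// where 201 Created means the blob was mounted, and 202 Accepted means the
+// registry ignored the mount and started an ordinary upload session instead.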
+
+// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
+// blob in the current repository, with no cross-repo reuse or mounting; the cache may be updated, but it is not read.
+// The caller must ensure info.Digest is set.
+func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) {
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ if exists {
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+ return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
+ }
+ return false, private.ReusedBlob{}, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if info.Digest == "" {
+ return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
+ }
+
+ if impl.OriginalBlobMatchesRequiredCompression(options) {
+ // First, check whether the blob happens to already exist at the destination.
+ haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ if haveBlob {
+ return true, reusedInfo, nil
+ }
+ } else {
+ requiredCompression := "nil"
+ if options.OriginalCompression != nil {
+ requiredCompression = options.OriginalCompression.Name()
+ }
+ logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression)
+ }
+
+ // Then try reusing blobs from other locations.
+ candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
+ for _, candidate := range candidates {
+ var err error
+ compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
+ if err != nil {
+ logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
+ continue
+ }
+ var candidateRepo reference.Named
+ if !candidate.UnknownLocation {
+ candidateRepo, err = parseBICLocationReference(candidate.Location)
+ if err != nil {
+ logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+ continue
+ }
+ }
+ if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
+ requiredCompression := "nil"
+ if compressionAlgorithm != nil {
+ requiredCompression = compressionAlgorithm.Name()
+ }
+ if !candidate.UnknownLocation {
+ logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
+ } else {
+ logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression)
+ }
+ continue
+ }
+ if !candidate.UnknownLocation {
+ if candidate.CompressorName != blobinfocache.Uncompressed {
+ logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
+ } else {
+ logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
+ }
+ // Sanity checks:
+ if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+ // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
+ // (the "from" parameter); in that case we might try to use these candidates as well.
+ //
+ // OTOH that would mean we can’t do the “blobExists” check, and if there is no match
+ // we could get an upload request that we would have to cancel.
+ logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+ continue
+ }
+ } else {
+ if candidate.CompressorName != blobinfocache.Uncompressed {
+ logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
+ } else {
+ logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
+ }
+ // This digest is a known variant of this blob but we don’t
+ // have a recorded location in this registry, let’s try looking
+ // for it in the current repo.
+ candidateRepo = reference.TrimNamed(d.ref.ref)
+ }
+ if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ logrus.Debug("... Already tried the primary destination")
+ continue
+ }
+
+ // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+
+ // Checking candidateRepo, and mounting from it, requires an
+ // expanded token scope.
+ extraScope := &authScope{
+ resourceType: "repository",
+ remoteName: reference.Path(candidateRepo),
+ actions: "pull",
+ }
+ // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
+ // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
+ // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
+ // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
+ // Even worse, docker/distribution does not actually reasonably implement canceling uploads
+ // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
+ // so, be a nice client and don't create unnecessary upload sessions on the server.
+ exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
+ if err != nil {
+ logrus.Debugf("... Failed: %v", err)
+ continue
+ }
+ if !exists {
+ // FIXME? Should we drop the blob from cache here (and elsewhere?)?
+ continue // logrus.Debug() already happened in blobExists
+ }
+ if candidateRepo.Name() != d.ref.ref.Name() {
+ if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
+ logrus.Debugf("... Mount failed: %v", err)
+ continue
+ }
+ }
+
+ options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+
+ return true, private.ReusedBlob{
+ Digest: candidate.Digest,
+ Size: size,
+ CompressionOperation: compressionOperation,
+ CompressionAlgorithm: compressionAlgorithm}, nil
+ }
+
+ return false, private.ReusedBlob{}, nil
+}
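+
+// In short, the reuse order implemented above is: (1) the exact blob in the
+// destination repository (when its compression is acceptable), (2) cache
+// candidates with a known location on the same registry, mounted across
+// repositories where needed, and (3) candidates with no recorded location,
+// which are only checked against the current repository.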
+
+// PutManifest writes manifest to the destination.
+// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list
+// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the
+// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ var refTail string
+ // If d.ref.isUnknownDigest=true, then we push without a tag, so get the
+ // digest that will be used
+ if d.ref.isUnknownDigest {
+ digest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ refTail = digest.String()
+ } else if instanceDigest != nil {
+ // If the instanceDigest is provided, then use it as the refTail, because the reference,
+ // whether it includes a tag or a digest, refers to the list as a whole, and not this
+ // particular instance.
+ refTail = instanceDigest.String()
+ // Double-check that the manifest we've been given matches the digest we've been given.
+ matches, err := manifest.MatchesDigest(m, *instanceDigest)
+ if err != nil {
+ return fmt.Errorf("digesting manifest in PutManifest: %w", err)
+ }
+ if !matches {
+ manifestDigest, merr := manifest.Digest(m)
+ if merr != nil {
+ return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr)
+ }
+ return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+ }
+ } else {
+ // Compute the digest of the main manifest, or the list if it's a list, so that we
+ // have a digest value to use if we're asked to save a signature for the manifest.
+ digest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.manifestDigest = digest
+ // The refTail should be either a digest (which we expect to match the value we just
+ // computed) or a tag name.
+ refTail, err = d.ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+ }
+
+ return d.uploadManifest(ctx, m, refTail)
+}
+
+// uploadManifest writes manifest to tagOrDigest.
+func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, tagOrDigest string) error {
+ path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), tagOrDigest)
+
+ headers := map[string][]string{}
+ mimeType := manifest.GuessMIMEType(m)
+ if mimeType != "" {
+ headers["Content-Type"] = []string{mimeType}
+ }
+ res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if !successStatus(res.StatusCode) {
+ rawErr := registryHTTPResponseToError(res)
+ err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr)
+ if isManifestInvalidError(rawErr) {
+ err = types.ManifestTypeRejectedError{Err: err}
+ }
+ return err
+ }
+ // An HTTP server may not be a registry at all, and just return 200 OK to everything
+ // (in particular that can fairly easily happen after tearing down a website and
+ // replacing it with a global 302 redirect to a new website, completely ignoring the
+ // path in the request); in that case we could “succeed” uploading a whole image.
+ // With docker/distribution we could rely on a Docker-Content-Digest header being present
+ // (because docker/distribution/registry/client has been failing uploads if it was missing),
+ // but that has been defined as explicitly optional by
+ // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers
+ // So, just note the missing header in a debug log.
+ if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 {
+ logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry")
+ }
+ return nil
+}
+
+// successStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func successStatus(status int) bool {
+ return status >= 200 && status <= 399
+}
+
+// isManifestInvalidError returns true iff err from registryHTTPResponseToError is a “manifest invalid” error.
+func isManifestInvalidError(err error) bool {
+ var ec errcode.ErrorCoder
+ if ok := errors.As(err, &ec); !ok {
+ return false
+ }
+
+ switch ec.ErrorCode() {
+ // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+ case v2.ErrorCodeManifestInvalid:
+ return true
+ // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
+ // when uploading to a tag (because it can’t find a matching tag inside the manifest)
+ case v2.ErrorCodeTagInvalid:
+ return true
+ // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
+ // uploading an OCI manifest that is (correctly, according to the spec) missing
+ // a top-level media type. See libpod issue #1719
+ // FIXME: remove this case when ECR behavior is fixed
+ case errcode.ErrorCodeUnsupported:
+ return strings.Contains(err.Error(), "Invalid JSON syntax")
+ default:
+ return false
+ }
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ if instanceDigest == nil {
+ if d.manifestDigest == "" {
+ // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+ return errors.New("Unknown manifest digest, can't add signatures")
+ }
+ instanceDigest = &d.manifestDigest
+ }
+
+ sigstoreSignatures := []signature.Sigstore{}
+ otherSignatures := []signature.Signature{}
+ for _, sig := range signatures {
+ if sigstoreSig, ok := sig.(signature.Sigstore); ok {
+ sigstoreSignatures = append(sigstoreSignatures, sigstoreSig)
+ } else {
+ otherSignatures = append(otherSignatures, sig)
+ }
+ }
+
+ // Only write sigstore signatures to sigstore attachments. We _could_ store them to lookaside
+ // instead, but that would probably be rather surprising.
+ // FIXME: So should we enable sigstore attachments in all cases? Or write in all cases, but opt-in to read?
+
+ if len(sigstoreSignatures) != 0 {
+ if err := d.putSignaturesToSigstoreAttachments(ctx, sigstoreSignatures, *instanceDigest); err != nil {
+ return err
+ }
+ }
+
+ if len(otherSignatures) != 0 {
+ if err := d.c.detectProperties(ctx); err != nil {
+ return err
+ }
+ switch {
+ case d.c.supportsSignatures:
+ if err := d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest); err != nil {
+ return err
+ }
+ case d.c.signatureBase != nil:
+ if err := d.putSignaturesToLookaside(signatures, *instanceDigest); err != nil {
+ return err
+ }
+ default:
+ return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
+ }
+
+ return nil
+}
+
+// putSignaturesToLookaside implements PutSignaturesWithFormat() by writing to the lookaside location configured in d.c.signatureBase,
+// which is not nil, for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature.Signature, manifestDigest digest.Digest) error {
+ // FIXME? This overwrites files one at a time, definitely not atomic.
+ // A failure when updating signatures with a reordered copy could lose some of them.
+
+ // Skip dealing with the manifest digest if not necessary.
+ if len(signatures) == 0 {
+ return nil
+ }
+
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ for i, signature := range signatures {
+ sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
+ err := d.putOneSignature(sigURL, signature)
+ if err != nil {
+ return err
+ }
+ }
+ // Remove any other signatures, if present.
+ // We stop at the first missing signature; if a previous deleting loop aborted
+ // prematurely, this may not clean up all of them, but one missing signature
+ // is enough for dockerImageSource to stop looking for other signatures, so that
+ // is sufficient.
+ for i := len(signatures); ; i++ {
+ sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
+ missing, err := d.c.deleteOneSignature(sigURL)
+ if err != nil {
+ return err
+ }
+ if missing {
+ break
+ }
+ }
+
+ return nil
+}
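+
+// A rough sketch of the layout used above (see docs/signature-protocols.md for
+// the authoritative description): lookasideStorageURL produces URLs shaped like
+//
+//	<lookaside base>/<repository path>@<algo>=<digest hex>/signature-<index>
+//
+// with indexes starting at 1, which is why the cleanup loop probes successive
+// indexes until it finds a missing entry.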
+
+// putOneSignature stores sig to sigURL.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (d *dockerImageDestination) putOneSignature(sigURL *url.URL, sig signature.Signature) error {
+ switch sigURL.Scheme {
+ case "file":
+ logrus.Debugf("Writing to %s", sigURL.Path)
+ err := os.MkdirAll(filepath.Dir(sigURL.Path), 0755)
+ if err != nil {
+ return err
+ }
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile(sigURL.Path, blob, 0644)
+ if err != nil {
+ return err
+ }
+ return nil
+
+ case "http", "https":
+ return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted())
+ default:
+ return fmt.Errorf("Unsupported scheme when writing signature to %s", sigURL.Redacted())
+ }
+}
+
+func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.Context, signatures []signature.Sigstore, manifestDigest digest.Digest) error {
+ if !d.c.useSigstoreAttachments {
+ return errors.New("writing sigstore attachments is disabled by configuration")
+ }
+
+ ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest)
+ if err != nil {
+ return err
+ }
+ var ociConfig imgspecv1.Image // Most fields empty by default
+ if ociManifest == nil {
+ ociManifest = manifest.OCI1FromComponents(imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Digest: "", // We will fill this in later.
+ Size: 0,
+ }, nil)
+ ociConfig.RootFS.Type = "layers"
+ } else {
+ logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String())
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+ configBlob, err := d.c.getOCIDescriptorContents(ctx, d.ref, ociManifest.Config, iolimits.MaxConfigBodySize,
+ none.NoCache)
+ if err != nil {
+ return err
+ }
+ if err := json.Unmarshal(configBlob, &ociConfig); err != nil {
+ return fmt.Errorf("parsing sigstore attachment config %s in %s: %w", ociManifest.Config.Digest.String(),
+ d.ref.ref.Name(), err)
+ }
+ }
+
+ // Make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it.
+ ociManifest.Layers = slices.Clone(ociManifest.Layers)
+ // We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to
+ // the slice in the original object (or in a newly allocated object).
+ for _, sig := range signatures {
+ mimeType := sig.UntrustedMIMEType()
+ payloadBlob := sig.UntrustedPayload()
+ annotations := sig.UntrustedAnnotations()
+
+ alreadyOnRegistry := false
+ for _, layer := range ociManifest.Layers {
+ if layerMatchesSigstoreSignature(layer, mimeType, payloadBlob, annotations) {
+ logrus.Debugf("Signature with digest %s already exists on the registry", layer.Digest.String())
+ alreadyOnRegistry = true
+ break
+ }
+ }
+ if alreadyOnRegistry {
+ continue
+ }
+
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+ // That might eventually need to change if payloads grow to be not just signatures, but something
+ // significantly large.
+ sigDesc, err := d.putBlobBytesAsOCI(ctx, payloadBlob, mimeType, private.PutBlobOptions{
+ Cache: none.NoCache,
+ IsConfig: false,
+ EmptyLayer: false,
+ LayerIndex: nil,
+ })
+ if err != nil {
+ return err
+ }
+ sigDesc.Annotations = annotations
+ ociManifest.Layers = append(ociManifest.Layers, sigDesc)
+ ociConfig.RootFS.DiffIDs = append(ociConfig.RootFS.DiffIDs, sigDesc.Digest)
+ logrus.Debugf("Adding new signature, digest %s", sigDesc.Digest.String())
+ }
+
+ configBlob, err := json.Marshal(ociConfig)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Uploading updated sigstore attachment config")
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+ configDesc, err := d.putBlobBytesAsOCI(ctx, configBlob, imgspecv1.MediaTypeImageConfig, private.PutBlobOptions{
+ Cache: none.NoCache,
+ IsConfig: true,
+ EmptyLayer: false,
+ LayerIndex: nil,
+ })
+ if err != nil {
+ return err
+ }
+ ociManifest.Config = configDesc
+
+ manifestBlob, err := ociManifest.Serialize()
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Uploading sigstore attachment manifest")
+ return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))
+}
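+
+// A sketch of the naming convention involved (the cosign-style tag-based
+// discovery scheme used by sigstoreAttachmentTag): the attachment manifest
+// uploaded above lands under a tag derived from the image manifest digest,
+// with ":" replaced by "-" and a ".sig" suffix, e.g.
+//
+//	sha256:8df5e60c42668706ac108b59c559b9187fa2de7e4e262e2967e3e9da35d5a8d7
+//	-> sha256-8df5e60c42668706ac108b59c559b9187fa2de7e4e262e2967e3e9da35d5a8d7.sig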
+
+func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
+ payloadBlob []byte, annotations map[string]string) bool {
+ if layer.MediaType != mimeType ||
+ layer.Size != int64(len(payloadBlob)) ||
+ // This is not quite correct, we should use the layer’s digest algorithm.
+ // But right now we don’t want to deal with corner cases like bad digest formats
+ // or unavailable algorithms; in the worst case we end up with duplicate signature
+ // entries.
+ layer.Digest.String() != digest.FromBytes(payloadBlob).String() ||
+ !maps.Equal(layer.Annotations, annotations) {
+ return false
+ }
+ return true
+}
+
+// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate
+// OCI descriptor.
+func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) {
+ blobDigest := digest.FromBytes(contents)
+ info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents),
+ types.BlobInfo{
+ Digest: blobDigest,
+ Size: int64(len(contents)),
+ MediaType: mimeType,
+ }, options)
+ if err != nil {
+ return imgspecv1.Descriptor{}, fmt.Errorf("writing blob %s: %w", blobDigest.String(), err)
+ }
+ return imgspecv1.Descriptor{
+ MediaType: mimeType,
+ Digest: info.Digest,
+ Size: info.Size,
+ }, nil
+}
+
+// deleteOneSignature deletes a signature from sigURL, if it exists.
+// If it successfully determines that the signature does not exist, returns (true, nil)
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (c *dockerClient) deleteOneSignature(sigURL *url.URL) (missing bool, err error) {
+ switch sigURL.Scheme {
+ case "file":
+ logrus.Debugf("Deleting %s", sigURL.Path)
+ err := os.Remove(sigURL.Path)
+ if err != nil && os.IsNotExist(err) {
+ return true, nil
+ }
+ return false, err
+
+ case "http", "https":
+ return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted())
+ default:
+ return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", sigURL.Redacted())
+ }
+}
+
+// putSignaturesToAPIExtension implements PutSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
+// for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures []signature.Signature, manifestDigest digest.Digest) error {
+ // Skip dealing with the manifest digest, or reading the old state, if not necessary.
+ if len(signatures) == 0 {
+ return nil
+ }
+
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures,
+ // but the X-Registry-Supports-Signatures API extension does not support that yet.
+
+ existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest)
+ if err != nil {
+ return err
+ }
+ existingSigNames := set.New[string]()
+ for _, sig := range existingSignatures.Signatures {
+ existingSigNames.Add(sig.Name)
+ }
+
+ for _, newSigWithFormat := range signatures {
+ newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(newSigWithFormat)
+ }
+ newSig := newSigSimple.UntrustedSignature()
+
+ if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool {
+ return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
+ }) {
+ continue
+ }
+
+ // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil || n != 16 {
+ return fmt.Errorf("generating random signature len %d: %w", n, err)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
+ if !existingSigNames.Contains(signatureName) {
+ break
+ }
+ }
+ sig := extensionSignature{
+ Version: extensionSignatureSchemaVersion,
+ Name: signatureName,
+ Type: extensionSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
+ res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
+ return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res))
+ }
+ }
+
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error {
+ return nil
+}
diff --git a/docker/docker_image_dest_test.go b/docker/docker_image_dest_test.go
new file mode 100644
index 0000000..8c196e8
--- /dev/null
+++ b/docker/docker_image_dest_test.go
@@ -0,0 +1,36 @@
+package docker
+
+import (
+ "bufio"
+ "bytes"
+ "net/http"
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var _ private.ImageDestination = (*dockerImageDestination)(nil)
+
+func TestIsManifestInvalidError(t *testing.T) {
+ // Sadly only a smoke test; this really should record all known errors exactly as they happen.
+
+ // docker/distribution 2.1.1 when uploading to a tag (because it can’t find a matching tag
+ // inside the manifest)
+ response := "HTTP/1.1 400 Bad Request\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 79\r\n" +
+ "Content-Type: application/json; charset=utf-8\r\n" +
+ "Date: Sat, 14 Aug 2021 19:27:29 GMT\r\n" +
+ "Docker-Distribution-Api-Version: registry/2.0\r\n" +
+ "\r\n" +
+ "{\"errors\":[{\"code\":\"TAG_INVALID\",\"message\":\"manifest tag did not match URI\"}]}\n"
+ resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader([]byte(response))), nil)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ err = registryHTTPResponseToError(resp)
+
+ res := isManifestInvalidError(err)
+ assert.True(t, res, "%#v", err)
+}
diff --git a/docker/docker_image_src.go b/docker/docker_image_src.go
new file mode 100644
index 0000000..f9d4d60
--- /dev/null
+++ b/docker/docker_image_src.go
@@ -0,0 +1,798 @@
+package docker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/regexp"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// maxLookasideSignatures is an arbitrary limit for the total number of signatures we would try to read from a lookaside server,
+// even if it were broken or malicious and it continued serving an enormous number of items.
+const maxLookasideSignatures = 128
+
+type dockerImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.ImplementsGetBlobAt
+
+ logicalRef dockerReference // The reference the user requested. This must satisfy !isUnknownDigest
+ physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest
+ c *dockerClient
+ // State
+ cachedManifest []byte // nil if not loaded yet
+ cachedManifestMIMEType string // Only valid if cachedManifest != nil
+}
+
+// newImageSource creates a new ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+// The caller must ensure !ref.isUnknownDigest.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+ if ref.isUnknownDigest {
+ return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport())
+ }
+
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
+ registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
+ if err != nil {
+ return nil, fmt.Errorf("loading registries configuration: %w", err)
+ }
+ if registry == nil {
+ // No configuration was found for the provided reference, so use the
+ // equivalent of a default configuration.
+ registry = &sysregistriesv2.Registry{
+ Endpoint: sysregistriesv2.Endpoint{
+ Location: ref.ref.String(),
+ },
+ Prefix: ref.ref.String(),
+ }
+ }
+
+ // Check all endpoints for the manifest availability. If we find one that does
+ // contain the image, it will be used for all future pull actions. Always try the
+ // non-mirror original location last; this both transparently handles the case
+ // of no mirrors configured, and ensures we return the error encountered when
+ // accessing the upstream location if all endpoints fail.
+ pullSources, err := registry.PullSourcesFromReference(ref.ref)
+ if err != nil {
+ return nil, err
+ }
+ type attempt struct {
+ ref reference.Named
+ err error
+ }
+ attempts := []attempt{}
+ for _, pullSource := range pullSources {
+ if sys != nil && sys.DockerLogMirrorChoice {
+ logrus.Infof("Trying to access %q", pullSource.Reference)
+ } else {
+ logrus.Debugf("Trying to access %q", pullSource.Reference)
+ }
+ s, err := newImageSourceAttempt(ctx, sys, ref, pullSource, registryConfig)
+ if err == nil {
+ return s, nil
+ }
+ logrus.Debugf("Accessing %q failed: %v", pullSource.Reference, err)
+ attempts = append(attempts, attempt{
+ ref: pullSource.Reference,
+ err: err,
+ })
+ }
+ switch len(attempts) {
+ case 0:
+ return nil, errors.New("Internal error: newImageSource returned without trying any endpoint")
+ case 1:
+ return nil, attempts[0].err // If no mirrors are used, perfectly preserve the error type and add no noise.
+ default:
+ // Don’t just build a string, try to preserve the typed error.
+ primary := &attempts[len(attempts)-1]
+ extras := []string{}
+ for i := 0; i < len(attempts)-1; i++ {
+ // This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
+ // The paired [] at least have some chance of being unambiguous.
+ extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
+ }
+ return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
+ }
+}
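+
+// A sketch of the configuration that drives the mirror fallback above (host
+// names are made up; see containers-registries.conf(5) for the full format):
+//
+//	[[registry]]
+//	prefix = "registry.example.com"
+//	location = "registry.example.com"
+//
+//	[[registry.mirror]]
+//	location = "mirror.example.com/registry"
+//
+// PullSourcesFromReference returns the mirrors first and the primary location
+// last, which is why the last attempt is treated as the primary error above.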
+
+// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
+// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
+ registryConfig *registryConfiguration) (*dockerImageSource, error) {
+ physicalRef, err := newReference(pullSource.Reference, false)
+ if err != nil {
+ return nil, err
+ }
+
+ endpointSys := sys
+ // sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
+ if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
+ copy := *endpointSys
+ copy.DockerAuthConfig = nil
+ copy.DockerBearerRegistryToken = ""
+ endpointSys = &copy
+ }
+
+ client, err := newDockerClientFromRef(endpointSys, physicalRef, registryConfig, false, "pull")
+ if err != nil {
+ return nil, err
+ }
+ client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
+
+ s := &dockerImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+
+ logicalRef: logicalRef,
+ physicalRef: physicalRef,
+ c: client,
+ }
+ s.Compat = impl.AddCompat(s)
+
+ if err := s.ensureManifestIsLoaded(ctx); err != nil {
+ client.Close()
+ return nil, err
+ }
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dockerImageSource) Reference() types.ImageReference {
+ return s.logicalRef
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dockerImageSource) Close() error {
+ return s.c.Close()
+}
+
+// simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1).
+// An empty string is returned unchanged; invalid values are "simplified" to an empty string.
+func simplifyContentType(contentType string) string {
+ if contentType == "" {
+ return contentType
+ }
+ mimeType, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return ""
+ }
+ return mimeType
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return s.fetchManifest(ctx, instanceDigest.String())
+ }
+ err := s.ensureManifestIsLoaded(ctx)
+ if err != nil {
+ return nil, "", err
+ }
+ return s.cachedManifest, s.cachedManifestMIMEType, nil
+}
+
+func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
+ return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
+}
+
+// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
+//
+// ImageSource implementations are not required or expected to do any caching,
+// but because our signatures are “attached” to the manifest digest,
+// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil)
+// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious
+// signature verification failures when pulling while a tag is being updated.
+func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
+ if s.cachedManifest != nil {
+ return nil
+ }
+
+ reference, err := s.physicalRef.tagOrDigest()
+ if err != nil {
+ return err
+ }
+
+ manblob, mt, err := s.fetchManifest(ctx, reference)
+ if err != nil {
+ return err
+ }
+ // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
+ s.cachedManifest = manblob
+ s.cachedManifestMIMEType = mt
+ return nil
+}
+
+// splitHTTP200ResponseToPartial splits a 200 response into multiple streams, as specified by chunks.
+func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) {
+ defer close(streams)
+ defer close(errs)
+ currentOffset := uint64(0)
+
+ body = makeBufferedNetworkReader(body, 64, 16384)
+ defer body.Close()
+ for _, c := range chunks {
+ if c.Offset != currentOffset {
+ if c.Offset < currentOffset {
+ errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset)
+ break
+ }
+ toSkip := c.Offset - currentOffset
+ if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+ errs <- err
+ break
+ }
+ currentOffset += toSkip
+ }
+ s := signalCloseReader{
+ closed: make(chan struct{}),
+ stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
+ consumeStream: true,
+ }
+ streams <- s
+
+ // Wait until the stream is closed before going to the next chunk
+ <-s.closed
+ currentOffset += c.Length
+ }
+}
+
+// handle206Response reads a 206 response and sends each part as a separate ReadCloser to the streams chan.
+func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) {
+ defer close(streams)
+ defer close(errs)
+ if !strings.HasPrefix(mediaType, "multipart/") {
+ streams <- body
+ return
+ }
+ boundary, found := params["boundary"]
+ if !found {
+ errs <- errors.New("could not find boundary")
+ body.Close()
+ return
+ }
+ buffered := makeBufferedNetworkReader(body, 64, 16384)
+ defer buffered.Close()
+ mr := multipart.NewReader(buffered, boundary)
+ parts := 0
+ for {
+ p, err := mr.NextPart()
+ if err != nil {
+ if err != io.EOF {
+ errs <- err
+ }
+ if parts != len(chunks) {
+ errs <- errors.New("invalid number of chunks returned by the server")
+ }
+ return
+ }
+ s := signalCloseReader{
+ closed: make(chan struct{}),
+ stream: p,
+ }
+ streams <- s
+ // NextPart() cannot be called while the current part
+ // is being read, so wait until it is closed
+ <-s.closed
+ parts++
+ }
+}
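+
+// For reference, a multipart/byteranges 206 body for the two chunks used in the
+// tests (offsets 1 and 4 of "123456789") roughly looks like the following; this is
+// an illustrative example, not a capture from a specific registry. Per-part headers
+// are optional, and the boundary is chosen by the server:
+//
+//	--BOUNDARY
+//	Content-Range: bytes 1-2/9
+//
+//	23
+//	--BOUNDARY
+//	Content-Range: bytes 4-4/9
+//
+//	5
+//	--BOUNDARY--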
+
+var multipartByteRangesRe = regexp.Delayed("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
+
+func parseMediaType(contentType string) (string, map[string]string, error) {
+ mediaType, params, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ if err == mime.ErrInvalidMediaParameter {
+ // CloudFront returns an invalid MIME type that contains an unquoted ":" in the boundary
+ // param; handle it here.
+ matches := multipartByteRangesRe.FindStringSubmatch(contentType)
+ if len(matches) == 2 {
+ mediaType = "multipart/byteranges"
+ params = map[string]string{
+ "boundary": matches[1],
+ }
+ err = nil
+ }
+ }
+ if err != nil {
+ return "", nil, err
+ }
+ }
+ return mediaType, params, err
+}
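+
+// As an illustrative example, CloudFront can emit a Content-Type such as
+//
+//	multipart/byteranges; boundary=CloudFront:3F750DE0752BEDE3882F7DBE80010D31
+//
+// where the unquoted ":" makes mime.ParseMediaType fail with
+// mime.ErrInvalidMediaParameter; the fallback above recovers the boundary via
+// multipartByteRangesRe instead.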
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ headers := make(map[string][]string)
+
+ rangeVals := make([]string, 0, len(chunks))
+ for _, c := range chunks {
+ rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
+ }
+
+ headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}
+
+ if len(info.URLs) != 0 {
+ return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
+ }
+
+ path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch res.StatusCode {
+ case http.StatusOK:
+ // If the server replied with a 200 status code, convert the full body response to a series of
+ // streams, as would have been done for a 206 response.
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+ go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks)
+ return streams, errs, nil
+ case http.StatusPartialContent:
+ mediaType, params, err := parseMediaType(res.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+
+ go handle206Response(streams, errs, res.Body, chunks, mediaType, params)
+ return streams, errs, nil
+ case http.StatusBadRequest:
+ res.Body.Close()
+ return nil, nil, private.BadPartialRequestError{Status: res.Status}
+ default:
+ err := registryHTTPResponseToError(res)
+ res.Body.Close()
+ return nil, nil, fmt.Errorf("fetching partial blob: %w", err)
+ }
+}
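+
+// consumeGetBlobAtChunks is an illustrative sketch, not part of the upstream API; the
+// function name is hypothetical and error handling/cleanup are simplified. It shows how
+// a caller can drain the two channels returned by GetBlobAt, similar to readNextStream
+// in the tests: each reader is fully consumed and closed before the next one is
+// requested, and the error channel is watched at the same time so a failure reported by
+// the producer goroutine is not missed.
+func consumeGetBlobAtChunks(streams chan io.ReadCloser, errs chan error) ([][]byte, error) {
+	res := [][]byte{}
+	for {
+		select {
+		case r, ok := <-streams:
+			if !ok { // streams was closed: every requested chunk has been delivered
+				return res, nil
+			}
+			data, err := io.ReadAll(r) // consume the chunk fully…
+			r.Close()                  // …and close it so the producer can move on to the next chunk
+			if err != nil {
+				return nil, err
+			}
+			res = append(res, data)
+		case err, ok := <-errs:
+			if ok {
+				return nil, err // the producer reported a failure
+			}
+			errs = nil // errs was closed without an error; a nil channel disables this case
+		}
+	}
+}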
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ return s.c.getBlob(ctx, s.physicalRef, info, cache)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ if err := s.c.detectProperties(ctx); err != nil {
+ return nil, err
+ }
+ var res []signature.Signature
+ switch {
+ case s.c.supportsSignatures:
+ sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, sigs...)
+ case s.c.signatureBase != nil:
+ sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, sigs...)
+ default:
+ return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+ }
+
+ sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, sigstoreSigs...)
+ return res, nil
+}
+
+// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference,
+// or finally, from a fetched manifest.
+func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) {
+ if instanceDigest != nil {
+ return *instanceDigest, nil
+ }
+ if digested, ok := s.physicalRef.ref.(reference.Digested); ok {
+ d := digested.Digest()
+ if d.Algorithm() == digest.Canonical {
+ return d, nil
+ }
+ }
+ if err := s.ensureManifestIsLoaded(ctx); err != nil {
+ return "", err
+ }
+ return manifest.Digest(s.cachedManifest)
+}
+
+// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
+// which is not nil.
+func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ signatures := []signature.Signature{}
+ for i := 0; ; i++ {
+ if i >= maxLookasideSignatures {
+ return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
+ }
+
+ sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
+ signature, missing, err := s.getOneSignature(ctx, sigURL)
+ if err != nil {
+ return nil, err
+ }
+ if missing {
+ break
+ }
+ signatures = append(signatures, signature)
+ }
+ return signatures, nil
+}
+
+// getOneSignature downloads one signature from sigURL and, on success, returns (signature, false, nil).
+// If it successfully determines that the signature does not exist, returns (nil, true, nil).
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL) (signature.Signature, bool, error) {
+ switch sigURL.Scheme {
+ case "file":
+ logrus.Debugf("Reading %s", sigURL.Path)
+ sigBlob, err := os.ReadFile(sigURL.Path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, true, nil
+ }
+ return nil, false, err
+ }
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing signature %q: %w", sigURL.Path, err)
+ }
+ return sig, false, nil
+
+ case "http", "https":
+ logrus.Debugf("GET %s", sigURL.Redacted())
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL.String(), nil)
+ if err != nil {
+ return nil, false, err
+ }
+ res, err := s.c.client.Do(req)
+ if err != nil {
+ return nil, false, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ logrus.Debugf("... got status 404, as expected = end of signatures")
+ return nil, true, nil
+ } else if res.StatusCode != http.StatusOK {
+ return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
+ }
+
+ contentType := res.Header.Get("Content-Type")
+ if mimeType := simplifyContentType(contentType); mimeType == "text/html" {
+ logrus.Warnf("Signature %q has Content-Type %q, unexpected for a signature", sigURL.Redacted(), contentType)
+ // Don’t immediately fail; the lookaside spec does not place any requirements on Content-Type.
+ // If the content really is HTML, it’s going to fail in signature.FromBlob.
+ }
+
+ sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
+ if err != nil {
+ return nil, false, err
+ }
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, false, fmt.Errorf("parsing signature %s: %w", sigURL.Redacted(), err)
+ }
+ return sig, false, nil
+
+ default:
+ return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", sigURL.Redacted())
+ }
+}
+
+// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension.
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ var sigs []signature.Signature
+ for _, sig := range parsedBody.Signatures {
+ if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
+ sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
+ }
+ }
+ return sigs, nil
+}
+
+func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ if !s.c.useSigstoreAttachments {
+ logrus.Debugf("Not looking for sigstore attachments: disabled by configuration")
+ return nil, nil
+ }
+
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest)
+ if err != nil {
+ return nil, err
+ }
+ if ociManifest == nil {
+ return nil, nil
+ }
+
+ logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers))
+ res := []signature.Signature{}
+ for layerIndex, layer := range ociManifest.Layers {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ logrus.Debugf("Fetching sigstore attachment %d/%d: %s", layerIndex+1, len(ociManifest.Layers), layer.Digest.String())
+ // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+ // That might eventually need to change if payloads grow to be not just signatures, but something
+ // significantly large.
+ payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize,
+ none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
+ }
+ return res, nil
+}
+
+// deleteImage deletes the named image from the registry, if supported.
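+// It resolves the manifest digest with a GET, issues a DELETE for that digest,
+// and finally removes any lookaside signatures stored for it.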
+func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
+ if ref.isUnknownDigest {
+ return fmt.Errorf("Docker reference without a tag or digest cannot be deleted")
+ }
+
+ registryConfig, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return err
+ }
+ // docker/distribution does not document what action should be used for deleting images.
+ //
+ // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
+ // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included.
+ // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user).
+ //
+ // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything".
+ c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "*")
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
+ refTail, err := ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+ getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
+ get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer get.Body.Close()
+ switch get.StatusCode {
+ case http.StatusOK:
+ case http.StatusNotFound:
+ return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+ default:
+ return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(get))
+ }
+ manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize)
+ if err != nil {
+ return err
+ }
+
+ manifestDigest, err := manifest.Digest(manifestBody)
+ if err != nil {
+ return fmt.Errorf("computing manifest digest: %w", err)
+ }
+ deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest)
+
+ // When retrieving the digest from a registry >= 2.3 use the following header:
+ // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+ delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer delete.Body.Close()
+ if delete.StatusCode != http.StatusAccepted {
+ return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(delete))
+ }
+
+ for i := 0; ; i++ {
+ sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i)
+ missing, err := c.deleteOneSignature(sigURL)
+ if err != nil {
+ return err
+ }
+ if missing {
+ break
+ }
+ }
+
+ return nil
+}
+
+type bufferedNetworkReaderBuffer struct {
+ data []byte
+ len int
+ consumed int
+ err error
+}
+
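+// bufferedNetworkReader reads the underlying stream ahead of the consumer: a
+// dedicated goroutine (handleBufferedNetworkReader) fills a fixed pool of buffers
+// that is recycled between the emptyBuffer and readyBuffer channels, so Read()
+// can usually return data that has already arrived from the network.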
+type bufferedNetworkReader struct {
+ stream io.ReadCloser
+ emptyBuffer chan *bufferedNetworkReaderBuffer
+ readyBuffer chan *bufferedNetworkReaderBuffer
+ terminate chan bool
+ current *bufferedNetworkReaderBuffer
+ mutex sync.Mutex
+ gotEOF bool
+}
+
+// handleBufferedNetworkReader runs in a goroutine
+func handleBufferedNetworkReader(br *bufferedNetworkReader) {
+ defer close(br.readyBuffer)
+ for {
+ select {
+ case b := <-br.emptyBuffer:
+ b.len, b.err = br.stream.Read(b.data)
+ br.readyBuffer <- b
+ if b.err != nil {
+ return
+ }
+ case <-br.terminate:
+ return
+ }
+ }
+}
+
+func (n *bufferedNetworkReader) Close() error {
+ close(n.terminate)
+ close(n.emptyBuffer)
+ return n.stream.Close()
+}
+
+func (n *bufferedNetworkReader) read(p []byte) (int, error) {
+ if n.current != nil {
+ copied := copy(p, n.current.data[n.current.consumed:n.current.len])
+ n.current.consumed += copied
+ if n.current.consumed == n.current.len {
+ n.emptyBuffer <- n.current
+ n.current = nil
+ }
+ if copied > 0 {
+ return copied, nil
+ }
+ }
+ if n.gotEOF {
+ return 0, io.EOF
+ }
+
+ var b *bufferedNetworkReaderBuffer
+
+ select {
+ case b = <-n.readyBuffer:
+ if b.err != nil {
+ if b.err != io.EOF {
+ return b.len, b.err
+ }
+ n.gotEOF = true
+ }
+ b.consumed = 0
+ n.current = b
+ return n.read(p)
+ case <-n.terminate:
+ return 0, io.EOF
+ }
+}
+
+func (n *bufferedNetworkReader) Read(p []byte) (int, error) {
+ n.mutex.Lock()
+ defer n.mutex.Unlock()
+
+ return n.read(p)
+}
+
+func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader {
+ br := bufferedNetworkReader{
+ stream: stream,
+ emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
+ readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
+ terminate: make(chan bool),
+ }
+
+ go func() {
+ handleBufferedNetworkReader(&br)
+ }()
+
+ for i := uint(0); i < nBuffers; i++ {
+ b := bufferedNetworkReaderBuffer{
+ data: make([]byte, bufferSize),
+ }
+ br.emptyBuffer <- &b
+ }
+
+ return &br
+}
+
+type signalCloseReader struct {
+ closed chan struct{}
+ stream io.ReadCloser
+ consumeStream bool
+}
+
+func (s signalCloseReader) Read(p []byte) (int, error) {
+ return s.stream.Read(p)
+}
+
+func (s signalCloseReader) Close() error {
+ defer close(s.closed)
+ if s.consumeStream {
+ if _, err := io.Copy(io.Discard, s.stream); err != nil {
+ s.stream.Close()
+ return err
+ }
+ }
+ return s.stream.Close()
+}
diff --git a/docker/docker_image_src_test.go b/docker/docker_image_src_test.go
new file mode 100644
index 0000000..585d3dd
--- /dev/null
+++ b/docker/docker_image_src_test.go
@@ -0,0 +1,213 @@
+package docker
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var _ private.ImageSource = (*dockerImageSource)(nil)
+
+func TestDockerImageSourceReference(t *testing.T) {
+ manifestPathRegex := regexp.MustCompile("^/v2/.*/manifests/latest$")
+
+ server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ switch {
+ case r.Method == http.MethodGet && r.URL.Path == "/v2/":
+ rw.WriteHeader(http.StatusOK)
+ case r.Method == http.MethodGet && manifestPathRegex.MatchString(r.URL.Path):
+ rw.WriteHeader(http.StatusOK)
+ // Empty body is good enough for this test
+ default:
+ require.FailNowf(t, "Unexpected request", "%v %v", r.Method, r.URL.Path)
+ }
+ }))
+ defer server.Close()
+ registryURL, err := url.Parse(server.URL)
+ require.NoError(t, err)
+ registry := registryURL.Host
+
+ mirrorConfiguration := strings.ReplaceAll(
+ `[[registry]]
+prefix = "primary-override.example.com"
+location = "@REGISTRY@/primary-override"
+
+[[registry]]
+location = "with-mirror.example.com"
+
+[[registry.mirror]]
+location = "@REGISTRY@/with-mirror"
+`, "@REGISTRY@", registry)
+ registriesConf, err := os.CreateTemp("", "docker-image-src")
+ require.NoError(t, err)
+ defer registriesConf.Close()
+ defer os.Remove(registriesConf.Name())
+ err = os.WriteFile(registriesConf.Name(), []byte(mirrorConfiguration), 0600)
+ require.NoError(t, err)
+
+ for _, c := range []struct{ input, physical string }{
+ {registry + "/no-redirection/busybox:latest", registry + "/no-redirection/busybox:latest"},
+ {"primary-override.example.com/busybox:latest", registry + "/primary-override/busybox:latest"},
+ {"with-mirror.example.com/busybox:latest", registry + "/with-mirror/busybox:latest"},
+ } {
+ ref, err := ParseReference("//" + c.input)
+ require.NoError(t, err, c.input)
+ src, err := ref.NewImageSource(context.Background(), &types.SystemContext{
+ RegistriesDirPath: "/this/does/not/exist",
+ DockerPerHostCertDirPath: "/this/does/not/exist",
+ SystemRegistriesConfPath: registriesConf.Name(),
+ DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
+ })
+ require.NoError(t, err, c.input)
+ defer src.Close()
+
+ // The observable behavior
+ assert.Equal(t, "//"+c.input, src.Reference().StringWithinTransport(), c.input)
+ assert.Equal(t, ref.StringWithinTransport(), src.Reference().StringWithinTransport(), c.input)
+ // Also peek into internal state
+ src2, ok := src.(*dockerImageSource)
+ require.True(t, ok, c.input)
+ assert.Equal(t, "//"+c.input, src2.logicalRef.StringWithinTransport(), c.input)
+ assert.Equal(t, "//"+c.physical, src2.physicalRef.StringWithinTransport(), c.input)
+ }
+}
+
+func TestSimplifyContentType(t *testing.T) {
+ for _, c := range []struct{ input, expected string }{
+ {"", ""},
+ {"application/json", "application/json"},
+ {"application/json;charset=utf-8", "application/json"},
+ {"application/json; charset=utf-8", "application/json"},
+ {"application/json ; charset=utf-8", "application/json"},
+ {"application/json\t;\tcharset=utf-8", "application/json"},
+ {"application/json ;charset=utf-8", "application/json"},
+ {`application/json; charset="utf-8"`, "application/json"},
+ {"completely invalid", ""},
+ } {
+ out := simplifyContentType(c.input)
+ assert.Equal(t, c.expected, out, c.input)
+ }
+}
+
+func readNextStream(streams chan io.ReadCloser, errs chan error) ([]byte, error) {
+ select {
+ case r := <-streams:
+ if r == nil {
+ return nil, nil
+ }
+ defer r.Close()
+ return io.ReadAll(r)
+ case err := <-errs:
+ return nil, err
+ }
+}
+
+type verifyGetBlobAtData struct {
+ expectedData []byte
+ expectedError error
+}
+
+func verifyGetBlobAtOutput(t *testing.T, streams chan io.ReadCloser, errs chan error, expected []verifyGetBlobAtData) {
+ for _, c := range expected {
+ data, err := readNextStream(streams, errs)
+ assert.Equal(t, c.expectedData, data)
+ assert.Equal(t, c.expectedError, err)
+ }
+}
+
+func TestSplitHTTP200ResponseToPartial(t *testing.T) {
+ body := io.NopCloser(bytes.NewReader([]byte("123456789")))
+ defer body.Close()
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+ chunks := []private.ImageSourceChunk{
+ {Offset: 1, Length: 2},
+ {Offset: 4, Length: 1},
+ }
+ go splitHTTP200ResponseToPartial(streams, errs, body, chunks)
+
+ expected := []verifyGetBlobAtData{
+ {[]byte("23"), nil},
+ {[]byte("5"), nil},
+ {[]byte(nil), nil},
+ }
+
+ verifyGetBlobAtOutput(t, streams, errs, expected)
+}
+
+func TestHandle206Response(t *testing.T) {
+ body := io.NopCloser(bytes.NewReader([]byte("--AAA\r\n\r\n23\r\n--AAA\r\n\r\n5\r\n--AAA--")))
+ defer body.Close()
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+ chunks := []private.ImageSourceChunk{
+ {Offset: 1, Length: 2},
+ {Offset: 4, Length: 1},
+ }
+ mediaType := "multipart/form-data"
+ params := map[string]string{
+ "boundary": "AAA",
+ }
+ go handle206Response(streams, errs, body, chunks, mediaType, params)
+
+ expected := []verifyGetBlobAtData{
+ {[]byte("23"), nil},
+ {[]byte("5"), nil},
+ {[]byte(nil), nil},
+ }
+ verifyGetBlobAtOutput(t, streams, errs, expected)
+
+ body = io.NopCloser(bytes.NewReader([]byte("HELLO")))
+ defer body.Close()
+ streams = make(chan io.ReadCloser)
+ errs = make(chan error)
+ chunks = []private.ImageSourceChunk{{Offset: 100, Length: 5}}
+ mediaType = "text/plain"
+ params = map[string]string{}
+ go handle206Response(streams, errs, body, chunks, mediaType, params)
+
+ expected = []verifyGetBlobAtData{
+ {[]byte("HELLO"), nil},
+ {[]byte(nil), nil},
+ }
+ verifyGetBlobAtOutput(t, streams, errs, expected)
+}
+
+func TestParseMediaType(t *testing.T) {
+ mediaType, params, err := parseMediaType("multipart/byteranges; boundary=CloudFront:3F750DE0752BEDE3882F7DBE80010D31")
+ require.NoError(t, err)
+ assert.Equal(t, mediaType, "multipart/byteranges")
+ assert.Equal(t, params["boundary"], "CloudFront:3F750DE0752BEDE3882F7DBE80010D31")
+
+ mediaType, params, err = parseMediaType("multipart/byteranges; boundary=00000000000061573284")
+ require.NoError(t, err)
+ assert.Equal(t, mediaType, "multipart/byteranges")
+ assert.Equal(t, params["boundary"], "00000000000061573284")
+
+ mediaType, params, err = parseMediaType("multipart/byteranges; foo=bar; bar=baz")
+ require.NoError(t, err)
+ assert.Equal(t, mediaType, "multipart/byteranges")
+ assert.Equal(t, params["foo"], "bar")
+ assert.Equal(t, params["bar"], "baz")
+
+ // quoted symbols '@'
+ _, params, err = parseMediaType("multipart/byteranges; boundary=\"@:\"")
+ require.NoError(t, err)
+ assert.Equal(t, params["boundary"], "@:")
+
+ // unquoted '@'
+ _, _, err = parseMediaType("multipart/byteranges; boundary=@")
+ require.Error(t, err)
+}
diff --git a/docker/docker_transport.go b/docker/docker_transport.go
new file mode 100644
index 0000000..1c89302
--- /dev/null
+++ b/docker/docker_transport.go
@@ -0,0 +1,215 @@
+package docker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+// UnknownDigestSuffix can be appended to a reference when the caller
+// wants to push an image without a tag or digest.
+// NewReferenceUnknownDigest() is called when this const is detected.
+const UnknownDigestSuffix = "@@unknown-digest@@"
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for container registry-hosted images.
+var Transport = dockerTransport{}
+
+type dockerTransport struct{}
+
+func (t dockerTransport) Name() string {
+ return "docker"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// dockerReference is an ImageReference for Docker images.
+type dockerReference struct {
+ ref reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true
+ isUnknownDigest bool
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ if !strings.HasPrefix(refString, "//") {
+ return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
+ }
+ // Check if ref has UnknownDigestSuffix suffixed to it
+ unknownDigest := false
+ if strings.HasSuffix(refString, UnknownDigestSuffix) {
+ unknownDigest = true
+ refString = strings.TrimSuffix(refString, UnknownDigestSuffix)
+ }
+ ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
+ if err != nil {
+ return nil, err
+ }
+
+ if unknownDigest {
+ if !reference.IsNameOnly(ref) {
+ return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix)
+ }
+ return NewReferenceUnknownDigest(ref)
+ }
+
+ ref = reference.TagNameOnly(ref)
+ return NewReference(ref)
+}
+
+// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
+func NewReference(ref reference.Named) (types.ImageReference, error) {
+ return newReference(ref, false)
+}
+
+// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting
+// a tag on the registry. The reference must satisfy reference.IsNameOnly()
+func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) {
+ return newReference(ref, true)
+}
+
+// newReference returns a dockerReference for a named reference.
+func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) {
+ if reference.IsNameOnly(ref) && !unknownDigest {
+ return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref))
+ }
+ if !reference.IsNameOnly(ref) && unknownDigest {
+ return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref))
+ }
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ // The docker/distribution API does not really support that (we can’t ask for an image with a specific
+ // tag and digest), so fail. This MAY be accepted in the future.
+ // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
+ // the tag or the digest first?)
+ _, isTagged := ref.(reference.NamedTagged)
+ _, isDigested := ref.(reference.Canonical)
+ if isTagged && isDigested {
+ return dockerReference{}, errors.New("Docker references with both a tag and digest are currently not supported")
+ }
+
+ return dockerReference{
+ ref: ref,
+ isUnknownDigest: unknownDigest,
+ }, nil
+}
+
+func (ref dockerReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref dockerReference) StringWithinTransport() string {
+ famString := "//" + reference.FamiliarString(ref.ref)
+ if ref.isUnknownDigest {
+ return famString + UnknownDigestSuffix
+ }
+ return famString
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref dockerReference) DockerReference() reference.Named {
+ return ref.ref
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref dockerReference) PolicyConfigurationIdentity() string {
+ if ref.isUnknownDigest {
+ return ref.ref.Name()
+ }
+ res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
+ if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+ }
+ return res
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref dockerReference) PolicyConfigurationNamespaces() []string {
+ namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref)
+ if ref.isUnknownDigest {
+ if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() {
+ namespaces = namespaces[1:]
+ }
+ }
+ return namespaces
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return newImage(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return deleteImage(ctx, sys, ref)
+}
+
+// tagOrDigest returns a tag or digest from the reference.
+func (ref dockerReference) tagOrDigest() (string, error) {
+ if ref, ok := ref.ref.(reference.Canonical); ok {
+ return ref.Digest().String(), nil
+ }
+ if ref, ok := ref.ref.(reference.NamedTagged); ok {
+ return ref.Tag(), nil
+ }
+
+ if ref.isUnknownDigest {
+ return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+ }
+ // This should not happen, NewReference above refuses reference.IsNameOnly values.
+ return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+}
diff --git a/docker/docker_transport_test.go b/docker/docker_transport_test.go
new file mode 100644
index 0000000..c7bc191
--- /dev/null
+++ b/docker/docker_transport_test.go
@@ -0,0 +1,289 @@
+package docker
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ sha256digest = "@sha256:" + sha256digestHex
+ unknownDigestSuffixTest = "@@unknown-digest@@"
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "docker", Transport.Name())
+}
+
+func TestTransportParseReference(t *testing.T) {
+ testParseReference(t, Transport.ParseReference)
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ for _, scope := range []string{
+ "docker.io/library/busybox" + sha256digest,
+ "docker.io/library/busybox:notlatest",
+ "docker.io/library/busybox",
+ "docker.io/library",
+ "docker.io",
+ "*.io",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+}
+
+func TestParseReference(t *testing.T) {
+ testParseReference(t, ParseReference)
+}
+
+// testParseReference is a test shared for Transport.ParseReference and ParseReference.
+func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
+ for _, c := range []struct {
+ input, expected string
+ expectedUnknownDigest bool
+ }{
+ {"busybox", "", false}, // Missing // prefix
+ {"//busybox:notlatest", "docker.io/library/busybox:notlatest", false}, // Explicit tag
+ {"//busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, false}, // Explicit digest
+ {"//busybox", "docker.io/library/busybox:latest", false}, // Default tag
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ // The docker/distribution API does not really support that (we can’t ask for an image with a specific
+ // tag and digest), so fail. This MAY be accepted in the future.
+ {"//busybox:latest" + sha256digest, "", false}, // Both tag and digest
+ {"//docker.io/library/busybox:latest", "docker.io/library/busybox:latest", false}, // All implied values explicitly specified
+ {"//UPPERCASEISINVALID", "", false}, // Invalid input
+ {"//busybox" + unknownDigestSuffixTest, "docker.io/library/busybox", true}, // UnknownDigest suffix
+ {"//example.com/ns/busybox" + unknownDigestSuffixTest, "example.com/ns/busybox", true}, // UnknownDigest with registry/repo
+ {"//example.com/ns/busybox:tag1" + unknownDigestSuffixTest, "", false}, // UnknownDigest with tag should fail
+ {"//example.com/ns/busybox" + sha256digest + unknownDigestSuffixTest, "", false}, // UnknownDigest with digest should fail
+ } {
+ ref, err := fn(c.input)
+ if c.expected == "" {
+ assert.Error(t, err, c.input)
+ } else {
+ require.NoError(t, err, c.input)
+ dockerRef, ok := ref.(dockerReference)
+ require.True(t, ok, c.input)
+ assert.Equal(t, c.expected, dockerRef.ref.String(), c.input)
+ assert.Equal(t, c.expectedUnknownDigest, dockerRef.isUnknownDigest)
+ }
+ }
+}
+
+// A common list of reference formats to test for the various ImageReference methods.
+var validReferenceTestCases = []struct {
+ input, dockerRef, stringWithinTransport string
+ expectedUnknownDigest bool
+}{
+ {"busybox:notlatest", "docker.io/library/busybox:notlatest", "//busybox:notlatest", false}, // Explicit tag
+ {"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "//busybox" + sha256digest, false}, // Explicit digest
+ {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "//busybox:latest", false}, // All implied values explicitly specified
+ {"example.com/ns/foo:bar", "example.com/ns/foo:bar", "//example.com/ns/foo:bar", false}, // All values explicitly specified
+ {"example.com/ns/busybox" + unknownDigestSuffixTest, "example.com/ns/busybox", "//example.com/ns/busybox" + unknownDigestSuffixTest, true}, // UnknownDigest Suffix full name
+ {"busybox" + unknownDigestSuffixTest, "docker.io/library/busybox", "//busybox" + unknownDigestSuffixTest, true}, // UnknownDigest short name
+}
+
+func TestNewReference(t *testing.T) {
+ for _, c := range validReferenceTestCases {
+ if strings.HasSuffix(c.input, unknownDigestSuffixTest) {
+ continue
+ }
+ parsed, err := reference.ParseNormalizedNamed(c.input)
+ require.NoError(t, err)
+ ref, err := NewReference(parsed)
+ require.NoError(t, err, c.input)
+ dockerRef, ok := ref.(dockerReference)
+ require.True(t, ok, c.input)
+ assert.Equal(t, c.dockerRef, dockerRef.ref.String(), c.input)
+ assert.Equal(t, false, dockerRef.isUnknownDigest)
+ }
+
+ // Neither a tag nor digest
+ parsed, err := reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ _, err = NewReference(parsed)
+ assert.Error(t, err)
+
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ parsed, err = reference.ParseNormalizedNamed("busybox:notlatest" + sha256digest)
+ require.NoError(t, err)
+ _, ok := parsed.(reference.Canonical)
+ require.True(t, ok)
+ _, ok = parsed.(reference.NamedTagged)
+ require.True(t, ok)
+ _, err = NewReference(parsed)
+ assert.Error(t, err)
+}
+
+func TestNewReferenceUnknownDigest(t *testing.T) {
+ // References with tags and digests should be rejected
+ for _, c := range validReferenceTestCases {
+ if !strings.Contains(c.input, unknownDigestSuffixTest) {
+ parsed, err := reference.ParseNormalizedNamed(c.input)
+ require.NoError(t, err)
+ _, err = NewReferenceUnknownDigest(parsed)
+ assert.Error(t, err)
+ continue
+ }
+ in := strings.TrimSuffix(c.input, unknownDigestSuffixTest)
+ parsed, err := reference.ParseNormalizedNamed(in)
+ require.NoError(t, err)
+ ref, err := NewReferenceUnknownDigest(parsed)
+ require.NoError(t, err, c.input)
+ dockerRef, ok := ref.(dockerReference)
+ require.True(t, ok, c.input)
+ assert.Equal(t, c.dockerRef, dockerRef.ref.String(), c.input)
+ assert.Equal(t, true, dockerRef.isUnknownDigest)
+ }
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, err := ParseReference("//busybox")
+ require.NoError(t, err)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ for _, c := range validReferenceTestCases {
+ ref, err := ParseReference("//" + c.input)
+ require.NoError(t, err, c.input)
+ stringRef := ref.StringWithinTransport()
+ assert.Equal(t, c.stringWithinTransport, stringRef, c.input)
+ // Do one more round to verify that the output can be parsed, to an equal value.
+ ref2, err := Transport.ParseReference(stringRef)
+ require.NoError(t, err, c.input)
+ stringRef2 := ref2.StringWithinTransport()
+ assert.Equal(t, stringRef, stringRef2, c.input)
+ }
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ for _, c := range validReferenceTestCases {
+ ref, err := ParseReference("//" + c.input)
+ require.NoError(t, err, c.input)
+ dockerRef := ref.DockerReference()
+ require.NotNil(t, dockerRef, c.input)
+ assert.Equal(t, c.dockerRef, dockerRef.String(), c.input)
+ }
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
+ ref, err := ParseReference("//busybox")
+ require.NoError(t, err)
+ assert.Equal(t, "docker.io/library/busybox:latest", ref.PolicyConfigurationIdentity())
+
+ ref, err = ParseReference("//busybox" + unknownDigestSuffixTest)
+ require.NoError(t, err)
+ assert.Equal(t, "docker.io/library/busybox", ref.PolicyConfigurationIdentity())
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
+ ref, err := ParseReference("//busybox")
+ require.NoError(t, err)
+ assert.Equal(t, []string{
+ "docker.io/library/busybox",
+ "docker.io/library",
+ "docker.io",
+ "*.io",
+ }, ref.PolicyConfigurationNamespaces())
+
+ ref, err = ParseReference("//busybox" + unknownDigestSuffixTest)
+ require.NoError(t, err)
+ assert.Equal(t, []string{
+ "docker.io/library",
+ "docker.io",
+ "*.io",
+ }, ref.PolicyConfigurationNamespaces())
+}
+
+func TestReferenceNewImage(t *testing.T) {
+ sysCtx := &types.SystemContext{
+ RegistriesDirPath: "/this/does/not/exist",
+ DockerPerHostCertDirPath: "/this/does/not/exist",
+ ArchitectureChoice: "amd64",
+ OSChoice: "linux",
+ }
+ ref, err := ParseReference("//quay.io/libpod/busybox")
+ require.NoError(t, err)
+ img, err := ref.NewImage(context.Background(), sysCtx)
+ require.NoError(t, err)
+ defer img.Close()
+
+ // unknownDigest case should return error
+ ref, err = ParseReference("//quay.io/libpod/busybox" + unknownDigestSuffixTest)
+ require.NoError(t, err)
+ _, err = ref.NewImage(context.Background(), sysCtx)
+ assert.Error(t, err)
+}
+
+func TestReferenceNewImageSource(t *testing.T) {
+ sysCtx := &types.SystemContext{
+ RegistriesDirPath: "/this/does/not/exist",
+ DockerPerHostCertDirPath: "/this/does/not/exist",
+ }
+ ref, err := ParseReference("//quay.io/libpod/busybox")
+ require.NoError(t, err)
+ src, err := ref.NewImageSource(context.Background(), sysCtx)
+ require.NoError(t, err)
+ defer src.Close()
+
+ // unknownDigest case should return error
+ ref, err = ParseReference("//quay.io/libpod/busybox" + unknownDigestSuffixTest)
+ require.NoError(t, err)
+ _, err = ref.NewImageSource(context.Background(), sysCtx)
+ assert.Error(t, err)
+}
+
+func TestReferenceNewImageDestination(t *testing.T) {
+ ref, err := ParseReference("//quay.io/libpod/busybox")
+ require.NoError(t, err)
+ dest, err := ref.NewImageDestination(context.Background(),
+ &types.SystemContext{RegistriesDirPath: "/this/does/not/exist", DockerPerHostCertDirPath: "/this/does/not/exist"})
+ require.NoError(t, err)
+ defer dest.Close()
+
+ ref, err = ParseReference("//quay.io/libpod/busybox" + unknownDigestSuffixTest)
+ require.NoError(t, err)
+ dest2, err := ref.NewImageDestination(context.Background(),
+ &types.SystemContext{RegistriesDirPath: "/this/does/not/exist", DockerPerHostCertDirPath: "/this/does/not/exist"})
+ require.NoError(t, err)
+ defer dest2.Close()
+}
+
+func TestReferenceTagOrDigest(t *testing.T) {
+ for input, expected := range map[string]string{
+ "//busybox:notlatest": "notlatest",
+ "//busybox" + sha256digest: "sha256:" + sha256digestHex,
+ } {
+ ref, err := ParseReference(input)
+ require.NoError(t, err, input)
+ dockerRef, ok := ref.(dockerReference)
+ require.True(t, ok, input)
+ tod, err := dockerRef.tagOrDigest()
+ require.NoError(t, err, input)
+ assert.Equal(t, expected, tod, input)
+ }
+
+ // Invalid input
+ ref, err := reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ dockerRef := dockerReference{ref: ref}
+ _, err = dockerRef.tagOrDigest()
+ assert.Error(t, err)
+
+ // Invalid input, unknownDigest case
+ ref, err = reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ dockerRef = dockerReference{ref: ref, isUnknownDigest: true}
+ _, err = dockerRef.tagOrDigest()
+ assert.Error(t, err)
+}
diff --git a/docker/errors.go b/docker/errors.go
new file mode 100644
index 0000000..4392f9d
--- /dev/null
+++ b/docker/errors.go
@@ -0,0 +1,101 @@
+package docker
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // ErrV1NotSupported is returned when we're trying to talk to a
+ // docker V1 registry.
+ ErrV1NotSupported = errors.New("can't talk to a V1 container registry")
+ // ErrTooManyRequests is returned when the status code returned is 429
+ ErrTooManyRequests = errors.New("too many requests to registry")
+)
+
+// ErrUnauthorizedForCredentials is returned when the status code returned is 401
+type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+ Err error
+}
+
+func (e ErrUnauthorizedForCredentials) Error() string {
+ return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error())
+}
+
+// httpResponseToError translates the http.Response into an error, possibly prefixing it with the supplied context. It returns
+// nil if the response is not considered an error.
+// NOTE: Almost all callers in this package should use registryHTTPResponseToError instead.
+func httpResponseToError(res *http.Response, context string) error {
+ switch res.StatusCode {
+ case http.StatusOK:
+ return nil
+ case http.StatusTooManyRequests:
+ return ErrTooManyRequests
+ case http.StatusUnauthorized:
+ err := registryHTTPResponseToError(res)
+ return ErrUnauthorizedForCredentials{Err: err}
+ default:
+ if context != "" {
+ context += ": "
+ }
+ return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
+ }
+}
+
+// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
+// registry.
+//
+// WARNING: The OCI distribution spec says
+// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is
+// JSON, it MUST use the errcode.Error structure.
+// So, callers should primarily decide based on HTTP StatusCode, not based on error type here.
+func registryHTTPResponseToError(res *http.Response) error {
+ err := handleErrorResponse(res)
+ // len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is.
+ if errs, ok := err.(errcode.Errors); ok && len(errs) > 0 {
+ // The docker/distribution registry implementation almost never returns
+ // more than one error in the HTTP body; it seems there is only one
+ // possible instance, where the second error reports a cleanup failure
+ // we don't really care about.
+ //
+ // The only _common_ case where a multi-element error is returned is
+ // created by the handleErrorResponse parser when OAuth authorization fails:
+ // the first element contains errors from a WWW-Authenticate header, the second
+ // element contains errors from the response body.
+ //
+ // In that case the first one is currently _slightly_ more informative (ErrorCodeUnauthorized
+ // for invalid tokens, ErrorCodeDenied for permission denied with a valid token
+ // for the first error, vs. ErrorCodeUnauthorized for both cases for the second error.)
+ //
+ // Also, docker/docker similarly only logs the other errors and returns the
+ // first one.
+ if len(errs) > 1 {
+ logrus.Debugf("Discarding non-primary errors:")
+ for _, err := range errs[1:] {
+ logrus.Debugf(" %s", err.Error())
+ }
+ }
+ err = errs[0]
+ }
+ switch e := err.(type) {
+ case *unexpectedHTTPResponseError:
+ response := string(e.Response)
+ if len(response) > 50 {
+ response = response[:50] + "..."
+ }
+ // %.0w makes e visible to errors.Unwrap() without including any text
+ err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e)
+ case errcode.Error:
+ // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message), which is usually
+ // rather redundant. So reword it without using e.Code.Error() if e.Message is the default.
+ if e.Message == e.Code.Message() {
+ // %.0w makes e visible to errors.Unwrap() without including any text
+ err = fmt.Errorf("%s%.0w", e.Message, e)
+ }
+ }
+ return err
+}
diff --git a/docker/errors_test.go b/docker/errors_test.go
new file mode 100644
index 0000000..2463d27
--- /dev/null
+++ b/docker/errors_test.go
@@ -0,0 +1,200 @@
+package docker
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "net/http"
+ "testing"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ v2 "github.com/docker/distribution/registry/api/v2"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// NOTE: This test records expected text strings, but NEITHER the returned error types
+// NOR the error texts are an API commitment subject to API stability expectations;
+// they can change at any time for any reason.
+func TestRegistryHTTPResponseToError(t *testing.T) {
+ var unwrappedUnexpectedHTTPResponseError *unexpectedHTTPResponseError
+ var unwrappedErrcodeError errcode.Error
+ for _, c := range []struct {
+ name string
+ response string
+ errorString string
+ errorType any // A value of the same type as the expected error, or nil
+ unwrappedErrorPtr any // A pointer to a value expected to be reachable using errors.As, or nil
+ errorCode *errcode.ErrorCode // A matching ErrorCode, or nil
+ fn func(t *testing.T, err error) // A more specialized test, or nil
+ }{
+ {
+ name: "HTTP status out of registry error range",
+ response: "HTTP/1.1 333 HTTP status out of range\r\n" +
+ "Header1: Value1\r\n" +
+ "\r\n" +
+ "Body of the request\r\n",
+ errorString: "received unexpected HTTP status: 333 HTTP status out of range",
+ errorType: &unexpectedHTTPStatusError{},
+ },
+ {
+ name: "HTTP body not in expected format",
+ response: "HTTP/1.1 400 I don't like this request\r\n" +
+ "Header1: Value1\r\n" +
+ "\r\n" +
+ "<html><body>JSON? What JSON?</body></html>\r\n",
+ errorString: `StatusCode: 400, "<html><body>JSON? What JSON?</body></html>\r\n"`,
+ errorType: nil,
+ unwrappedErrorPtr: &unwrappedUnexpectedHTTPResponseError,
+ },
+ {
+ name: "401 body not in expected format",
+ response: "HTTP/1.1 401 I don't like this request\r\n" +
+ "Header1: Value1\r\n" +
+ "\r\n" +
+ "<html><body>JSON? What JSON?</body></html>\r\n",
+ errorString: "authentication required",
+ errorType: nil,
+ unwrappedErrorPtr: &unwrappedErrcodeError,
+ errorCode: &errcode.ErrorCodeUnauthorized,
+ },
+ { // docker.io when an image is not found
+ name: "GET https://registry-1.docker.io/v2/library/this-does-not-exist/manifests/latest",
+ response: "HTTP/1.1 401 Unauthorized\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 170\r\n" +
+ "Content-Type: application/json\r\n" +
+ "Date: Thu, 12 Aug 2021 20:11:01 GMT\r\n" +
+ "Docker-Distribution-Api-Version: registry/2.0\r\n" +
+ "Strict-Transport-Security: max-age=31536000\r\n" +
+ "Www-Authenticate: Bearer realm=\"https://auth.docker.io/token\",service=\"registry.docker.io\",scope=\"repository:library/this-does-not-exist:pull\",error=\"insufficient_scope\"\r\n" +
+ "\r\n" +
+ "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"authentication required\",\"detail\":[{\"Type\":\"repository\",\"Class\":\"\",\"Name\":\"library/this-does-not-exist\",\"Action\":\"pull\"}]}]}\n",
+ errorString: "requested access to the resource is denied",
+ errorType: nil,
+ unwrappedErrorPtr: &unwrappedErrcodeError,
+ errorCode: &errcode.ErrorCodeDenied,
+ },
+ { // docker.io when a tag is not found
+ name: "GET https://registry-1.docker.io/v2/library/busybox/manifests/this-does-not-exist",
+ response: "HTTP/1.1 404 Not Found\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 109\r\n" +
+ "Content-Type: application/json\r\n" +
+ "Date: Thu, 12 Aug 2021 20:51:32 GMT\r\n" +
+ "Docker-Distribution-Api-Version: registry/2.0\r\n" +
+ "Ratelimit-Limit: 100;w=21600\r\n" +
+ "Ratelimit-Remaining: 100;w=21600\r\n" +
+ "Strict-Transport-Security: max-age=31536000\r\n" +
+ "\r\n" +
+ "{\"errors\":[{\"code\":\"MANIFEST_UNKNOWN\",\"message\":\"manifest unknown\",\"detail\":{\"Tag\":\"this-does-not-exist\"}}]}\n",
+ errorString: "manifest unknown",
+ errorType: nil,
+ unwrappedErrorPtr: &unwrappedErrcodeError,
+ errorCode: &v2.ErrorCodeManifestUnknown,
+ },
+ { // public.ecr.aws does not implement tag list
+ name: "GET https://public.ecr.aws/v2/nginx/nginx/tags/list",
+ response: "HTTP/1.1 404 Not Found\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 65\r\n" +
+ "Content-Type: application/json; charset=utf-8\r\n" +
+ "Date: Tue, 06 Sep 2022 21:19:02 GMT\r\n" +
+ "Docker-Distribution-Api-Version: registry/2.0\r\n" +
+ "\r\n" +
+ "{\"errors\":[{\"code\":\"NOT_FOUND\",\"message\":\"404 page not found\"}]}\r\n",
+ errorString: "unknown: 404 page not found",
+ errorType: nil,
+ unwrappedErrorPtr: &unwrappedErrcodeError,
+ errorCode: &errcode.ErrorCodeUnknown,
+ fn: func(t *testing.T, err error) {
+ var e errcode.Error
+ ok := errors.As(err, &e)
+ require.True(t, ok)
+ // Note: (skopeo inspect) is checking for this errcode.Error value
+ assert.Equal(t, errcode.Error{
+ Code: errcode.ErrorCodeUnknown, // The NOT_FOUND value is not defined, and turns into Unknown
+ Message: "404 page not found",
+ Detail: nil,
+ }, e)
+ },
+ },
+ { // registry.redhat.io is not compliant, variant 1: invalid "code" value
+ name: "registry.redhat.io/v2/this-does-not-exist/manifests/latest",
+ response: "HTTP/1.1 404 Not Found\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 53\r\n" +
+ "Cache-Control: max-age=0, no-cache, no-store\r\n" +
+ "Content-Type: application/json\r\n" +
+ "Date: Thu, 13 Oct 2022 18:15:15 GMT\r\n" +
+ "Expires: Thu, 13 Oct 2022 18:15:15 GMT\r\n" +
+ "Pragma: no-cache\r\n" +
+ "Server: Apache\r\n" +
+ "Strict-Transport-Security: max-age=63072000; includeSubdomains; preload\r\n" +
+ "X-Hostname: crane-tbr06.cran-001.prod.iad2.dc.redhat.com\r\n" +
+ "\r\n" +
+ "{\"errors\": [{\"code\": \"404\", \"message\": \"Not Found\"}]}\r\n",
+ errorString: "unknown: Not Found",
+ errorType: errcode.Error{},
+ unwrappedErrorPtr: &unwrappedErrcodeError,
+ errorCode: &errcode.ErrorCodeUnknown,
+ fn: func(t *testing.T, err error) {
+ var e errcode.Error
+ ok := errors.As(err, &e)
+ require.True(t, ok)
+ // isManifestUnknownError is checking for this
+ assert.Equal(t, errcode.Error{
+ Code: errcode.ErrorCodeUnknown, // The 404 value is not defined, and turns into Unknown
+ Message: "Not Found",
+ Detail: nil,
+ }, e)
+ },
+ },
+ { // registry.redhat.io is not compliant, variant 2: a completely out-of-protocol response
+ name: "registry.redhat.io/v2/rhosp15-rhel8/openstack-cron/manifests/sha256-8df5e60c42668706ac108b59c559b9187fa2de7e4e262e2967e3e9da35d5a8d7.sig",
+ response: "HTTP/1.1 404 Not Found\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 10\r\n" +
+ "Accept-Ranges: bytes\r\n" +
+ "Date: Thu, 13 Oct 2022 18:13:53 GMT\r\n" +
+ "Server: AkamaiNetStorage\r\n" +
+ "X-Docker-Size: -1\r\n" +
+ "\r\n" +
+ "Not found\r\n",
+ errorString: `StatusCode: 404, "Not found\r"`,
+ errorType: nil,
+ unwrappedErrorPtr: &unwrappedUnexpectedHTTPResponseError,
+ fn: func(t *testing.T, err error) {
+ var e *unexpectedHTTPResponseError
+ ok := errors.As(err, &e)
+ require.True(t, ok)
+ // isManifestUnknownError is checking for this
+ assert.Equal(t, 404, e.StatusCode)
+ assert.Equal(t, []byte("Not found\r"), e.Response)
+ },
+ },
+ } {
+ res, err := http.ReadResponse(bufio.NewReader(bytes.NewReader([]byte(c.response))), nil)
+ require.NoError(t, err, c.name)
+ defer res.Body.Close()
+
+ err = registryHTTPResponseToError(res)
+ assert.Equal(t, c.errorString, err.Error(), c.name)
+ if c.errorType != nil {
+ assert.IsType(t, c.errorType, err, c.name)
+ }
+ if c.unwrappedErrorPtr != nil {
+ found := errors.As(err, c.unwrappedErrorPtr)
+ assert.True(t, found, c.name)
+ }
+ if c.errorCode != nil {
+ var ec errcode.ErrorCoder
+ ok := errors.As(err, &ec)
+ require.True(t, ok, c.name)
+ assert.Equal(t, *c.errorCode, ec.ErrorCode(), c.name)
+ }
+ if c.fn != nil {
+ c.fn(t, err)
+ }
+ }
+}
diff --git a/docker/fixtures/registries.d/emptyConfig.yaml b/docker/fixtures/registries.d/emptyConfig.yaml
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/docker/fixtures/registries.d/emptyConfig.yaml
@@ -0,0 +1 @@
+{} \ No newline at end of file
diff --git a/docker/fixtures/registries.d/internal-example.com.yaml b/docker/fixtures/registries.d/internal-example.com.yaml
new file mode 100644
index 0000000..aea2512
--- /dev/null
+++ b/docker/fixtures/registries.d/internal-example.com.yaml
@@ -0,0 +1,18 @@
+docker:
+ example.com:
+ lookaside: https://lookaside.example.com
+ registry.test.example.com:
+ lookaside: http://registry.test.example.com/lookaside
+ registry.test.example.com:8888:
+ lookaside: http://registry.test.example.com:8889/lookaside
+ lookaside-staging: https://registry.test.example.com:8889/lookaside/specialAPIserverWhichDoesNotExist
+ localhost:
+ lookaside: file:///home/mitr/mydevelopment1
+ localhost:8080:
+ lookaside: file:///home/mitr/mydevelopment2
+ localhost/invalid/url/test:
+ lookaside: ":emptyscheme"
+ localhost/file/path/test:
+ lookaside: "/no/scheme/just/a/path"
+ localhost/relative/path/test:
+ lookaside: "no/scheme/relative/path"
diff --git a/docker/fixtures/registries.d/internet-user.yaml b/docker/fixtures/registries.d/internet-user.yaml
new file mode 100644
index 0000000..89eec0b
--- /dev/null
+++ b/docker/fixtures/registries.d/internet-user.yaml
@@ -0,0 +1,12 @@
+default-docker:
+ lookaside: file:///mnt/companywide/signatures/for/other/repositories
+docker:
+ docker.io/contoso:
+ lookaside: https://lookaside.contoso.com/fordocker
+ docker.io/centos:
+ lookaside: https://lookaside.centos.org/
+ docker.io/centos/mybetaproduct:
+ lookaside: http://localhost:9999/mybetaWIP/lookaside
+ lookaside-staging: file:///srv/mybetaWIP/lookaside
+ docker.io/centos/mybetaproduct:latest:
+ lookaside: https://lookaside.centos.org/
diff --git a/docker/fixtures/registries.d/invalid-but.notyaml b/docker/fixtures/registries.d/invalid-but.notyaml
new file mode 100644
index 0000000..5c34318
--- /dev/null
+++ b/docker/fixtures/registries.d/invalid-but.notyaml
@@ -0,0 +1 @@
+}
diff --git a/docker/internal/tarfile/dest.go b/docker/internal/tarfile/dest.go
new file mode 100644
index 0000000..7507d85
--- /dev/null
+++ b/docker/internal/tarfile/dest.go
@@ -0,0 +1,173 @@
+package tarfile
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/streamdigest"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// Destination is a partial implementation of private.ImageDestination for writing to an io.Writer.
+type Destination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.NoSignaturesInitialize
+
+ archive *Writer
+ repoTags []reference.NamedTagged
+ // Other state.
+ config []byte
+ sysCtx *types.SystemContext
+}
+
+// NewDestination returns a tarfile.Destination adding images to the specified Writer.
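+// A rough usage sketch (illustrative, not an API commitment): create a Writer over an io.Writer,
+// obtain a Destination with NewDestination, stream config and layer blobs via PutBlobWithOptions,
+// record each image with PutManifest, and finally call Writer.Close() to emit manifest.json and
+// the legacy metadata.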
+func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged) *Destination {
+ repoTags := []reference.NamedTagged{}
+ if ref != nil {
+ repoTags = append(repoTags, ref)
+ }
+ dest := &Destination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+ },
+ DesiredLayerCompression: types.Decompress,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+ // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
+ // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
+ // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName),
+ NoSignaturesInitialize: stubs.NoSignatures("Storing signatures for docker tar files is not supported"),
+
+ archive: archive,
+ repoTags: repoTags,
+ sysCtx: sys,
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest
+}
+
+// AddRepoTags adds the specified tags to the destination's repoTags.
+func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
+ d.repoTags = append(d.repoTags, tags...)
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ // Ouch, we need to stream the blob into a temporary file just to determine the size.
+ // When the layer is decompressed, we also have to generate the digest on uncompressed data.
+ if inputInfo.Size == -1 || inputInfo.Digest == "" {
+ logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+ streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ defer cleanup()
+ stream = streamCopy
+ logrus.Debugf("... streaming done")
+ }
+
+ if err := d.archive.lock(); err != nil {
+ return private.UploadedBlob{}, err
+ }
+ defer d.archive.unlock()
+
+	// Maybe the blob has already been sent
+ ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ if ok {
+ return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
+ }
+
+ if options.IsConfig {
+ buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
+ }
+ d.config = buf
+ if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
+ }
+ } else {
+ if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
+ return private.UploadedBlob{}, err
+ }
+ }
+ d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
+ return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport cannot reuse the requested blob, TryReusingBlobWithOptions returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalBlobMatchesRequiredCompression(options) {
+ return false, private.ReusedBlob{}, nil
+ }
+ if err := d.archive.lock(); err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ defer d.archive.unlock()
+
+ return d.archive.tryReusingBlobLocked(info)
+}
+
+// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.New(`Manifest lists are not supported for docker tar files`)
+ }
+ // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
+ // so the caller trying a different manifest kind would be pointless.
+ var man manifest.Schema2
+ if err := json.Unmarshal(m, &man); err != nil {
+ return fmt.Errorf("parsing manifest: %w", err)
+ }
+ if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+ return errors.New("Unsupported manifest type, need a Docker schema 2 manifest")
+ }
+
+ if err := d.archive.lock(); err != nil {
+ return err
+ }
+ defer d.archive.unlock()
+
+ if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil {
+ return err
+ }
+
+ return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
+}
diff --git a/docker/internal/tarfile/reader.go b/docker/internal/tarfile/reader.go
new file mode 100644
index 0000000..6845893
--- /dev/null
+++ b/docker/internal/tarfile/reader.go
@@ -0,0 +1,273 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+)
+
+// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
+type Reader struct {
+ // None of the fields below are modified after the archive is created, until .Close();
+ // this allows concurrent readers of the same archive.
+ path string // "" if the archive has already been closed.
+ removeOnClose bool // Remove file on close if true
+ Manifest []ManifestItem // Guaranteed to exist after the archive is created.
+}
+
+// NewReaderFromFile returns a Reader for the specified path.
+// The caller should call .Close() on the returned archive when done.
+func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
+ }
+ defer file.Close()
+
+	// If the file is seekable and not compressed, we can just return the file itself
+ // as a source. Otherwise we pass the stream to NewReaderFromStream.
+ var stream io.Reader = file
+ if _, err := file.Seek(0, io.SeekCurrent); err == nil { // seeking is possible
+ decompressed, isCompressed, err := compression.AutoDecompress(file)
+ if err != nil {
+ return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
+ }
+ defer decompressed.Close()
+ stream = decompressed
+ if !isCompressed {
+ return newReader(path, false)
+ }
+ }
+ return NewReaderFromStream(sys, stream)
+}
+
+// NewReaderFromStream returns a Reader for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewReaderFromStream returns.
+// The caller should call .Close() on the returned archive when done.
+func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
+ // Save inputStream to a temporary file
+ tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar")
+ if err != nil {
+ return nil, fmt.Errorf("creating temporary file: %w", err)
+ }
+ defer tarCopyFile.Close()
+
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ os.Remove(tarCopyFile.Name())
+ }
+ }()
+
+ // In order to be compatible with docker-load, we need to support
+ // auto-decompression (it's also a nice quality-of-life thing to avoid
+ // giving users really confusing "invalid tar header" errors).
+ uncompressedStream, _, err := compression.AutoDecompress(inputStream)
+ if err != nil {
+ return nil, fmt.Errorf("auto-decompressing input: %w", err)
+ }
+ defer uncompressedStream.Close()
+
+ // Copy the plain archive to the temporary file.
+ //
+ // TODO: This can take quite some time, and should ideally be cancellable
+ // using a context.Context.
+ if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
+ return nil, fmt.Errorf("copying contents to temporary file %q: %w", tarCopyFile.Name(), err)
+ }
+ succeeded = true
+
+ return newReader(tarCopyFile.Name(), true)
+}
+
+// newReader creates a Reader for the specified path and removeOnClose flag.
+// The caller should call .Close() on the returned archive when done.
+func newReader(path string, removeOnClose bool) (*Reader, error) {
+ // This is a valid enough archive, except Manifest is not yet filled.
+ r := Reader{
+ path: path,
+ removeOnClose: removeOnClose,
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ r.Close()
+ }
+ }()
+
+ // We initialize Manifest immediately when constructing the Reader instead
+ // of later on-demand because every caller will need the data, and because doing it now
+ // removes the need to synchronize the access/creation of the data if the archive is later
+ // used from multiple goroutines to access different images.
+
+ // FIXME? Do we need to deal with the legacy format?
+ bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
+ return nil, fmt.Errorf("decoding tar manifest.json: %w", err)
+ }
+
+ succeeded = true
+ return &r, nil
+}
+
+// Close removes resources associated with an initialized Reader, if any.
+func (r *Reader) Close() error {
+ path := r.path
+ r.path = "" // Mark the archive as closed
+ if r.removeOnClose {
+ return os.Remove(path)
+ }
+ return nil
+}
+
+// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or
+// both of which should be (nil, -1).
+// On success, it returns the manifest item and an index of the matching tag, if a tag was used
+// for matching; the index is -1 if a tag was not used.
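+// For illustration: (nil, -1) selects the only image in the archive, (ref, -1) matches ref against
+// the items' RepoTags, and (nil, i) selects the entry at index i in manifest.json.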
+func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
+ switch {
+ case ref != nil && sourceIndex != -1:
+ return nil, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+ ref.String(), sourceIndex)
+
+ case ref != nil:
+ refString := ref.String()
+ for i := range r.Manifest {
+ for tagIndex, tag := range r.Manifest[i].RepoTags {
+ parsedTag, err := reference.ParseNormalizedNamed(tag)
+ if err != nil {
+ return nil, -1, fmt.Errorf("Invalid tag %#v in manifest.json item @%d: %w", tag, i, err)
+ }
+ if parsedTag.String() == refString {
+ return &r.Manifest[i], tagIndex, nil
+ }
+ }
+ }
+ return nil, -1, fmt.Errorf("Tag %#v not found", refString)
+
+ case sourceIndex != -1:
+ if sourceIndex >= len(r.Manifest) {
+ return nil, -1, fmt.Errorf("Invalid source index @%d, only %d manifest items available",
+ sourceIndex, len(r.Manifest))
+ }
+ return &r.Manifest[sourceIndex], -1, nil
+
+ default:
+ if len(r.Manifest) != 1 {
+ return nil, -1, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
+ }
+ return &r.Manifest[0], -1, nil
+ }
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+ *tar.Reader
+ backingFile *os.File
+}
+
+func (t *tarReadCloser) Close() error {
+ return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
+// It is safe to call this method from multiple goroutines simultaneously.
+// The caller should call .Close() on the returned stream.
+func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
+	// This is only a sanity check; if anyone did concurrently close the Reader, this access is technically
+ // racy against the write in .Close().
+ if r.path == "" {
+ return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader")
+ }
+
+ f, err := os.Open(r.path)
+ if err != nil {
+ return nil, err
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ f.Close()
+ }
+ }()
+
+ tarReader, header, err := findTarComponent(f, componentPath)
+ if err != nil {
+ return nil, err
+ }
+ if header == nil {
+ return nil, os.ErrNotExist
+ }
+ if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+ // We follow only one symlink; so no loops are possible.
+ if _, err := f.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+ // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+ // so we don't care.
+ tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
+ if err != nil {
+ return nil, err
+ }
+ if header == nil {
+ return nil, os.ErrNotExist
+ }
+ }
+
+ if !header.FileInfo().Mode().IsRegular() {
+ return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+ }
+ succeeded = true
+ return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
+}
+
+// findTarComponent returns a reader and the header for the entry matching componentPath within inputFile,
+// or (nil, nil, nil) if not found.
+func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
+ t := tar.NewReader(inputFile)
+ componentPath = path.Clean(componentPath)
+ for {
+ h, err := t.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ if path.Clean(h.Name) == componentPath {
+ return t, h, nil
+ }
+ }
+ return nil, nil, nil
+}
+
+// readTarComponent returns full contents of componentPath.
+// It is safe to call this method from multiple goroutines simultaneously.
+func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
+ file, err := r.openTarComponent(path)
+ if err != nil {
+ return nil, fmt.Errorf("loading tar component %s: %w", path, err)
+ }
+ defer file.Close()
+ bytes, err := iolimits.ReadAtMost(file, limit)
+ if err != nil {
+ return nil, err
+ }
+ return bytes, nil
+}
diff --git a/docker/internal/tarfile/src.go b/docker/internal/tarfile/src.go
new file mode 100644
index 0000000..b63b531
--- /dev/null
+++ b/docker/internal/tarfile/src.go
@@ -0,0 +1,319 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+)
+
+// Source is a partial implementation of types.ImageSource for reading from tarPath.
+type Source struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
+ archive *Reader
+ closeArchive bool // .Close() the archive when the source is closed.
+	// If ref is nil and sourceIndex is -1, this refers to the only image in the archive.
+ ref reference.NamedTagged // May be nil
+ sourceIndex int // May be -1
+ // The following data is only available after ensureCachedDataIsPresent() succeeds
+ tarManifest *ManifestItem // nil if not available yet.
+ configBytes []byte
+ configDigest digest.Digest
+ orderedDiffIDList []digest.Digest
+ knownLayers map[digest.Digest]*layerInfo
+ // Other state
+ generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
+ cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe
+ cacheDataResult error // Private state for ensureCachedDataIsPresent
+}
+
+type layerInfo struct {
+ path string
+ size int64
+}
+
+// NewSource returns a tarfile.Source for an image in the specified archive matching ref
+// and sourceIndex (or the only image if they are (nil, -1)).
+// The archive will be closed if closeArchive is true.
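+// A rough usage sketch (illustrative): open the archive with NewReaderFromFile or NewReaderFromStream,
+// construct a Source with NewSource, read the image via GetManifest and GetBlob, and call Close()
+// when done (which closes the archive if closeArchive is true).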
+func NewSource(archive *Reader, closeArchive bool, transportName string, ref reference.NamedTagged, sourceIndex int) *Source {
+ s := &Source{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAtRaw(transportName),
+
+ archive: archive,
+ closeArchive: closeArchive,
+ ref: ref,
+ sourceIndex: sourceIndex,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s
+}
+
+// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+// It is safe to call this from multi-threaded code.
+func (s *Source) ensureCachedDataIsPresent() error {
+ s.cacheDataLock.Do(func() {
+ s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
+ })
+ return s.cacheDataResult
+}
+
+// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
+// Call ensureCachedDataIsPresent instead.
+func (s *Source) ensureCachedDataIsPresentPrivate() error {
+ tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex)
+ if err != nil {
+ return err
+ }
+
+ // Read and parse config.
+ configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return err
+ }
+ var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
+ if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+ return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
+ }
+ if parsedConfig.RootFS == nil {
+ return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
+ }
+
+ knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
+ if err != nil {
+ return err
+ }
+
+ // Success; commit.
+ s.tarManifest = tarManifest
+ s.configBytes = configBytes
+ s.configDigest = digest.FromBytes(configBytes)
+ s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+ s.knownLayers = knownLayers
+ return nil
+}
+
+// Close removes resources associated with an initialized Source, if any.
+func (s *Source) Close() error {
+ if s.closeArchive {
+ return s.archive.Close()
+ }
+ return nil
+}
+
+// TarManifest returns contents of manifest.json
+func (s *Source) TarManifest() []ManifestItem {
+ return s.archive.Manifest
+}
+
+func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
+ // Collect layer data available in manifest and config.
+ if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
+ return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+ }
+ knownLayers := map[digest.Digest]*layerInfo{}
+ unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
+ for i, diffID := range parsedConfig.RootFS.DiffIDs {
+ if _, ok := knownLayers[diffID]; ok {
+ // Apparently it really can happen that a single image contains the same layer diff more than once.
+ // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
+ // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
+ continue
+ }
+ layerPath := path.Clean(tarManifest.Layers[i])
+ if _, ok := unknownLayerSizes[layerPath]; ok {
+ return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+ }
+ li := &layerInfo{ // A new element in each iteration
+ path: layerPath,
+ size: -1,
+ }
+ knownLayers[diffID] = li
+ unknownLayerSizes[layerPath] = li
+ }
+
+ // Scan the tar file to collect layer sizes.
+ file, err := os.Open(s.archive.path)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ t := tar.NewReader(file)
+ for {
+ h, err := t.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ layerPath := path.Clean(h.Name)
+ // FIXME: Cache this data across images in Reader.
+ if li, ok := unknownLayerSizes[layerPath]; ok {
+ // Since GetBlob will decompress layers that are compressed we need
+ // to do the decompression here as well, otherwise we will
+ // incorrectly report the size. Pretty critical, since tools like
+		// umoci always compress layer blobs. We only fall back to the slower
+		// decompress-and-count path when the blob is actually compressed.
+ uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
+ if err != nil {
+ return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err)
+ }
+ defer uncompressedStream.Close()
+
+ uncompressedSize := h.Size
+ if isCompressed {
+ uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err)
+ }
+ }
+ li.size = uncompressedSize
+ delete(unknownLayerSizes, layerPath)
+ }
+ }
+ if len(unknownLayerSizes) != 0 {
+		return nil, errors.New("Some layer tarfiles are missing in the tarball") // This could do with better error reporting, if it ever happened in practice.
+ }
+
+ return knownLayers, nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary instances.
+func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
+ return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`)
+ }
+ if s.generatedManifest == nil {
+ if err := s.ensureCachedDataIsPresent(); err != nil {
+ return nil, "", err
+ }
+ m := manifest.Schema2{
+ SchemaVersion: 2,
+ MediaType: manifest.DockerV2Schema2MediaType,
+ ConfigDescriptor: manifest.Schema2Descriptor{
+ MediaType: manifest.DockerV2Schema2ConfigMediaType,
+ Size: int64(len(s.configBytes)),
+ Digest: s.configDigest,
+ },
+ LayersDescriptors: []manifest.Schema2Descriptor{},
+ }
+ for _, diffID := range s.orderedDiffIDList {
+ li, ok := s.knownLayers[diffID]
+ if !ok {
+ return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+ }
+ m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+ Digest: diffID, // diffID is a digest of the uncompressed tarball
+ MediaType: manifest.DockerV2Schema2LayerMediaType,
+ Size: li.size,
+ })
+ }
+ manifestBytes, err := json.Marshal(&m)
+ if err != nil {
+ return nil, "", err
+ }
+ s.generatedManifest = manifestBytes
+ }
+ return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
+}
+
+// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
+type uncompressedReadCloser struct {
+ io.Reader
+ underlyingCloser func() error
+ uncompressedCloser func() error
+}
+
+func (r uncompressedReadCloser) Close() error {
+ var res error
+ if err := r.uncompressedCloser(); err != nil {
+ res = err
+ }
+ if err := r.underlyingCloser(); err != nil && res == nil {
+ res = err
+ }
+ return res
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if err := s.ensureCachedDataIsPresent(); err != nil {
+ return nil, 0, err
+ }
+
+ if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
+ return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+ }
+
+ if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
+ underlyingStream, err := s.archive.openTarComponent(li.path)
+ if err != nil {
+ return nil, 0, err
+ }
+ closeUnderlyingStream := true
+ defer func() {
+ if closeUnderlyingStream {
+ underlyingStream.Close()
+ }
+ }()
+
+ // In order to handle the fact that digests != diffIDs (and thus that a
+ // caller which is trying to verify the blob will run into problems),
+ // we need to decompress blobs. This is a bit ugly, but it's a
+ // consequence of making everything addressable by their DiffID rather
+ // than by their digest...
+ //
+ // In particular, because the v2s2 manifest being generated uses
+ // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
+ // layers not their _actual_ digest. The result is that copy/... will
+ // be verifying a "digest" which is not the actual layer's digest (but
+ // is instead the DiffID).
+
+ uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
+ if err != nil {
+ return nil, 0, fmt.Errorf("auto-decompressing blob %s: %w", info.Digest, err)
+ }
+
+ newStream := uncompressedReadCloser{
+ Reader: uncompressedStream,
+ underlyingCloser: underlyingStream.Close,
+ uncompressedCloser: uncompressedStream.Close,
+ }
+ closeUnderlyingStream = false
+
+ return newStream, li.size, nil
+ }
+
+ return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest)
+}
diff --git a/docker/internal/tarfile/src_test.go b/docker/internal/tarfile/src_test.go
new file mode 100644
index 0000000..1f8248a
--- /dev/null
+++ b/docker/internal/tarfile/src_test.go
@@ -0,0 +1,66 @@
+package tarfile
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "strings"
+ "testing"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSourcePrepareLayerData(t *testing.T) {
+ // Just a smoke test to verify prepareLayerData does not crash on missing data
+ for _, c := range []struct {
+ config string
+ shouldFail bool
+ }{
+ {`{}`, true}, // No RootFS entry: can fail, shouldn’t crash
+ {`{"rootfs":{}}`, false}, // Useless no-layer configuration
+ } {
+ cache := memory.New()
+ var tarfileBuffer bytes.Buffer
+ ctx := context.Background()
+
+ writer := NewWriter(&tarfileBuffer)
+ dest := NewDestination(nil, writer, "transport name", nil)
+ // No layers
+ configInfo, err := dest.PutBlob(ctx, strings.NewReader(c.config),
+ types.BlobInfo{Size: -1}, cache, true)
+ require.NoError(t, err, c.config)
+ manifest, err := manifest.Schema2FromComponents(
+ manifest.Schema2Descriptor{
+ MediaType: manifest.DockerV2Schema2ConfigMediaType,
+ Size: configInfo.Size,
+ Digest: configInfo.Digest,
+ }, []manifest.Schema2Descriptor{}).Serialize()
+ require.NoError(t, err, c.config)
+ err = dest.PutManifest(ctx, manifest, nil)
+ require.NoError(t, err, c.config)
+ err = writer.Close()
+ require.NoError(t, err, c.config)
+
+ reader, err := NewReaderFromStream(nil, &tarfileBuffer)
+ require.NoError(t, err, c.config)
+ src := NewSource(reader, true, "transport name", nil, -1)
+ require.NoError(t, err, c.config)
+ defer src.Close()
+ configStream, _, err := src.GetBlob(ctx, types.BlobInfo{
+ Digest: configInfo.Digest,
+ Size: -1,
+ }, cache)
+ if !c.shouldFail {
+ require.NoError(t, err, c.config)
+ config2, err := io.ReadAll(configStream)
+ require.NoError(t, err, c.config)
+ assert.Equal(t, []byte(c.config), config2, c.config)
+ } else {
+ assert.Error(t, err, c.config)
+ }
+ }
+}
diff --git a/docker/internal/tarfile/types.go b/docker/internal/tarfile/types.go
new file mode 100644
index 0000000..6e6ccd2
--- /dev/null
+++ b/docker/internal/tarfile/types.go
@@ -0,0 +1,28 @@
+package tarfile
+
+import (
+ "github.com/containers/image/v5/manifest"
+ "github.com/opencontainers/go-digest"
+)
+
+// Various data structures.
+
+// Based on github.com/docker/docker/image/tarexport/tarexport.go
+const (
+ manifestFileName = "manifest.json"
+ legacyLayerFileName = "layer.tar"
+ legacyConfigFileName = "json"
+ legacyVersionFileName = "VERSION"
+ legacyRepositoriesFileName = "repositories"
+)
+
+// ManifestItem is an element of the array stored in the top-level manifest.json file.
+type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API.
+ Config string
+ RepoTags []string
+ Layers []string
+ Parent imageID `json:",omitempty"`
+ LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
+}
+
+type imageID string
diff --git a/docker/internal/tarfile/writer.go b/docker/internal/tarfile/writer.go
new file mode 100644
index 0000000..df7b2c0
--- /dev/null
+++ b/docker/internal/tarfile/writer.go
@@ -0,0 +1,379 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+)
+
+// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
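+// A rough usage sketch (illustrative): NewWriter(dest) creates the archive, images are added through
+// a tarfile.Destination wrapping this Writer, and Close() finishes the archive by writing manifest.json
+// and the legacy repositories file.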
+type Writer struct {
+ mutex sync.Mutex
+ // ALL of the following members can only be accessed with the mutex held.
+ // Use Writer.lock() to obtain the mutex.
+ writer io.Writer
+ tar *tar.Writer // nil if the Writer has already been closed.
+ // Other state.
+ blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
+ repositories map[string]map[string]string
+ legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent.
+ manifest []ManifestItem
+ manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
+}
+
+// NewWriter returns a Writer for the specified io.Writer.
+// The caller must eventually call .Close() on the returned object to create a valid archive.
+func NewWriter(dest io.Writer) *Writer {
+ return &Writer{
+ writer: dest,
+ tar: tar.NewWriter(dest),
+ blobs: make(map[digest.Digest]types.BlobInfo),
+ repositories: map[string]map[string]string{},
+ legacyLayers: set.New[string](),
+ manifestByConfig: map[digest.Digest]int{},
+ }
+}
+
+// lock does some sanity checks and locks the Writer.
+// If this function succeeds, the caller must call w.unlock.
+// Do not use Writer.mutex directly.
+func (w *Writer) lock() error {
+ w.mutex.Lock()
+ if w.tar == nil {
+ w.mutex.Unlock()
+ return errors.New("Internal error: trying to use an already closed tarfile.Writer")
+ }
+ return nil
+}
+
+// unlock releases the lock obtained by Writer.lock
+// Do not use Writer.mutex directly.
+func (w *Writer) unlock() {
+ w.mutex.Unlock()
+}
+
+// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport cannot reuse the requested blob, tryReusingBlobLocked returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// The caller must have locked the Writer.
+func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) {
+ if info.Digest == "" {
+ return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
+ }
+ if blob, ok := w.blobs[info.Digest]; ok {
+ return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil
+ }
+ return false, private.ReusedBlob{}, nil
+}
+
+// recordBlobLocked records metadata of a blob, which must contain at least a digest and size.
+// The caller must have locked the Writer.
+func (w *Writer) recordBlobLocked(info types.BlobInfo) {
+ w.blobs[info.Digest] = info
+}
+
+// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
+// The caller must have locked the Writer.
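+// In the legacy layout, each layer ID gets its own directory containing a layer.tar symlink to the
+// physical layer, a VERSION file ("1.0"), and a per-layer config file named "json".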
+func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
+ if !w.legacyLayers.Contains(layerID) {
+ // Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
+ // See also the comment in physicalLayerPath.
+ physicalLayerPath := w.physicalLayerPath(layerDigest)
+ if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
+ return fmt.Errorf("creating layer symbolic link: %w", err)
+ }
+
+ b := []byte("1.0")
+ if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
+ return fmt.Errorf("writing VERSION file: %w", err)
+ }
+
+ if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
+ return fmt.Errorf("writing config json file: %w", err)
+ }
+
+ w.legacyLayers.Add(layerID)
+ }
+ return nil
+}
+
+// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
+func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error {
+ var chainID digest.Digest
+ lastLayerID := ""
+ for i, l := range layerDescriptors {
+ // The legacy format requires a config file per layer
+ layerConfig := make(map[string]any)
+
+ // The root layer doesn't have any parent
+ if lastLayerID != "" {
+ layerConfig["parent"] = lastLayerID
+ }
+ // The top layer configuration file is generated by using subpart of the image configuration
+ if i == len(layerDescriptors)-1 {
+ var config map[string]*json.RawMessage
+ err := json.Unmarshal(configBytes, &config)
+ if err != nil {
+ return fmt.Errorf("unmarshaling config: %w", err)
+ }
+ for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
+ layerConfig[attr] = config[attr]
+ }
+ }
+
+ // This chainID value matches the computation in docker/docker/layer.CreateChainID …
+ if chainID == "" {
+ chainID = l.Digest
+ } else {
+ chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
+ }
+ // … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because
+		// we create the image configs differently in some details. At least recent versions allocate new IDs on load,
+ // so this is fine as long as the IDs we use are unique / cannot loop.
+ //
+ // For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created
+ // config makes sure that everything uses the same “namespace”; a bit less efficient but clearer.
+ //
+ // Temporarily add the chainID to the config, only for the purpose of generating the image ID.
+ layerConfig["layer_id"] = chainID
+ b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
+ if err != nil {
+ return fmt.Errorf("marshaling layer config: %w", err)
+ }
+ delete(layerConfig, "layer_id")
+ layerID := digest.Canonical.FromBytes(b).Hex()
+ layerConfig["id"] = layerID
+
+ configBytes, err := json.Marshal(layerConfig)
+ if err != nil {
+ return fmt.Errorf("marshaling layer config: %w", err)
+ }
+
+ if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
+ return err
+ }
+
+ lastLayerID = layerID
+ }
+
+ if lastLayerID != "" {
+ for _, repoTag := range repoTags {
+ if val, ok := w.repositories[repoTag.Name()]; ok {
+ val[repoTag.Tag()] = lastLayerID
+ } else {
+ w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID}
+ }
+ }
+ }
+ return nil
+}
+
+// checkManifestItemsMatch checks that a and b describe the same image,
+// and returns an error if that’s not the case (which should never happen).
+func checkManifestItemsMatch(a, b *ManifestItem) error {
+ if a.Config != b.Config {
+ return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
+ }
+ if !slices.Equal(a.Layers, b.Layers) {
+ return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
+ }
+ // Ignore RepoTags, that will be built later.
+ // Ignore Parent and LayerSources, which we don’t set to anything meaningful.
+ return nil
+}
+
+// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags
+// The caller must have locked the Writer.
+func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
+ layerPaths := []string{}
+ for _, l := range layerDescriptors {
+ layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
+ }
+
+ var item *ManifestItem
+ newItem := ManifestItem{
+ Config: w.configPath(configDigest),
+ RepoTags: []string{},
+ Layers: layerPaths,
+ Parent: "", // We don’t have this information
+ LayerSources: nil,
+ }
+ if i, ok := w.manifestByConfig[configDigest]; ok {
+ item = &w.manifest[i]
+ if err := checkManifestItemsMatch(item, &newItem); err != nil {
+ return err
+ }
+ } else {
+ i := len(w.manifest)
+ w.manifestByConfig[configDigest] = i
+ w.manifest = append(w.manifest, newItem)
+ item = &w.manifest[i]
+ }
+
+ knownRepoTags := set.New[string]()
+ for _, repoTag := range item.RepoTags {
+ knownRepoTags.Add(repoTag)
+ }
+ for _, tag := range repoTags {
+ // For github.com/docker/docker consumers, this works just as well as
+ // refString := ref.String()
+ // because when reading the RepoTags strings, github.com/docker/docker/reference
+ // normalizes both of them to the same value.
+ //
+ // Doing it this way to include the normalized-out `docker.io[/library]` does make
+ // a difference for github.com/projectatomic/docker consumers, with the
+ // “Add --add-registry and --block-registry options to docker daemon” patch.
+ // These consumers treat reference strings which include a hostname and reference
+ // strings without a hostname differently.
+ //
+ // Using the host name here is more explicit about the intent, and it has the same
+ // effect as (docker pull) in projectatomic/docker, which tags the result using
+ // a hostname-qualified reference.
+ // See https://github.com/containers/image/issues/72 for a more detailed
+ // analysis and explanation.
+ refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
+
+ if !knownRepoTags.Contains(refString) {
+ item.RepoTags = append(item.RepoTags, refString)
+ knownRepoTags.Add(refString)
+ }
+ }
+
+ return nil
+}
+
+// Close writes all outstanding data about images to the archive, and finishes writing data
+// to the underlying io.Writer.
+// No more images can be added after this is called.
+func (w *Writer) Close() error {
+ if err := w.lock(); err != nil {
+ return err
+ }
+ defer w.unlock()
+
+ b, err := json.Marshal(&w.manifest)
+ if err != nil {
+ return err
+ }
+ if err := w.sendBytesLocked(manifestFileName, b); err != nil {
+ return err
+ }
+
+ b, err = json.Marshal(w.repositories)
+ if err != nil {
+ return fmt.Errorf("marshaling repositories: %w", err)
+ }
+ if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
+ return fmt.Errorf("writing config json file: %w", err)
+ }
+
+ if err := w.tar.Close(); err != nil {
+ return err
+ }
+ w.tar = nil // Mark the Writer as closed.
+ return nil
+}
+
+// configPath returns a path we choose for storing a config with the specified digest.
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) configPath(configDigest digest.Digest) string {
+ return configDigest.Hex() + ".json"
+}
+
+// physicalLayerPath returns a path we choose for storing a layer with the specified digest
+// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
+ // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+ // writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
+ // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
+ // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
+ // in the root of the tarball.
+ return layerDigest.Hex() + ".tar"
+}
+
+type tarFI struct {
+ path string
+ size int64
+ isSymlink bool
+}
+
+func (t *tarFI) Name() string {
+ return t.path
+}
+func (t *tarFI) Size() int64 {
+ return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+ if t.isSymlink {
+ return os.ModeSymlink
+ }
+ return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+ return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+ return false
+}
+func (t *tarFI) Sys() any {
+ return nil
+}
+
+// sendSymlinkLocked sends a symlink into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendSymlinkLocked(path string, target string) error {
+ hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Sending as tar link %s -> %s", path, target)
+ return w.tar.WriteHeader(hdr)
+}
+
+// sendBytesLocked sends the bytes b as a file at path into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendBytesLocked(path string, b []byte) error {
+ return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
+}
+
+// sendFileLocked sends a file into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
+ hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Sending as tar file %s", path)
+ if err := w.tar.WriteHeader(hdr); err != nil {
+ return err
+ }
+ // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+ size, err := io.Copy(w.tar, stream)
+ if err != nil {
+ return err
+ }
+ if size != expectedSize {
+ return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+ }
+ return nil
+}
diff --git a/docker/paths_common.go b/docker/paths_common.go
new file mode 100644
index 0000000..862e880
--- /dev/null
+++ b/docker/paths_common.go
@@ -0,0 +1,6 @@
+//go:build !freebsd
+// +build !freebsd
+
+package docker
+
+const etcDir = "/etc"
diff --git a/docker/paths_freebsd.go b/docker/paths_freebsd.go
new file mode 100644
index 0000000..2bf27ac
--- /dev/null
+++ b/docker/paths_freebsd.go
@@ -0,0 +1,6 @@
+//go:build freebsd
+// +build freebsd
+
+package docker
+
+const etcDir = "/usr/local/etc"
diff --git a/docker/policyconfiguration/naming.go b/docker/policyconfiguration/naming.go
new file mode 100644
index 0000000..e1f1f1f
--- /dev/null
+++ b/docker/policyconfiguration/naming.go
@@ -0,0 +1,78 @@
+package policyconfiguration
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+)
+
+// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
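+// For example (mirroring naming_test.go): "repo:tag" maps to "docker.io/library/repo:tag", and a
+// digest-pinned reference maps to its name followed by "@" and the digest.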
+func DockerReferenceIdentity(ref reference.Named) (string, error) {
+ res := ref.Name()
+ tagged, isTagged := ref.(reference.NamedTagged)
+ digested, isDigested := ref.(reference.Canonical)
+ switch {
+ case isTagged && isDigested: // Note that this CAN actually happen.
+ return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
+ case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
+ return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+ case isTagged:
+ res = res + ":" + tagged.Tag()
+ case isDigested:
+ res = res + "@" + digested.Digest().String()
+ default: // Coverage: The above was supposed to be exhaustive.
+ return "", errors.New("Internal inconsistency, unexpected default branch")
+ }
+ return res, nil
+}
+
+// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
+// as a backend for ImageReference.PolicyConfigurationNamespaces.
+// The reference must satisfy !reference.IsNameOnly().
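+// For example (mirroring naming_test.go): "example.com/ns/repo" yields
+// ["example.com/ns/repo", "example.com/ns", "example.com", "*.com"].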
+func DockerReferenceNamespaces(ref reference.Named) []string {
+ // Look for a match of the repository, and then of the possible parent
+ // namespaces. Note that this only happens on the expanded host names
+ // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox",
+ // then in its parent "docker.io/library"; in none of "busybox",
+ // un-namespaced "library" nor in "" supposedly implicitly representing "library/".
+ //
+ // ref.Name() == ref.Domain() + "/" + ref.Path(), so the last
+ // iteration matches the host name (for any namespace).
+ res := []string{}
+ name := ref.Name()
+ for {
+ res = append(res, name)
+
+ lastSlash := strings.LastIndex(name, "/")
+ if lastSlash == -1 {
+ break
+ }
+ name = name[:lastSlash]
+ }
+
+ // Strip port number if any, before appending to res slice.
+ // Currently, the most compatible behavior is to return
+ // example.com:8443/ns, example.com:8443, *.com.
+ // If a port number is not specified, the expected behavior would be
+ // example.com/ns, example.com, *.com
+ portNumColon := strings.Index(name, ":")
+ if portNumColon != -1 {
+ name = name[:portNumColon]
+ }
+
+ // Append wildcarded domains to res slice
+ for {
+ firstDot := strings.Index(name, ".")
+ if firstDot == -1 {
+ break
+ }
+ name = name[firstDot+1:]
+
+ res = append(res, "*."+name)
+ }
+ return res
+}
diff --git a/docker/policyconfiguration/naming_test.go b/docker/policyconfiguration/naming_test.go
new file mode 100644
index 0000000..8f70c66
--- /dev/null
+++ b/docker/policyconfiguration/naming_test.go
@@ -0,0 +1,86 @@
+package policyconfiguration
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestDockerReference tests DockerReferenceIdentity and DockerReferenceNamespaces simultaneously
+// to ensure they are consistent.
+func TestDockerReference(t *testing.T) {
+ sha256Digest := "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ // Test both that DockerReferenceIdentity returns the expected value (fullName+suffix),
+ // and that DockerReferenceNamespaces starts with the expected value (fullName), i.e. that the two functions are
+ // consistent.
+ for inputName, expectedNS := range map[string][]string{
+ "example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com", "*.com"},
+ "example.com/repo": {"example.com/repo", "example.com", "*.com"},
+ "localhost/ns/repo": {"localhost/ns/repo", "localhost/ns", "localhost"},
+		// Note that "localhost" is special here: notlocalhost/repo is parsed as docker.io/notlocalhost/repo:
+ "localhost/repo": {"localhost/repo", "localhost"},
+ "notlocalhost/repo": {"docker.io/notlocalhost/repo", "docker.io/notlocalhost", "docker.io", "*.io"},
+ "docker.io/ns/repo": {"docker.io/ns/repo", "docker.io/ns", "docker.io", "*.io"},
+ "docker.io/library/repo": {"docker.io/library/repo", "docker.io/library", "docker.io", "*.io"},
+ "docker.io/repo": {"docker.io/library/repo", "docker.io/library", "docker.io", "*.io"},
+ "ns/repo": {"docker.io/ns/repo", "docker.io/ns", "docker.io", "*.io"},
+ "library/repo": {"docker.io/library/repo", "docker.io/library", "docker.io", "*.io"},
+ "repo": {"docker.io/library/repo", "docker.io/library", "docker.io", "*.io"},
+ "yet.another.example.com:8443/ns/repo": {"yet.another.example.com:8443/ns/repo", "yet.another.example.com:8443/ns", "yet.another.example.com:8443", "*.another.example.com", "*.example.com", "*.com"},
+ } {
+ for inputSuffix, mappedSuffix := range map[string]string{
+ ":tag": ":tag",
+ sha256Digest: sha256Digest,
+ } {
+ fullInput := inputName + inputSuffix
+ ref, err := reference.ParseNormalizedNamed(fullInput)
+ require.NoError(t, err, fullInput)
+
+ identity, err := DockerReferenceIdentity(ref)
+ require.NoError(t, err, fullInput)
+ assert.Equal(t, expectedNS[0]+mappedSuffix, identity, fullInput)
+
+ ns := DockerReferenceNamespaces(ref)
+ require.NotNil(t, ns, fullInput)
+ require.Len(t, ns, len(expectedNS), fullInput)
+ moreSpecific := identity
+ for i := range expectedNS {
+ assert.Equal(t, ns[i], expectedNS[i], fmt.Sprintf("%s item %d", fullInput, i))
+ // Verify that expectedNS is ordered from most specific to least specific
+ if strings.HasPrefix(ns[i], "*.") {
+ // Check for subdomain matches if wildcard present
+ assert.True(t, strings.Contains(moreSpecific, ns[i][1:]))
+ } else {
+ assert.True(t, strings.HasPrefix(moreSpecific, ns[i]))
+ }
+ moreSpecific = ns[i]
+ }
+ }
+ }
+}
+
+func TestDockerReferenceIdentity(t *testing.T) {
+ // TestDockerReference above has tested the core of the functionality, this tests only the failure cases.
+
+ // Neither a tag nor digest
+ parsed, err := reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ id, err := DockerReferenceIdentity(parsed)
+ assert.Equal(t, "", id)
+ assert.Error(t, err)
+
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ parsed, err = reference.ParseNormalizedNamed("busybox:notlatest@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
+ require.NoError(t, err)
+ _, ok := parsed.(reference.Canonical)
+ require.True(t, ok)
+ _, ok = parsed.(reference.NamedTagged)
+ require.True(t, ok)
+ id, err = DockerReferenceIdentity(parsed)
+ assert.Equal(t, "", id)
+ assert.Error(t, err)
+}
diff --git a/docker/reference/README.md b/docker/reference/README.md
new file mode 100644
index 0000000..3c4d74e
--- /dev/null
+++ b/docker/reference/README.md
@@ -0,0 +1,2 @@
+This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8,
+except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file
diff --git a/docker/reference/helpers.go b/docker/reference/helpers.go
new file mode 100644
index 0000000..978df7e
--- /dev/null
+++ b/docker/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
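+// For example (a sketch based on the behavior below), a reference parsed from
+// "busybox" is name-only, while "busybox:latest" and digested references are not.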
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given Named reference, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
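+//
+// Illustrative usage (a sketch mirroring normalize_test.go):
+//
+//	ref, _ := ParseAnyReference("foo/a/bar")
+//	ok, _ := FamiliarMatch("foo/**/ba[rz]", ref) // true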
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
diff --git a/docker/reference/normalize.go b/docker/reference/normalize.go
new file mode 100644
index 0000000..d3f47d2
--- /dev/null
+++ b/docker/reference/normalize.go
@@ -0,0 +1,181 @@
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ legacyDefaultDomain = "index.docker.io"
+ defaultDomain = "docker.io"
+ officialRepoName = "library"
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
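+//
+// Illustrative usage (a sketch mirroring normalize_test.go):
+//
+//	named, _ := ParseNormalizedNamed("busybox")
+//	// named.Name() == "docker.io/library/busybox", FamiliarName(named) == "busybox"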
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remoteName string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remoteName = remainder[:tagSep]
+ } else {
+ remoteName = remainder
+ }
+ if strings.ToLower(remoteName) != remoteName {
+ return nil, errors.New("invalid reference format: repository name must be lowercase")
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. For a reference that contains both
+// a tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+ named, err := ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := named.(NamedTagged); ok {
+ if canonical, ok := named.(Canonical); ok {
+ // The reference is both tagged and digested, only
+ // return digested.
+ newNamed, err := WithName(canonical.Name())
+ if err != nil {
+ return nil, err
+ }
+ newCanonical, err := WithDigest(newNamed, canonical.Digest())
+ if err != nil {
+ return nil, err
+ }
+ return newCanonical, nil
+ }
+ }
+ return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name into domain and remote-name strings.
+// If no valid domain is found, the default domain is used. The repository name
+// is expected to have been validated already.
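+// For example, "busybox" splits into ("docker.io", "library/busybox") and
+// "example.com:8000/private/moonbase" into ("example.com:8000", "private/moonbase").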
+func splitDockerDomain(name string) (domain, remainder string) {
+ i := strings.IndexRune(name, '/')
+ if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+ domain, remainder = defaultDomain, name
+ } else {
+ domain, remainder = name[:i], name[i+1:]
+ }
+ if domain == legacyDefaultDomain {
+ domain = defaultDomain
+ }
+ if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+ remainder = officialRepoName + "/" + remainder
+ }
+ return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// It returns a familiarized, name-only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/<official repo name>"
+ if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+ repo.path = split[1]
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
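+// For example (mirroring normalize_test.go), a name-only reference for
+// "docker.io/library/busybox" becomes "docker.io/library/busybox:latest",
+// while references that already carry a tag or digest are returned unchanged.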
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // Default tag must be valid, to create a NamedTagged
+ // type with non-validated input the WithTag function
+ // should be used instead
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
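+//
+// Illustrative usage (a sketch mirroring normalize_test.go):
+//
+//	ref, _ := ParseAnyReference("redis")
+//	// ref.String() == "docker.io/library/redis"; a bare 64-hex-digit string would
+//	// instead be returned as a sha256 digest reference.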
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
diff --git a/docker/reference/normalize_test.go b/docker/reference/normalize_test.go
new file mode 100644
index 0000000..a21c800
--- /dev/null
+++ b/docker/reference/normalize_test.go
@@ -0,0 +1,652 @@
+package reference
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/opencontainers/go-digest"
+)
+
+func TestValidateReferenceName(t *testing.T) {
+ validRepoNames := []string{
+ "docker/docker",
+ "library/debian",
+ "debian",
+ "docker.io/docker/docker",
+ "docker.io/library/debian",
+ "docker.io/debian",
+ "index.docker.io/docker/docker",
+ "index.docker.io/library/debian",
+ "index.docker.io/debian",
+ "127.0.0.1:5000/docker/docker",
+ "127.0.0.1:5000/library/debian",
+ "127.0.0.1:5000/debian",
+ "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",
+
+ // This test case was moved from invalid to valid since it is valid input
+		// when specified with a hostname; the hostname removes the ambiguity about
+		// whether the value is an identifier or a repository name.
+ "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
+ }
+ invalidRepoNames := []string{
+ "https://github.com/docker/docker",
+ "docker/Docker",
+ "-docker",
+ "-docker/docker",
+ "-docker.io/docker/docker",
+ "docker///docker",
+ "docker.io/docker/Docker",
+ "docker.io/docker///docker",
+ "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
+ }
+
+ for _, name := range invalidRepoNames {
+ _, err := ParseNormalizedNamed(name)
+ if err == nil {
+ t.Fatalf("Expected invalid repo name for %q", name)
+ }
+ }
+
+ for _, name := range validRepoNames {
+ _, err := ParseNormalizedNamed(name)
+ if err != nil {
+ t.Fatalf("Error parsing repo name %s, got: %q", name, err)
+ }
+ }
+}
+
+func TestValidateRemoteName(t *testing.T) {
+ validRepositoryNames := []string{
+ // Sanity check.
+ "docker/docker",
+
+ // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden).
+ "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",
+
+ // Allow embedded hyphens.
+ "docker-rules/docker",
+
+ // Allow multiple hyphens as well.
+ "docker---rules/docker",
+
+ // Username doc and image name docker being tested.
+ "doc/docker",
+
+ // single character names are now allowed.
+ "d/docker",
+ "jess/t",
+
+ // Consecutive underscores.
+ "dock__er/docker",
+ }
+ for _, repositoryName := range validRepositoryNames {
+ _, err := ParseNormalizedNamed(repositoryName)
+ if err != nil {
+ t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err)
+ }
+ }
+
+ invalidRepositoryNames := []string{
+ // Disallow capital letters.
+ "docker/Docker",
+
+ // Only allow one slash.
+ "docker///docker",
+
+ // Disallow 64-character hexadecimal.
+ "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
+
+ // Disallow leading and trailing hyphens in namespace.
+ "-docker/docker",
+ "docker-/docker",
+ "-docker-/docker",
+
+ // Don't allow underscores everywhere (as opposed to hyphens).
+ "____/____",
+
+ "_docker/_docker",
+
+ // Disallow consecutive periods.
+ "dock..er/docker",
+ "dock_.er/docker",
+ "dock-.er/docker",
+
+ // No repository.
+ "docker/",
+
+ // namespace too long
+ "this_is_not_a_valid_namespace_because_its_length_is_greater_than_255_this_is_not_a_valid_namespace_because_its_length_is_greater_than_255_this_is_not_a_valid_namespace_because_its_length_is_greater_than_255_this_is_not_a_valid_namespace_because_its_length_is_greater_than_255/docker",
+ }
+ for _, repositoryName := range invalidRepositoryNames {
+ if _, err := ParseNormalizedNamed(repositoryName); err == nil {
+ t.Errorf("Repository name should be invalid: %v", repositoryName)
+ }
+ }
+}
+
+func TestParseRepositoryInfo(t *testing.T) {
+ type tcase struct {
+ RemoteName, FamiliarName, FullName, AmbiguousName, Domain string
+ }
+
+ tcases := []tcase{
+ {
+ RemoteName: "fooo/bar",
+ FamiliarName: "fooo/bar",
+ FullName: "docker.io/fooo/bar",
+ AmbiguousName: "index.docker.io/fooo/bar",
+ Domain: "docker.io",
+ },
+ {
+ RemoteName: "library/ubuntu",
+ FamiliarName: "ubuntu",
+ FullName: "docker.io/library/ubuntu",
+ AmbiguousName: "library/ubuntu",
+ Domain: "docker.io",
+ },
+ {
+ RemoteName: "nonlibrary/ubuntu",
+ FamiliarName: "nonlibrary/ubuntu",
+ FullName: "docker.io/nonlibrary/ubuntu",
+ AmbiguousName: "",
+ Domain: "docker.io",
+ },
+ {
+ RemoteName: "other/library",
+ FamiliarName: "other/library",
+ FullName: "docker.io/other/library",
+ AmbiguousName: "",
+ Domain: "docker.io",
+ },
+ {
+ RemoteName: "private/moonbase",
+ FamiliarName: "127.0.0.1:8000/private/moonbase",
+ FullName: "127.0.0.1:8000/private/moonbase",
+ AmbiguousName: "",
+ Domain: "127.0.0.1:8000",
+ },
+ {
+ RemoteName: "privatebase",
+ FamiliarName: "127.0.0.1:8000/privatebase",
+ FullName: "127.0.0.1:8000/privatebase",
+ AmbiguousName: "",
+ Domain: "127.0.0.1:8000",
+ },
+ {
+ RemoteName: "private/moonbase",
+ FamiliarName: "example.com/private/moonbase",
+ FullName: "example.com/private/moonbase",
+ AmbiguousName: "",
+ Domain: "example.com",
+ },
+ {
+ RemoteName: "privatebase",
+ FamiliarName: "example.com/privatebase",
+ FullName: "example.com/privatebase",
+ AmbiguousName: "",
+ Domain: "example.com",
+ },
+ {
+ RemoteName: "private/moonbase",
+ FamiliarName: "example.com:8000/private/moonbase",
+ FullName: "example.com:8000/private/moonbase",
+ AmbiguousName: "",
+ Domain: "example.com:8000",
+ },
+ {
+ RemoteName: "privatebasee",
+ FamiliarName: "example.com:8000/privatebasee",
+ FullName: "example.com:8000/privatebasee",
+ AmbiguousName: "",
+ Domain: "example.com:8000",
+ },
+ {
+ RemoteName: "library/ubuntu-12.04-base",
+ FamiliarName: "ubuntu-12.04-base",
+ FullName: "docker.io/library/ubuntu-12.04-base",
+ AmbiguousName: "index.docker.io/library/ubuntu-12.04-base",
+ Domain: "docker.io",
+ },
+ {
+ RemoteName: "library/foo",
+ FamiliarName: "foo",
+ FullName: "docker.io/library/foo",
+ AmbiguousName: "docker.io/foo",
+ Domain: "docker.io",
+ },
+ {
+ RemoteName: "library/foo/bar",
+ FamiliarName: "library/foo/bar",
+ FullName: "docker.io/library/foo/bar",
+ AmbiguousName: "",
+ Domain: "docker.io",
+ },
+ {
+ RemoteName: "store/foo/bar",
+ FamiliarName: "store/foo/bar",
+ FullName: "docker.io/store/foo/bar",
+ AmbiguousName: "",
+ Domain: "docker.io",
+ },
+ }
+
+ for _, tcase := range tcases {
+ refStrings := []string{tcase.FamiliarName, tcase.FullName}
+ if tcase.AmbiguousName != "" {
+ refStrings = append(refStrings, tcase.AmbiguousName)
+ }
+
+ var refs []Named
+ for _, r := range refStrings {
+ named, err := ParseNormalizedNamed(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ refs = append(refs, named)
+ }
+
+ for _, r := range refs {
+ if expected, actual := tcase.FamiliarName, FamiliarName(r); expected != actual {
+ t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual)
+ }
+ if expected, actual := tcase.FullName, r.String(); expected != actual {
+ t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual)
+ }
+ if expected, actual := tcase.Domain, Domain(r); expected != actual {
+ t.Fatalf("Invalid domain for %q. Expected %q, got %q", r, expected, actual)
+ }
+ if expected, actual := tcase.RemoteName, Path(r); expected != actual {
+ t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual)
+ }
+ }
+ }
+}
+
+func TestParseReferenceWithTagAndDigest(t *testing.T) {
+ shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"
+ ref, err := ParseNormalizedNamed(shortRef)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected {
+ t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual)
+ }
+
+ if _, isTagged := ref.(NamedTagged); !isTagged {
+ t.Fatalf("Reference from %q should support tag", ref)
+ }
+ if _, isCanonical := ref.(Canonical); !isCanonical {
+ t.Fatalf("Reference from %q should support digest", ref)
+ }
+ if expected, actual := shortRef, FamiliarString(ref); actual != expected {
+ t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual)
+ }
+}
+
+func TestInvalidReferenceComponents(t *testing.T) {
+ if _, err := ParseNormalizedNamed("-foo"); err == nil {
+ t.Fatal("Expected WithName to detect invalid name")
+ }
+ ref, err := ParseNormalizedNamed("busybox")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := WithTag(ref, "-foo"); err == nil {
+ t.Fatal("Expected WithName to detect invalid tag")
+ }
+ if _, err := WithDigest(ref, digest.Digest("foo")); err == nil {
+ t.Fatal("Expected WithDigest to detect invalid digest")
+ }
+}
+
+func equalReference(r1, r2 Reference) bool {
+ switch v1 := r1.(type) {
+ case digestReference:
+ if v2, ok := r2.(digestReference); ok {
+ return v1 == v2
+ }
+ case repository:
+ if v2, ok := r2.(repository); ok {
+ return v1 == v2
+ }
+ case taggedReference:
+ if v2, ok := r2.(taggedReference); ok {
+ return v1 == v2
+ }
+ case canonicalReference:
+ if v2, ok := r2.(canonicalReference); ok {
+ return v1 == v2
+ }
+ case reference:
+ if v2, ok := r2.(reference); ok {
+ return v1 == v2
+ }
+ }
+ return false
+}
+
+func TestParseAnyReference(t *testing.T) {
+ tcases := []struct {
+ Reference string
+ Equivalent string
+ Expected Reference
+ }{
+ {
+ Reference: "redis",
+ Equivalent: "docker.io/library/redis",
+ },
+ {
+ Reference: "redis:latest",
+ Equivalent: "docker.io/library/redis:latest",
+ },
+ {
+ Reference: "docker.io/library/redis:latest",
+ Equivalent: "docker.io/library/redis:latest",
+ },
+ {
+ Reference: "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ },
+ {
+ Reference: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ },
+ {
+ Reference: "dmcgowan/myapp",
+ Equivalent: "docker.io/dmcgowan/myapp",
+ },
+ {
+ Reference: "dmcgowan/myapp:latest",
+ Equivalent: "docker.io/dmcgowan/myapp:latest",
+ },
+ {
+ Reference: "docker.io/mcgowan/myapp:latest",
+ Equivalent: "docker.io/mcgowan/myapp:latest",
+ },
+ {
+ Reference: "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ },
+ {
+ Reference: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ },
+ {
+ Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
+ Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ },
+ {
+ Reference: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
+ Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
+ },
+ {
+ Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
+ Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
+ },
+ }
+
+ for _, tcase := range tcases {
+ var ref Reference
+ var err error
+ ref, err = ParseAnyReference(tcase.Reference)
+ if err != nil {
+ t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err)
+ }
+ if ref.String() != tcase.Equivalent {
+ t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent)
+ }
+
+ expected := tcase.Expected
+ if expected == nil {
+ expected, err = Parse(tcase.Equivalent)
+ if err != nil {
+ t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err)
+ }
+ }
+ if !equalReference(ref, expected) {
+ t.Errorf("Unexpected reference %#v, expected %#v", ref, expected)
+ }
+ }
+}
+
+func TestNormalizedSplitHostname(t *testing.T) {
+ testcases := []struct {
+ input string
+ domain string
+ name string
+ }{
+ {
+ input: "test.com/foo",
+ domain: "test.com",
+ name: "foo",
+ },
+ {
+ input: "test_com/foo",
+ domain: "docker.io",
+ name: "test_com/foo",
+ },
+ {
+ input: "docker/migrator",
+ domain: "docker.io",
+ name: "docker/migrator",
+ },
+ {
+ input: "test.com:8080/foo",
+ domain: "test.com:8080",
+ name: "foo",
+ },
+ {
+ input: "test-com:8080/foo",
+ domain: "test-com:8080",
+ name: "foo",
+ },
+ {
+ input: "foo",
+ domain: "docker.io",
+ name: "library/foo",
+ },
+ {
+ input: "xn--n3h.com/foo",
+ domain: "xn--n3h.com",
+ name: "foo",
+ },
+ {
+ input: "xn--n3h.com:18080/foo",
+ domain: "xn--n3h.com:18080",
+ name: "foo",
+ },
+ {
+ input: "docker.io/foo",
+ domain: "docker.io",
+ name: "library/foo",
+ },
+ {
+ input: "docker.io/library/foo",
+ domain: "docker.io",
+ name: "library/foo",
+ },
+ {
+ input: "docker.io/library/foo/bar",
+ domain: "docker.io",
+ name: "library/foo/bar",
+ },
+ }
+ for _, testcase := range testcases {
+ failf := func(format string, v ...interface{}) {
+ t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
+ t.Fail()
+ }
+
+ named, err := ParseNormalizedNamed(testcase.input)
+ if err != nil {
+ failf("error parsing name: %s", err)
+ }
+ domain, name := SplitHostname(named)
+ if domain != testcase.domain {
+ failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
+ }
+ if name != testcase.name {
+ failf("unexpected name: got %q, expected %q", name, testcase.name)
+ }
+ }
+}
+
+func TestMatchError(t *testing.T) {
+ named, err := ParseAnyReference("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = FamiliarMatch("[-x]", named)
+ if err == nil {
+ t.Fatalf("expected an error, got nothing")
+ }
+}
+
+func TestMatch(t *testing.T) {
+ matchCases := []struct {
+ reference string
+ pattern string
+ expected bool
+ }{
+ {
+ reference: "foo",
+ pattern: "foo/**/ba[rz]",
+ expected: false,
+ },
+ {
+ reference: "foo/any/bat",
+ pattern: "foo/**/ba[rz]",
+ expected: false,
+ },
+ {
+ reference: "foo/a/bar",
+ pattern: "foo/**/ba[rz]",
+ expected: true,
+ },
+ {
+ reference: "foo/b/baz",
+ pattern: "foo/**/ba[rz]",
+ expected: true,
+ },
+ {
+ reference: "foo/c/baz:tag",
+ pattern: "foo/**/ba[rz]",
+ expected: true,
+ },
+ {
+ reference: "foo/c/baz:tag",
+ pattern: "foo/*/baz:tag",
+ expected: true,
+ },
+ {
+ reference: "foo/c/baz:tag",
+ pattern: "foo/c/baz:tag",
+ expected: true,
+ },
+ {
+ reference: "example.com/foo/c/baz:tag",
+ pattern: "*/foo/c/baz",
+ expected: true,
+ },
+ {
+ reference: "example.com/foo/c/baz:tag",
+ pattern: "example.com/foo/c/baz",
+ expected: true,
+ },
+ }
+ for _, c := range matchCases {
+ named, err := ParseAnyReference(c.reference)
+ if err != nil {
+ t.Fatal(err)
+ }
+ actual, err := FamiliarMatch(c.pattern, named)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if actual != c.expected {
+ t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual)
+ }
+ }
+}
+
+func TestParseDockerRef(t *testing.T) {
+ testcases := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "nothing",
+ input: "busybox",
+ expected: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "tag only",
+ input: "busybox:latest",
+ expected: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "digest only",
+ input: "busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+ expected: "docker.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+ },
+ {
+ name: "path only",
+ input: "library/busybox",
+ expected: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "hostname only",
+ input: "docker.io/busybox",
+ expected: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "no tag",
+ input: "docker.io/library/busybox",
+ expected: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "no path",
+ input: "docker.io/busybox:latest",
+ expected: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "no hostname",
+ input: "library/busybox:latest",
+ expected: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "full reference with tag",
+ input: "docker.io/library/busybox:latest",
+ expected: "docker.io/library/busybox:latest",
+ },
+ {
+ name: "gcr reference without tag",
+ input: "gcr.io/library/busybox",
+ expected: "gcr.io/library/busybox:latest",
+ },
+ {
+ name: "both tag and digest",
+ input: "gcr.io/library/busybox:latest@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+ expected: "gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+ },
+ }
+ for _, test := range testcases {
+ t.Run(test.name, func(t *testing.T) {
+ normalized, err := ParseDockerRef(test.input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ output := normalized.String()
+ if output != test.expected {
+ t.Fatalf("expected %q to be parsed as %v, got %v", test.input, test.expected, output)
+ }
+ _, err = Parse(output)
+ if err != nil {
+ t.Fatalf("%q should be a valid reference, but got an error: %v", output, err)
+ }
+ })
+ }
+}
diff --git a/docker/reference/reference.go b/docker/reference/reference.go
new file mode 100644
index 0000000..6c5484c
--- /dev/null
+++ b/docker/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] path-component ['/' path-component]*
+// domain := domain-component ['.' domain-component]* [':' port-number]
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alphanumeric [separator alphanumeric]*
+// alphanumeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+// short-identifier := /[a-f0-9]{6,64}/
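+//
+// For example, "docker.io/library/busybox:latest@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582"
+// is a syntactically valid reference containing a domain, a path, a tag, and a digest.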
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object that has a digest
+// by which it can be referenced.
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique name,
+// including a name with domain and digest.
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+ Named
+ Domain() string
+ Path() string
+}
+
+// Domain returns the domain part of the Named reference
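+// (for example, "test.com:8080" for a reference named "test.com:8080/foo").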
+func Domain(named Named) string {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain()
+ }
+ domain, _ := splitDomain(named.Name())
+ return domain
+}
+
+// Path returns the name without the domain part of the Named reference
+func Path(named Named) (name string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Path()
+ }
+ _, path := splitDomain(named.Name())
+ return path
+}
+
+func splitDomain(name string) (string, string) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+// Deprecated: Use Domain or Path
+func SplitHostname(named Named) (string, string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain(), r.Path()
+ }
+ return splitDomain(named.Name())
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
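+//
+// Illustrative usage (a sketch mirroring reference_test.go; note that, unlike
+// ParseNormalizedNamed, short names such as "busybox" are not expanded):
+//
+//	ref, _ := Parse("test.com/repo:tag")
+//	// ref implements Named ("test.com/repo") and Tagged ("tag")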
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+ return nil, ErrNameContainsUppercase
+ }
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(matches[1]) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ var repo repository
+
+ nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+ if len(nameMatch) == 3 {
+ repo.domain = nameMatch[1]
+ repo.path = nameMatch[2]
+ } else {
+ repo.domain = ""
+ repo.path = matches[1]
+ }
+
+ ref := reference{
+ namedRepository: repo,
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.Parse(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
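+//
+// For example (mirroring reference_test.go), "docker.io/library/foo" parses
+// successfully, while "foo" and "docker.io/foo" fail with ErrNameNotCanonical.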
+func ParseNamed(s string) (Named, error) {
+ named, err := ParseNormalizedNamed(s)
+ if err != nil {
+ return nil, err
+ }
+ if named.String() != s {
+ return nil, ErrNameNotCanonical
+ }
+ return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ if len(name) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return nil, ErrReferenceInvalidFormat
+ }
+ return repository{
+ domain: match[1],
+ path: match[2],
+ }, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
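+// For example (mirroring reference_test.go), combining a name parsed from
+// "test.com/foo" with the tag "tag" yields a reference whose String() is
+// "test.com/foo:tag".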
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if canonical, ok := name.(Canonical); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tag,
+ digest: canonical.Digest(),
+ }, nil
+ }
+ return taggedReference{
+ namedRepository: repo,
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
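+// For example (mirroring reference_test.go), combining a name parsed from
+// "test.com/foo" with a sha256 digest yields a reference whose String() is
+// "test.com/foo@sha256:<hex>".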
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if tagged, ok := name.(Tagged); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tagged.Tag(),
+ digest: digest,
+ }, nil
+ }
+ return canonicalReference{
+ namedRepository: repo,
+ digest: digest,
+ }, nil
+}
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+ domain, path := SplitHostname(ref)
+ return repository{
+ domain: domain,
+ path: path,
+ }
+}
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.Name() == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ namedRepository: ref.namedRepository,
+ digest: ref.digest,
+ }
+ }
+ return ref.namedRepository
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ namedRepository: ref.namedRepository,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ namedRepository
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository struct {
+ domain string
+ path string
+}
+
+func (r repository) String() string {
+ return r.Name()
+}
+
+func (r repository) Name() string {
+ if r.domain == "" {
+ return r.path
+ }
+ return r.domain + "/" + r.path
+}
+
+func (r repository) Domain() string {
+ return r.domain
+}
+
+func (r repository) Path() string {
+ return r.path
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return digest.Digest(d).String()
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ namedRepository
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.Name() + ":" + t.tag
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ namedRepository
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.Name() + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
diff --git a/docker/reference/reference_test.go b/docker/reference/reference_test.go
new file mode 100644
index 0000000..ce1a11d
--- /dev/null
+++ b/docker/reference/reference_test.go
@@ -0,0 +1,657 @@
+package reference
+
+import (
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "encoding/json"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/opencontainers/go-digest"
+)
+
+func TestReferenceParse(t *testing.T) {
+ // referenceTestcases is a unified set of testcases for
+ // testing the parsing of references
+ referenceTestcases := []struct {
+ // input is the repository name or name component testcase
+ input string
+ // err is the error expected from Parse, or nil
+ err error
+ // repository is the string representation for the reference
+ repository string
+ // domain is the domain expected in the reference
+ domain string
+ // tag is the tag for the reference
+ tag string
+ // digest is the digest for the reference (enforces digest reference)
+ digest string
+ }{
+ {
+ input: "test_com",
+ repository: "test_com",
+ },
+ {
+ input: "test.com:tag",
+ repository: "test.com",
+ tag: "tag",
+ },
+ {
+ input: "test.com:5000",
+ repository: "test.com",
+ tag: "5000",
+ },
+ {
+ input: "test.com/repo:tag",
+ domain: "test.com",
+ repository: "test.com/repo",
+ tag: "tag",
+ },
+ {
+ input: "test:5000/repo",
+ domain: "test:5000",
+ repository: "test:5000/repo",
+ },
+ {
+ input: "test:5000/repo:tag",
+ domain: "test:5000",
+ repository: "test:5000/repo",
+ tag: "tag",
+ },
+ {
+ input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ domain: "test:5000",
+ repository: "test:5000/repo",
+ digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ },
+ {
+ input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ domain: "test:5000",
+ repository: "test:5000/repo",
+ tag: "tag",
+ digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ },
+ {
+ input: "test:5000/repo",
+ domain: "test:5000",
+ repository: "test:5000/repo",
+ },
+ {
+ input: "",
+ err: ErrNameEmpty,
+ },
+ {
+ input: ":justtag",
+ err: ErrReferenceInvalidFormat,
+ },
+ {
+ input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ err: ErrReferenceInvalidFormat,
+ },
+ {
+ input: "repo@sha256:ffffffffffffffffffffffffffffffffff",
+ err: digest.ErrDigestInvalidLength,
+ },
+ {
+ input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ err: digest.ErrDigestUnsupported,
+ },
+ {
+ input: "Uppercase:tag",
+ err: ErrNameContainsUppercase,
+ },
+ // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes.
+ // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175
+ // {
+ // input: "Uppercase/lowercase:tag",
+ // err: ErrNameContainsUppercase,
+ // },
+ {
+ input: "test:5000/Uppercase/lowercase:tag",
+ err: ErrNameContainsUppercase,
+ },
+ {
+ input: "lowercase:Uppercase",
+ repository: "lowercase",
+ tag: "Uppercase",
+ },
+ {
+ input: strings.Repeat("a/", 128) + "a:tag",
+ err: ErrNameTooLong,
+ },
+ {
+ input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max",
+ domain: "a",
+ repository: strings.Repeat("a/", 127) + "a",
+ tag: "tag-puts-this-over-max",
+ },
+ {
+ input: "aa/asdf$$^/aa",
+ err: ErrReferenceInvalidFormat,
+ },
+ {
+ input: "sub-dom1.foo.com/bar/baz/quux",
+ domain: "sub-dom1.foo.com",
+ repository: "sub-dom1.foo.com/bar/baz/quux",
+ },
+ {
+ input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag",
+ domain: "sub-dom1.foo.com",
+ repository: "sub-dom1.foo.com/bar/baz/quux",
+ tag: "some-long-tag",
+ },
+ {
+ input: "b.gcr.io/test.example.com/my-app:test.example.com",
+ domain: "b.gcr.io",
+ repository: "b.gcr.io/test.example.com/my-app",
+ tag: "test.example.com",
+ },
+ {
+ input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode
+ domain: "xn--n3h.com",
+ repository: "xn--n3h.com/myimage",
+ tag: "xn--n3h.com",
+ },
+ {
+ input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode
+ domain: "xn--7o8h.com",
+ repository: "xn--7o8h.com/myimage",
+ tag: "xn--7o8h.com",
+ digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ },
+ {
+ input: "foo_bar.com:8080",
+ repository: "foo_bar.com",
+ tag: "8080",
+ },
+ {
+ input: "foo/foo_bar.com:8080",
+ domain: "foo",
+ repository: "foo/foo_bar.com",
+ tag: "8080",
+ },
+ }
+ for _, testcase := range referenceTestcases {
+ failf := func(format string, v ...interface{}) {
+ t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
+ t.Fail()
+ }
+
+ repo, err := Parse(testcase.input)
+ if testcase.err != nil {
+ if err == nil {
+ failf("missing expected error: %v", testcase.err)
+ } else if testcase.err != err {
+ failf("mismatched error: got %v, expected %v", err, testcase.err)
+ }
+ continue
+ } else if err != nil {
+ failf("unexpected parse error: %v", err)
+ continue
+ }
+ if repo.String() != testcase.input {
+ failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input)
+ }
+
+ if named, ok := repo.(Named); ok {
+ if named.Name() != testcase.repository {
+ failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository)
+ }
+ domain, _ := SplitHostname(named)
+ if domain != testcase.domain {
+ failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
+ }
+ } else if testcase.repository != "" || testcase.domain != "" {
+ failf("expected named type, got %T", repo)
+ }
+
+ tagged, ok := repo.(Tagged)
+ if testcase.tag != "" {
+ if ok {
+ if tagged.Tag() != testcase.tag {
+ failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
+ }
+ } else {
+ failf("expected tagged type, got %T", repo)
+ }
+ } else if ok {
+ failf("unexpected tagged type")
+ }
+
+ digested, ok := repo.(Digested)
+ if testcase.digest != "" {
+ if ok {
+ if digested.Digest().String() != testcase.digest {
+ failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
+ }
+ } else {
+ failf("expected digested type, got %T", repo)
+ }
+ } else if ok {
+ failf("unexpected digested type")
+ }
+ }
+}
+
+// TestWithNameFailure tests cases where WithName should fail. Cases where it
+// should succeed are covered by TestSplitHostname, below.
+func TestWithNameFailure(t *testing.T) {
+ testcases := []struct {
+ input string
+ err error
+ }{
+ {
+ input: "",
+ err: ErrNameEmpty,
+ },
+ {
+ input: ":justtag",
+ err: ErrReferenceInvalidFormat,
+ },
+ {
+ input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ err: ErrReferenceInvalidFormat,
+ },
+ {
+ input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ err: ErrReferenceInvalidFormat,
+ },
+ {
+ input: strings.Repeat("a/", 128) + "a:tag",
+ err: ErrNameTooLong,
+ },
+ {
+ input: "aa/asdf$$^/aa",
+ err: ErrReferenceInvalidFormat,
+ },
+ }
+ for _, testcase := range testcases {
+ failf := func(format string, v ...interface{}) {
+ t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
+ t.Fail()
+ }
+
+ _, err := WithName(testcase.input)
+ if err == nil {
+ failf("no error parsing name. expected: %s", testcase.err)
+ }
+ }
+}
+
+func TestSplitHostname(t *testing.T) {
+ testcases := []struct {
+ input string
+ domain string
+ name string
+ }{
+ {
+ input: "test.com/foo",
+ domain: "test.com",
+ name: "foo",
+ },
+ {
+ input: "test_com/foo",
+ domain: "",
+ name: "test_com/foo",
+ },
+ {
+ input: "test:8080/foo",
+ domain: "test:8080",
+ name: "foo",
+ },
+ {
+ input: "test.com:8080/foo",
+ domain: "test.com:8080",
+ name: "foo",
+ },
+ {
+ input: "test-com:8080/foo",
+ domain: "test-com:8080",
+ name: "foo",
+ },
+ {
+ input: "xn--n3h.com:18080/foo",
+ domain: "xn--n3h.com:18080",
+ name: "foo",
+ },
+ }
+ for _, testcase := range testcases {
+ failf := func(format string, v ...interface{}) {
+ t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
+ t.Fail()
+ }
+
+ named, err := WithName(testcase.input)
+ if err != nil {
+ failf("error parsing name: %s", err)
+ }
+ domain, name := SplitHostname(named)
+ if domain != testcase.domain {
+ failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
+ }
+ if name != testcase.name {
+ failf("unexpected name: got %q, expected %q", name, testcase.name)
+ }
+ }
+}
+
+type serializationType struct {
+ Description string
+ Field Field
+}
+
+func TestSerialization(t *testing.T) {
+ testcases := []struct {
+ description string
+ input string
+ name string
+ tag string
+ digest string
+ err error
+ }{
+ {
+ description: "empty value",
+ err: ErrNameEmpty,
+ },
+ {
+ description: "just a name",
+ input: "example.com:8000/named",
+ name: "example.com:8000/named",
+ },
+ {
+ description: "name with a tag",
+ input: "example.com:8000/named:tagged",
+ name: "example.com:8000/named",
+ tag: "tagged",
+ },
+ {
+ description: "name with digest",
+ input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112",
+ name: "other.com/named",
+ digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112",
+ },
+ }
+ for _, testcase := range testcases {
+ failf := func(format string, v ...interface{}) {
+ t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
+ t.Fail()
+ }
+
+ m := map[string]string{
+ "Description": testcase.description,
+ "Field": testcase.input,
+ }
+ b, err := json.Marshal(m)
+ if err != nil {
+ failf("error marshaling: %v", err)
+ }
+ t := serializationType{}
+
+ if err := json.Unmarshal(b, &t); err != nil {
+ if testcase.err == nil {
+ failf("error unmarshaling: %v", err)
+ }
+ if err != testcase.err {
+ failf("wrong error, expected %v, got %v", testcase.err, err)
+ }
+
+ continue
+ } else if testcase.err != nil {
+ failf("expected error unmarshaling: %v", testcase.err)
+ }
+
+ if t.Description != testcase.description {
+ failf("wrong description, expected %q, got %q", testcase.description, t.Description)
+ }
+
+ ref := t.Field.Reference()
+
+ if named, ok := ref.(Named); ok {
+ if named.Name() != testcase.name {
+ failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name)
+ }
+ } else if testcase.name != "" {
+ failf("expected named type, got %T", ref)
+ }
+
+ tagged, ok := ref.(Tagged)
+ if testcase.tag != "" {
+ if ok {
+ if tagged.Tag() != testcase.tag {
+ failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
+ }
+ } else {
+ failf("expected tagged type, got %T", ref)
+ }
+ } else if ok {
+ failf("unexpected tagged type")
+ }
+
+ digested, ok := ref.(Digested)
+ if testcase.digest != "" {
+ if ok {
+ if digested.Digest().String() != testcase.digest {
+ failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
+ }
+ } else {
+ failf("expected digested type, got %T", ref)
+ }
+ } else if ok {
+ failf("unexpected digested type")
+ }
+
+ t = serializationType{
+ Description: testcase.description,
+ Field: AsField(ref),
+ }
+
+ b2, err := json.Marshal(t)
+ if err != nil {
+ failf("error marshaling serialization type: %v", err)
+ }
+
+ if string(b) != string(b2) {
+ failf("unexpected serialized value: expected %q, got %q", string(b), string(b2))
+ }
+
+ // Ensure t.Field is not implementing "Reference" directly, getting
+ // around the Reference type system
+ var fieldInterface interface{} = t.Field
+ if _, ok := fieldInterface.(Reference); ok {
+ failf("field should not implement Reference interface")
+ }
+ }
+}
+
+func TestWithTag(t *testing.T) {
+ testcases := []struct {
+ name string
+ digest digest.Digest
+ tag string
+ combined string
+ }{
+ {
+ name: "test.com/foo",
+ tag: "tag",
+ combined: "test.com/foo:tag",
+ },
+ {
+ name: "foo",
+ tag: "tag2",
+ combined: "foo:tag2",
+ },
+ {
+ name: "test.com:8000/foo",
+ tag: "tag4",
+ combined: "test.com:8000/foo:tag4",
+ },
+ {
+ name: "test.com:8000/foo",
+ tag: "TAG5",
+ combined: "test.com:8000/foo:TAG5",
+ },
+ {
+ name: "test.com:8000/foo",
+ digest: "sha256:1234567890098765432112345667890098765",
+ tag: "TAG5",
+ combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765",
+ },
+ }
+ for _, testcase := range testcases {
+ failf := func(format string, v ...interface{}) {
+ t.Logf(strconv.Quote(testcase.name)+": "+format, v...)
+ t.Fail()
+ }
+
+ named, err := WithName(testcase.name)
+ if err != nil {
+ failf("error parsing name: %s", err)
+ }
+ if testcase.digest != "" {
+ canonical, err := WithDigest(named, testcase.digest)
+ if err != nil {
+ failf("error adding digest")
+ }
+ named = canonical
+ }
+
+ tagged, err := WithTag(named, testcase.tag)
+ if err != nil {
+ failf("WithTag failed: %s", err)
+ }
+ if tagged.String() != testcase.combined {
+ failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined)
+ }
+ }
+}
+
+func TestWithDigest(t *testing.T) {
+ testcases := []struct {
+ name string
+ digest digest.Digest
+ tag string
+ combined string
+ }{
+ {
+ name: "test.com/foo",
+ digest: "sha256:1234567890098765432112345667890098765",
+ combined: "test.com/foo@sha256:1234567890098765432112345667890098765",
+ },
+ {
+ name: "foo",
+ digest: "sha256:1234567890098765432112345667890098765",
+ combined: "foo@sha256:1234567890098765432112345667890098765",
+ },
+ {
+ name: "test.com:8000/foo",
+ digest: "sha256:1234567890098765432112345667890098765",
+ combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765",
+ },
+ {
+ name: "test.com:8000/foo",
+ digest: "sha256:1234567890098765432112345667890098765",
+ tag: "latest",
+ combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765",
+ },
+ }
+ for _, testcase := range testcases {
+ failf := func(format string, v ...interface{}) {
+ t.Logf(strconv.Quote(testcase.name)+": "+format, v...)
+ t.Fail()
+ }
+
+ named, err := WithName(testcase.name)
+ if err != nil {
+ failf("error parsing name: %s", err)
+ }
+ if testcase.tag != "" {
+ tagged, err := WithTag(named, testcase.tag)
+ if err != nil {
+ failf("error adding tag")
+ }
+ named = tagged
+ }
+ digested, err := WithDigest(named, testcase.digest)
+ if err != nil {
+ failf("WithDigest failed: %s", err)
+ }
+ if digested.String() != testcase.combined {
+ failf("unexpected: got %q, expected %q", digested.String(), testcase.combined)
+ }
+ }
+}
+
+func TestParseNamed(t *testing.T) {
+ testcases := []struct {
+ input string
+ domain string
+ name string
+ err error
+ }{
+ {
+ input: "test.com/foo",
+ domain: "test.com",
+ name: "foo",
+ },
+ {
+ input: "test:8080/foo",
+ domain: "test:8080",
+ name: "foo",
+ },
+ {
+ input: "test_com/foo",
+ err: ErrNameNotCanonical,
+ },
+ {
+ input: "test.com",
+ err: ErrNameNotCanonical,
+ },
+ {
+ input: "foo",
+ err: ErrNameNotCanonical,
+ },
+ {
+ input: "library/foo",
+ err: ErrNameNotCanonical,
+ },
+ {
+ input: "docker.io/library/foo",
+ domain: "docker.io",
+ name: "library/foo",
+ },
+ // Ambiguous case, parser will add "library/" to foo
+ {
+ input: "docker.io/foo",
+ err: ErrNameNotCanonical,
+ },
+ }
+ for _, testcase := range testcases {
+ failf := func(format string, v ...interface{}) {
+ t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
+ t.Fail()
+ }
+
+ named, err := ParseNamed(testcase.input)
+ if err != nil && testcase.err == nil {
+ failf("error parsing name: %s", err)
+ continue
+ } else if err == nil && testcase.err != nil {
+ failf("parsing succeeded: expected error %v", testcase.err)
+ continue
+ } else if err != testcase.err {
+ failf("unexpected error %v, expected %v", err, testcase.err)
+ continue
+ } else if err != nil {
+ continue
+ }
+
+ domain, name := SplitHostname(named)
+ if domain != testcase.domain {
+ failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
+ }
+ if name != testcase.name {
+ failf("unexpected name: got %q, expected %q", name, testcase.name)
+ }
+ }
+}
diff --git a/docker/reference/regexp-additions.go b/docker/reference/regexp-additions.go
new file mode 100644
index 0000000..7b15871
--- /dev/null
+++ b/docker/reference/regexp-additions.go
@@ -0,0 +1,6 @@
+package reference
+
+// IsFullIdentifier returns true if the specified string fully matches `IdentifierRegexp`.
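+// For example, it returns true for a 64-character lowercase hexadecimal string
+// and false for anything shorter, longer, or containing other characters.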
+func IsFullIdentifier(s string) bool {
+ return anchoredIdentifierRegexp.MatchString(s)
+}
diff --git a/docker/reference/regexp.go b/docker/reference/regexp.go
new file mode 100644
index 0000000..76ba5c2
--- /dev/null
+++ b/docker/reference/regexp.go
@@ -0,0 +1,156 @@
+package reference
+
+import (
+ "regexp"
+ "strings"
+
+ storageRegexp "github.com/containers/storage/pkg/regexp"
+)
+
+const (
+ // alphaNumeric defines the alpha numeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphaNumeric = `[a-z0-9]+`
+
+ // separator defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and multiple
+	// dashes. Repeated dashes and underscores are intentionally treated
+	// differently. Support for repeated dashes was added in order to allow
+	// valid hostnames as name components. Additionally, a double underscore is
+	// now allowed as a separator to loosen the restriction for previously
+	// supported names.
+ separator = `(?:[._]|__|[-]*)`
+
+	// domainComponent restricts the registry domain component of a
+	// repository name to start with a component as defined by DomainRegexp.
+ domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
+
+ // The string counterpart for TagRegexp.
+ tag = `[\w][\w.-]{0,127}`
+
+ // The string counterpart for DigestRegexp.
+ digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
+
+ // The string counterpart for IdentifierRegexp.
+ identifier = `([a-f0-9]{64})`
+
+ // The string counterpart for ShortIdentifierRegexp.
+ shortIdentifier = `([a-f0-9]{6,64})`
+)
+
+var (
+ // nameComponent restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+ nameComponent = expression(
+ alphaNumeric,
+ optional(repeated(separator, alphaNumeric)))
+
+ domain = expression(
+ domainComponent,
+ optional(repeated(literal(`.`), domainComponent)),
+ optional(literal(`:`), `[0-9]+`))
+ // DomainRegexp defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names.
+ DomainRegexp = re(domain)
+
+ // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = re(tag)
+
+ anchoredTag = anchored(tag)
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = storageRegexp.Delayed(anchoredTag)
+
+ // DigestRegexp matches valid digests.
+ DigestRegexp = re(digestPat)
+
+ anchoredDigest = anchored(digestPat)
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest)
+
+ namePat = expression(
+ optional(domain, literal(`/`)),
+ nameComponent,
+ optional(repeated(literal(`/`), nameComponent)))
+ // NameRegexp is the format for the name component of references. The
+ // regexp has capturing groups for the domain and name part omitting
+ // the separating forward slash from either.
+ NameRegexp = re(namePat)
+
+ anchoredName = anchored(
+ optional(capture(domain), literal(`/`)),
+ capture(nameComponent,
+ optional(repeated(literal(`/`), nameComponent))))
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // domain and trailing components.
+ anchoredNameRegexp = storageRegexp.Delayed(anchoredName)
+
+ referencePat = anchored(capture(namePat),
+ optional(literal(":"), capture(tag)),
+ optional(literal("@"), capture(digestPat)))
+ // ReferenceRegexp is the full supported format of a reference. The regexp
+ // is anchored and has capturing groups for name, tag, and digest
+ // components.
+ ReferenceRegexp = re(referencePat)
+
+ // IdentifierRegexp is the format for string identifier used as a
+ // content addressable identifier using sha256. These identifiers
+ // are like digests without the algorithm, since sha256 is used.
+ IdentifierRegexp = re(identifier)
+
+ // ShortIdentifierRegexp is the format used to represent a prefix
+ // of an identifier. A prefix may be used to match a sha256 identifier
+ // within a list of trusted identifiers.
+ ShortIdentifierRegexp = re(shortIdentifier)
+
+ anchoredIdentifier = anchored(identifier)
+ // anchoredIdentifierRegexp is used to check or match an
+ // identifier value, anchored at start and end of string.
+ anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier)
+)
+
+// re compiles the string to a regular expression.
+var re = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) string {
+ return regexp.QuoteMeta(s)
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...string) string {
+ return strings.Join(res, "")
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...string) string {
+ return group(expression(res...)) + `?`
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...string) string {
+ return group(expression(res...)) + `+`
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...string) string {
+ return `(?:` + expression(res...) + `)`
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...string) string {
+ return `(` + expression(res...) + `)`
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...string) string {
+ return `^` + expression(res...) + `$`
+}
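
As a hedged usage sketch (assuming the package is imported under the path shown in this patch), the exported ReferenceRegexp can be used directly to split a reference into its name, tag, and digest capture groups; the input string is simply one of the values exercised in the tests below:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// Capture groups are, in order: name, tag, digest (either of the last two may be empty).
	m := reference.ReferenceRegexp.FindStringSubmatch(
		"registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912")
	if m == nil {
		fmt.Println("not a valid reference")
		return
	}
	fmt.Println("name:", m[1])
	fmt.Println("tag:", m[2])
	fmt.Println("digest:", m[3])
}
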
diff --git a/docker/reference/regexp_test.go b/docker/reference/regexp_test.go
new file mode 100644
index 0000000..289e559
--- /dev/null
+++ b/docker/reference/regexp_test.go
@@ -0,0 +1,525 @@
+package reference
+
+import (
+ "regexp"
+ "strings"
+ "testing"
+)
+
+type regexpMatch struct {
+ input string
+ match bool
+ subs []string
+}
+type Regex interface {
+ FindStringSubmatch(s string) []string
+ NumSubexp() int
+}
+
+func checkRegexp(t *testing.T, r Regex, m regexpMatch) {
+ matches := r.FindStringSubmatch(m.input)
+ if m.match && matches != nil {
+ if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input {
+ t.Fatalf("Bad match result %#v for %q", matches, m.input)
+ }
+ if len(matches) < (len(m.subs) + 1) {
+ t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input)
+ }
+ for i := range m.subs {
+ if m.subs[i] != matches[i+1] {
+ t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input)
+ }
+ }
+ } else if m.match {
+ t.Errorf("Expected match for %q", m.input)
+ } else if matches != nil {
+ t.Errorf("Unexpected match for %q", m.input)
+ }
+}
+
+func TestDomainRegexp(t *testing.T) {
+ hostcases := []regexpMatch{
+ {
+ input: "test.com",
+ match: true,
+ },
+ {
+ input: "test.com:10304",
+ match: true,
+ },
+ {
+ input: "test.com:http",
+ match: false,
+ },
+ {
+ input: "localhost",
+ match: true,
+ },
+ {
+ input: "localhost:8080",
+ match: true,
+ },
+ {
+ input: "a",
+ match: true,
+ },
+ {
+ input: "a.b",
+ match: true,
+ },
+ {
+ input: "ab.cd.com",
+ match: true,
+ },
+ {
+ input: "a-b.com",
+ match: true,
+ },
+ {
+ input: "-ab.com",
+ match: false,
+ },
+ {
+ input: "ab-.com",
+ match: false,
+ },
+ {
+ input: "ab.c-om",
+ match: true,
+ },
+ {
+ input: "ab.-com",
+ match: false,
+ },
+ {
+ input: "ab.com-",
+ match: false,
+ },
+ {
+ input: "0101.com",
+			match: true, // TODO(dmcgowan): validate whether this should be allowed
+ },
+ {
+ input: "001a.com",
+ match: true,
+ },
+ {
+ input: "b.gbc.io:443",
+ match: true,
+ },
+ {
+ input: "b.gbc.io",
+ match: true,
+ },
+ {
+ input: "xn--n3h.com", // ☃.com in punycode
+ match: true,
+ },
+ {
+ input: "Asdf.com", // uppercase character
+ match: true,
+ },
+ }
+ r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`)
+ for i := range hostcases {
+ checkRegexp(t, r, hostcases[i])
+ }
+}
+
+func TestFullNameRegexp(t *testing.T) {
+ if anchoredNameRegexp.NumSubexp() != 2 {
+ t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2",
+ anchoredNameRegexp.String(), anchoredNameRegexp.NumSubexp())
+ }
+
+ testcases := []regexpMatch{
+ {
+ input: "",
+ match: false,
+ },
+ {
+ input: "short",
+ match: true,
+ subs: []string{"", "short"},
+ },
+ {
+ input: "simple/name",
+ match: true,
+ subs: []string{"simple", "name"},
+ },
+ {
+ input: "library/ubuntu",
+ match: true,
+ subs: []string{"library", "ubuntu"},
+ },
+ {
+ input: "docker/stevvooe/app",
+ match: true,
+ subs: []string{"docker", "stevvooe/app"},
+ },
+ {
+ input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb",
+ match: true,
+ subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"},
+ },
+ {
+ input: "aa/aa/bb/bb/bb",
+ match: true,
+ subs: []string{"aa", "aa/bb/bb/bb"},
+ },
+ {
+ input: "a/a/a/a",
+ match: true,
+ subs: []string{"a", "a/a/a"},
+ },
+ {
+ input: "a/a/a/a/",
+ match: false,
+ },
+ {
+ input: "a//a/a",
+ match: false,
+ },
+ {
+ input: "a",
+ match: true,
+ subs: []string{"", "a"},
+ },
+ {
+ input: "a/aa",
+ match: true,
+ subs: []string{"a", "aa"},
+ },
+ {
+ input: "a/aa/a",
+ match: true,
+ subs: []string{"a", "aa/a"},
+ },
+ {
+ input: "foo.com",
+ match: true,
+ subs: []string{"", "foo.com"},
+ },
+ {
+ input: "foo.com/",
+ match: false,
+ },
+ {
+ input: "foo.com:8080/bar",
+ match: true,
+ subs: []string{"foo.com:8080", "bar"},
+ },
+ {
+ input: "foo.com:http/bar",
+ match: false,
+ },
+ {
+ input: "foo.com/bar",
+ match: true,
+ subs: []string{"foo.com", "bar"},
+ },
+ {
+ input: "foo.com/bar/baz",
+ match: true,
+ subs: []string{"foo.com", "bar/baz"},
+ },
+ {
+ input: "localhost:8080/bar",
+ match: true,
+ subs: []string{"localhost:8080", "bar"},
+ },
+ {
+ input: "sub-dom1.foo.com/bar/baz/quux",
+ match: true,
+ subs: []string{"sub-dom1.foo.com", "bar/baz/quux"},
+ },
+ {
+ input: "blog.foo.com/bar/baz",
+ match: true,
+ subs: []string{"blog.foo.com", "bar/baz"},
+ },
+ {
+ input: "a^a",
+ match: false,
+ },
+ {
+ input: "aa/asdf$$^/aa",
+ match: false,
+ },
+ {
+ input: "asdf$$^/aa",
+ match: false,
+ },
+ {
+ input: "aa-a/a",
+ match: true,
+ subs: []string{"aa-a", "a"},
+ },
+ {
+ input: strings.Repeat("a/", 128) + "a",
+ match: true,
+ subs: []string{"a", strings.Repeat("a/", 127) + "a"},
+ },
+ {
+ input: "a-/a/a/a",
+ match: false,
+ },
+ {
+ input: "foo.com/a-/a/a",
+ match: false,
+ },
+ {
+ input: "-foo/bar",
+ match: false,
+ },
+ {
+ input: "foo/bar-",
+ match: false,
+ },
+ {
+ input: "foo-/bar",
+ match: false,
+ },
+ {
+ input: "foo/-bar",
+ match: false,
+ },
+ {
+ input: "_foo/bar",
+ match: false,
+ },
+ {
+ input: "foo_bar",
+ match: true,
+ subs: []string{"", "foo_bar"},
+ },
+ {
+ input: "foo_bar.com",
+ match: true,
+ subs: []string{"", "foo_bar.com"},
+ },
+ {
+ input: "foo_bar.com:8080",
+ match: false,
+ },
+ {
+ input: "foo_bar.com:8080/app",
+ match: false,
+ },
+ {
+ input: "foo.com/foo_bar",
+ match: true,
+ subs: []string{"foo.com", "foo_bar"},
+ },
+ {
+ input: "____/____",
+ match: false,
+ },
+ {
+ input: "_docker/_docker",
+ match: false,
+ },
+ {
+ input: "docker_/docker_",
+ match: false,
+ },
+ {
+ input: "b.gcr.io/test.example.com/my-app",
+ match: true,
+ subs: []string{"b.gcr.io", "test.example.com/my-app"},
+ },
+ {
+ input: "xn--n3h.com/myimage", // ☃.com in punycode
+ match: true,
+ subs: []string{"xn--n3h.com", "myimage"},
+ },
+ {
+ input: "xn--7o8h.com/myimage", // 🐳.com in punycode
+ match: true,
+ subs: []string{"xn--7o8h.com", "myimage"},
+ },
+ {
+ input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode
+ match: true,
+ subs: []string{"example.com", "xn--7o8h.com/myimage"},
+ },
+ {
+ input: "example.com/some_separator__underscore/myimage",
+ match: true,
+ subs: []string{"example.com", "some_separator__underscore/myimage"},
+ },
+ {
+ input: "example.com/__underscore/myimage",
+ match: false,
+ },
+ {
+ input: "example.com/..dots/myimage",
+ match: false,
+ },
+ {
+ input: "example.com/.dots/myimage",
+ match: false,
+ },
+ {
+ input: "example.com/nodouble..dots/myimage",
+ match: false,
+ },
+ {
+ input: "example.com/nodouble..dots/myimage",
+ match: false,
+ },
+ {
+ input: "docker./docker",
+ match: false,
+ },
+ {
+ input: ".docker/docker",
+ match: false,
+ },
+ {
+ input: "docker-/docker",
+ match: false,
+ },
+ {
+ input: "-docker/docker",
+ match: false,
+ },
+ {
+ input: "do..cker/docker",
+ match: false,
+ },
+ {
+ input: "do__cker:8080/docker",
+ match: false,
+ },
+ {
+ input: "do__cker/docker",
+ match: true,
+ subs: []string{"", "do__cker/docker"},
+ },
+ {
+ input: "b.gcr.io/test.example.com/my-app",
+ match: true,
+ subs: []string{"b.gcr.io", "test.example.com/my-app"},
+ },
+ {
+ input: "registry.io/foo/project--id.module--name.ver---sion--name",
+ match: true,
+ subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"},
+ },
+ {
+ input: "Asdf.com/foo/bar", // uppercase character in hostname
+ match: true,
+ },
+ {
+ input: "Foo/FarB", // uppercase characters in remote name
+ match: false,
+ },
+ }
+ for i := range testcases {
+ checkRegexp(t, anchoredNameRegexp, testcases[i])
+ }
+}
+
+func TestReferenceRegexp(t *testing.T) {
+ if ReferenceRegexp.NumSubexp() != 3 {
+		t.Fatalf("reference regexp should have three submatches: %v, %v != 3",
+ ReferenceRegexp, ReferenceRegexp.NumSubexp())
+ }
+
+ testcases := []regexpMatch{
+ {
+ input: "registry.com:8080/myapp:tag",
+ match: true,
+ subs: []string{"registry.com:8080/myapp", "tag", ""},
+ },
+ {
+ input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+ match: true,
+ subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+ },
+ {
+ input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+ match: true,
+ subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+ },
+ {
+ input: "registry.com:8080/myapp@sha256:badbadbadbad",
+ match: false,
+ },
+ {
+ input: "registry.com:8080/myapp:invalid~tag",
+ match: false,
+ },
+ {
+ input: "bad_hostname.com:8080/myapp:tag",
+ match: false,
+ },
+ {
+			// localhost is treated as the name, with 8080 parsed as the tag
+			input: "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+ match: true,
+ subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+ },
+ {
+ input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+ match: true,
+ subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+ },
+ {
+ input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+ match: false,
+ },
+ {
+ // localhost will be treated as an image name without a host
+ input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+ match: true,
+ subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+ },
+ {
+ input: "registry.com:8080/myapp@bad",
+ match: false,
+ },
+ {
+ input: "registry.com:8080/myapp@2bad",
+ match: false, // TODO(dmcgowan): Support this as valid
+ },
+ }
+
+ for i := range testcases {
+ checkRegexp(t, ReferenceRegexp, testcases[i])
+ }
+
+}
+
+func TestIdentifierRegexp(t *testing.T) {
+ fullCases := []regexpMatch{
+ {
+ input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
+ match: true,
+ },
+ {
+ input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C",
+ match: false,
+ },
+ {
+ input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf",
+ match: false,
+ },
+ {
+ input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
+ match: false,
+ },
+ {
+ input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482",
+ match: false,
+ },
+ }
+
+ for i := range fullCases {
+ checkRegexp(t, anchoredIdentifierRegexp, fullCases[i])
+ if IsFullIdentifier(fullCases[i].input) != fullCases[i].match {
+ t.Errorf("Expected match for %q to be %v", fullCases[i].input, fullCases[i].match)
+ }
+ }
+}
diff --git a/docker/registries_d.go b/docker/registries_d.go
new file mode 100644
index 0000000..c7b884a
--- /dev/null
+++ b/docker/registries_d.go
@@ -0,0 +1,293 @@
+package docker
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/rootless"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "gopkg.in/yaml.v3"
+)
+
+// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path'
+var systemRegistriesDirPath = builtinRegistriesDirPath
+
+// builtinRegistriesDirPath is the path to registries.d.
+// DO NOT change this, instead see systemRegistriesDirPath above.
+const builtinRegistriesDirPath = etcDir + "/containers/registries.d"
+
+// userRegistriesDirPath is the path to the per user registries.d.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
+
+// defaultUserDockerDir is the default lookaside directory for unprivileged user
+var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")
+
+// defaultDockerDir is the default lookaside directory for root
+var defaultDockerDir = "/var/lib/containers/sigstore"
+
+// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
+// NOTE: Keep this in sync with docs/registries.d.md!
+type registryConfiguration struct {
+ DefaultDocker *registryNamespace `yaml:"default-docker"`
+ // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
+ Docker map[string]registryNamespace `yaml:"docker"`
+}
+
+// registryNamespace defines lookaside locations for a single namespace.
+type registryNamespace struct {
+ Lookaside string `yaml:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
+ LookasideStaging string `yaml:"lookaside-staging"` // For writing only.
+ SigStore string `yaml:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
+ SigStoreStaging string `yaml:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
+ UseSigstoreAttachments *bool `yaml:"use-sigstore-attachments,omitempty"`
+}
+
+// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage.
+// Users outside of this file should use SignatureStorageBaseURL and lookasideStorageURL below.
+type lookasideStorageBase *url.URL
+
+// SignatureStorageBaseURL reads configuration to find an appropriate lookaside storage URL for ref, for write access if “write”.
+// The base URL is used for lookaside signature storage kept separately from docker/distribution registries, as described in docs/signature-protocols.md.
+// Warning: This function only exposes configuration in registries.d;
+// just because this function returns a URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
+func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
+ dr, ok := ref.(dockerReference)
+ if !ok {
+ return nil, errors.New("ref must be a dockerReference")
+ }
+ config, err := loadRegistryConfiguration(sys)
+ if err != nil {
+ return nil, err
+ }
+
+ return config.lookasideStorageBaseURL(dr, write)
+}
+
+// loadRegistryConfiguration returns a registryConfiguration appropriate for sys.
+func loadRegistryConfiguration(sys *types.SystemContext) (*registryConfiguration, error) {
+ dirPath := registriesDirPath(sys)
+ logrus.Debugf(`Using registries.d directory %s`, dirPath)
+ return loadAndMergeConfig(dirPath)
+}
+
+// registriesDirPath returns a path to registries.d
+func registriesDirPath(sys *types.SystemContext) string {
+ return registriesDirPathWithHomeDir(sys, homedir.Get())
+}
+
+// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
+// it exists only to allow testing it with an artificial home directory.
+func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
+ if sys != nil && sys.RegistriesDirPath != "" {
+ return sys.RegistriesDirPath
+ }
+ userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
+ if _, err := os.Stat(userRegistriesDirPath); err == nil {
+ return userRegistriesDirPath
+ }
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
+ }
+
+ return systemRegistriesDirPath
+}
+
+// loadAndMergeConfig loads configuration files in dirPath
+// FIXME: Probably rename to loadRegistryConfigurationForPath
+func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
+ mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
+ dockerDefaultMergedFrom := ""
+ nsMergedFrom := map[string]string{}
+
+ dir, err := os.Open(dirPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &mergedConfig, nil
+ }
+ return nil, err
+ }
+ configNames, err := dir.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+ for _, configName := range configNames {
+ if !strings.HasSuffix(configName, ".yaml") {
+ continue
+ }
+ configPath := filepath.Join(dirPath, configName)
+ configBytes, err := os.ReadFile(configPath)
+ if err != nil {
+ return nil, err
+ }
+
+ var config registryConfiguration
+ err = yaml.Unmarshal(configBytes, &config)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %w", configPath, err)
+ }
+
+ if config.DefaultDocker != nil {
+ if mergedConfig.DefaultDocker != nil {
+ return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+ dockerDefaultMergedFrom, configPath)
+ }
+ mergedConfig.DefaultDocker = config.DefaultDocker
+ dockerDefaultMergedFrom = configPath
+ }
+
+ for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
+ if _, ok := mergedConfig.Docker[nsName]; ok {
+ return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+ nsName, nsMergedFrom[nsName], configPath)
+ }
+ mergedConfig.Docker[nsName] = nsConfig
+ nsMergedFrom[nsName] = configPath
+ }
+ }
+
+ return &mergedConfig, nil
+}
+
+// lookasideStorageBaseURL returns an appropriate signature storage URL for ref, for write access if “write”.
+// The base URL is used for lookaside signature storage kept separately from docker/distribution registries, as described in docs/signature-protocols.md.
+func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) {
+ topLevel := config.signatureTopLevel(dr, write)
+ var baseURL *url.URL
+ if topLevel != "" {
+ u, err := url.Parse(topLevel)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err)
+ }
+ baseURL = u
+ } else {
+		// No lookaside URL is specified in the configuration files; fall back to the built-in default directory.
+ baseURL = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID())
+ logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), baseURL.Redacted())
+ }
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ // FIXME? Restrict to explicitly supported schemes?
+ repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
+ return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
+ }
+ baseURL.Path = baseURL.Path + "/" + repo
+ return baseURL, nil
+}
+
+// builtinDefaultLookasideStorageDir returns default signature storage URL as per euid
+func builtinDefaultLookasideStorageDir(euid int) *url.URL {
+ if euid != 0 {
+ return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
+ }
+ return &url.URL{Scheme: "file", Path: defaultDockerDir}
+}
+
+// config.signatureTopLevel returns a URL string configured in config for ref, for write access if “write”
+// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured.
+func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
+ if config.Docker != nil {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if ns, ok := config.Docker[identity]; ok {
+ logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity)
+ if ret := ns.signatureTopLevel(write); ret != "" {
+ return ret
+ }
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name)
+ if ret := ns.signatureTopLevel(write); ret != "" {
+ return ret
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`)
+ if ret := config.DefaultDocker.signatureTopLevel(write); ret != "" {
+ return ret
+ }
+ }
+ return ""
+}
+
+// config.useSigstoreAttachments returns whether we should look for and write sigstore attachments
+// for ref.
+func (config *registryConfiguration) useSigstoreAttachments(ref dockerReference) bool {
+ if config.Docker != nil {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if ns, ok := config.Docker[identity]; ok {
+ logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, identity)
+ if ns.UseSigstoreAttachments != nil {
+ return *ns.UseSigstoreAttachments
+ }
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, name)
+ if ns.UseSigstoreAttachments != nil {
+ return *ns.UseSigstoreAttachments
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Sigstore attachments: using "default-docker" configuration`)
+ if config.DefaultDocker.UseSigstoreAttachments != nil {
+ return *config.DefaultDocker.UseSigstoreAttachments
+ }
+ }
+ return false
+}
+
+// ns.signatureTopLevel returns a URL string configured in ns for ref, for write access if “write”,
+// or "" if nothing has been configured.
+func (ns registryNamespace) signatureTopLevel(write bool) string {
+ if write {
+ if ns.LookasideStaging != "" {
+ logrus.Debugf(` Using "lookaside-staging" %s`, ns.LookasideStaging)
+ return ns.LookasideStaging
+ }
+ if ns.SigStoreStaging != "" {
+ logrus.Debugf(` Using "sigstore-staging" %s`, ns.SigStoreStaging)
+ return ns.SigStoreStaging
+ }
+ }
+ if ns.Lookaside != "" {
+ logrus.Debugf(` Using "lookaside" %s`, ns.Lookaside)
+ return ns.Lookaside
+ }
+ if ns.SigStore != "" {
+ logrus.Debugf(` Using "sigstore" %s`, ns.SigStore)
+ return ns.SigStore
+ }
+ return ""
+}
+
+// lookasideStorageURL returns a URL usable for accessing the signature with the given index in base, for the known manifestDigest.
+// The caller must ensure base is not nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+ sigURL := *base
+ sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+ return &sigURL
+}
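
A minimal sketch of how a caller might resolve the lookaside base URL through this configuration machinery; the reference string and registries.d directory are placeholders, and passing false requests the read (not staging) location:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	// Placeholder reference in the docker transport's "//host/name" form.
	ref, err := docker.ParseReference("//example.com/my/project")
	if err != nil {
		panic(err)
	}
	// Placeholder registries.d directory; a nil SystemContext would fall back to the defaults above.
	sys := &types.SystemContext{RegistriesDirPath: "/etc/containers/registries.d"}
	base, err := docker.SignatureStorageBaseURL(sys, ref, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(base.Redacted())
}
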
diff --git a/docker/registries_d_test.go b/docker/registries_d_test.go
new file mode 100644
index 0000000..8ff5058
--- /dev/null
+++ b/docker/registries_d_test.go
@@ -0,0 +1,336 @@
+package docker
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func dockerRefFromString(t *testing.T, s string) dockerReference {
+ ref, err := ParseReference(s)
+ require.NoError(t, err, s)
+ dockerRef, ok := ref.(dockerReference)
+ require.True(t, ok, s)
+ return dockerRef
+}
+
+func TestSignatureStorageBaseURL(t *testing.T) {
+ emptyDir := t.TempDir()
+ for _, c := range []struct {
+ dir, ref string
+ expected string // Or "" to expect failure
+ }{
+ { // Error reading configuration directory (/dev/null is not a directory)
+ "/dev/null", "//busybox",
+ "",
+ },
+ { // No match found: expect default user storage base
+ emptyDir, "//this/is/not/in/the:configuration",
+ "file://" + filepath.Join(os.Getenv("HOME"), defaultUserDockerDir, "//this/is/not/in/the"),
+ },
+ { // Invalid URL
+ "fixtures/registries.d", "//localhost/invalid/url/test",
+ "",
+ },
+ // URLs without a scheme: This will be rejected by consumers, so we don't really care about
+ // the returned value, but it should not crash at the very least.
+ { // Absolute path
+ "fixtures/registries.d", "//localhost/file/path/test",
+ "/no/scheme/just/a/path/file/path/test",
+ },
+ { // Relative path
+ "fixtures/registries.d", "//localhost/relative/path/test",
+ "no/scheme/relative/path/relative/path/test",
+ },
+ { // Success
+ "fixtures/registries.d", "//example.com/my/project",
+ "https://lookaside.example.com/my/project",
+ },
+ } {
+ base, err := SignatureStorageBaseURL(&types.SystemContext{RegistriesDirPath: c.dir},
+ dockerRefFromString(t, c.ref), false)
+ if c.expected != "" {
+ require.NoError(t, err, c.ref)
+ require.NotNil(t, base, c.ref)
+ assert.Equal(t, c.expected, base.String(), c.ref)
+ } else {
+ assert.Error(t, err, c.ref)
+ }
+ }
+}
+
+func TestRegistriesDirPath(t *testing.T) {
+ const nondefaultPath = "/this/is/not/the/default/registries.d"
+ const variableReference = "$HOME"
+ const rootPrefix = "/root/prefix"
+ tempHome := t.TempDir()
+ var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
+ userRegistriesDirPath := filepath.Join(tempHome, userRegistriesDir)
+ for _, c := range []struct {
+ sys *types.SystemContext
+ userFilePresent bool
+ expected string
+ }{
+ // The common case
+ {nil, false, systemRegistriesDirPath},
+ // There is a context, but it does not override the path.
+ {&types.SystemContext{}, false, systemRegistriesDirPath},
+ // Path overridden
+ {&types.SystemContext{RegistriesDirPath: nondefaultPath}, false, nondefaultPath},
+ // Root overridden
+ {
+ &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix},
+ false,
+ filepath.Join(rootPrefix, systemRegistriesDirPath),
+ },
+ // Root and path overrides present simultaneously,
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ RegistriesDirPath: nondefaultPath,
+ },
+ false,
+ nondefaultPath,
+ },
+ // User registries.d present, not overridden
+ {&types.SystemContext{}, true, userRegistriesDirPath},
+		// Context override and user registries.d present simultaneously
+ {&types.SystemContext{RegistriesDirPath: nondefaultPath}, true, nondefaultPath},
+ // Root and user registries.d overrides present simultaneously,
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ RegistriesDirPath: nondefaultPath,
+ },
+ true,
+ nondefaultPath,
+ },
+ // No environment expansion happens in the overridden paths
+ {&types.SystemContext{RegistriesDirPath: variableReference}, false, variableReference},
+ } {
+ if c.userFilePresent {
+ err := os.MkdirAll(userRegistriesDirPath, 0700)
+ require.NoError(t, err)
+ } else {
+ err := os.RemoveAll(userRegistriesDirPath)
+ require.NoError(t, err)
+ }
+ path := registriesDirPathWithHomeDir(c.sys, tempHome)
+ assert.Equal(t, c.expected, path)
+ }
+}
+
+func TestLoadAndMergeConfig(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ // No registries.d exists
+ config, err := loadAndMergeConfig(filepath.Join(tmpDir, "thisdoesnotexist"))
+ require.NoError(t, err)
+ assert.Equal(t, &registryConfiguration{Docker: map[string]registryNamespace{}}, config)
+
+ // Empty registries.d directory
+ emptyDir := filepath.Join(tmpDir, "empty")
+ err = os.Mkdir(emptyDir, 0755)
+ require.NoError(t, err)
+ config, err = loadAndMergeConfig(emptyDir)
+ require.NoError(t, err)
+ assert.Equal(t, &registryConfiguration{Docker: map[string]registryNamespace{}}, config)
+
+ // Unreadable registries.d directory
+ unreadableDir := filepath.Join(tmpDir, "unreadable")
+ err = os.Mkdir(unreadableDir, 0000)
+ require.NoError(t, err)
+ _, err = loadAndMergeConfig(unreadableDir)
+ assert.Error(t, err)
+
+ // An unreadable file in a registries.d directory
+ unreadableFileDir := filepath.Join(tmpDir, "unreadableFile")
+ err = os.Mkdir(unreadableFileDir, 0755)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(unreadableFileDir, "0.yaml"), []byte("{}"), 0644)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(unreadableFileDir, "1.yaml"), nil, 0000)
+ require.NoError(t, err)
+ _, err = loadAndMergeConfig(unreadableFileDir)
+ assert.Error(t, err)
+
+ // Invalid YAML
+ invalidYAMLDir := filepath.Join(tmpDir, "invalidYAML")
+ err = os.Mkdir(invalidYAMLDir, 0755)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(invalidYAMLDir, "0.yaml"), []byte("}"), 0644)
+ require.NoError(t, err)
+ _, err = loadAndMergeConfig(invalidYAMLDir)
+ assert.Error(t, err)
+
+ // Duplicate DefaultDocker
+ duplicateDefault := filepath.Join(tmpDir, "duplicateDefault")
+ err = os.Mkdir(duplicateDefault, 0755)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(duplicateDefault, "0.yaml"),
+ []byte("default-docker:\n lookaside: file:////tmp/something"), 0644)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(duplicateDefault, "1.yaml"),
+ []byte("default-docker:\n lookaside: file:////tmp/different"), 0644)
+ require.NoError(t, err)
+ _, err = loadAndMergeConfig(duplicateDefault)
+ assert.ErrorContains(t, err, "0.yaml")
+ assert.ErrorContains(t, err, "1.yaml")
+
+	// Duplicate Docker namespace entry
+ duplicateNS := filepath.Join(tmpDir, "duplicateNS")
+ err = os.Mkdir(duplicateNS, 0755)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(duplicateNS, "0.yaml"),
+ []byte("docker:\n example.com:\n lookaside: file:////tmp/something"), 0644)
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(duplicateNS, "1.yaml"),
+ []byte("docker:\n example.com:\n lookaside: file:////tmp/different"), 0644)
+ require.NoError(t, err)
+ _, err = loadAndMergeConfig(duplicateNS)
+ assert.ErrorContains(t, err, "0.yaml")
+ assert.ErrorContains(t, err, "1.yaml")
+
+ // A fully worked example, including an empty-dictionary file and a non-.yaml file
+ config, err = loadAndMergeConfig("fixtures/registries.d")
+ require.NoError(t, err)
+ assert.Equal(t, &registryConfiguration{
+ DefaultDocker: &registryNamespace{Lookaside: "file:///mnt/companywide/signatures/for/other/repositories"},
+ Docker: map[string]registryNamespace{
+ "example.com": {Lookaside: "https://lookaside.example.com"},
+ "registry.test.example.com": {Lookaside: "http://registry.test.example.com/lookaside"},
+ "registry.test.example.com:8888": {Lookaside: "http://registry.test.example.com:8889/lookaside", LookasideStaging: "https://registry.test.example.com:8889/lookaside/specialAPIserverWhichDoesNotExist"},
+ "localhost": {Lookaside: "file:///home/mitr/mydevelopment1"},
+ "localhost:8080": {Lookaside: "file:///home/mitr/mydevelopment2"},
+ "localhost/invalid/url/test": {Lookaside: ":emptyscheme"},
+ "localhost/file/path/test": {Lookaside: "/no/scheme/just/a/path"},
+ "localhost/relative/path/test": {Lookaside: "no/scheme/relative/path"},
+ "docker.io/contoso": {Lookaside: "https://lookaside.contoso.com/fordocker"},
+ "docker.io/centos": {Lookaside: "https://lookaside.centos.org/"},
+ "docker.io/centos/mybetaproduct": {
+ Lookaside: "http://localhost:9999/mybetaWIP/lookaside",
+ LookasideStaging: "file:///srv/mybetaWIP/lookaside",
+ },
+ "docker.io/centos/mybetaproduct:latest": {Lookaside: "https://lookaside.centos.org/"},
+ },
+ }, config)
+}
+
+func TestRegistryConfigurationSignatureTopLevel(t *testing.T) {
+ config := registryConfiguration{
+ DefaultDocker: &registryNamespace{Lookaside: "=default", LookasideStaging: "=default+w"},
+ Docker: map[string]registryNamespace{},
+ }
+ for _, ns := range []string{
+ "localhost",
+ "localhost:5000",
+ "example.com",
+ "example.com/ns1",
+ "example.com/ns1/ns2",
+ "example.com/ns1/ns2/repo",
+ "example.com/ns1/ns2/repo:notlatest",
+ } {
+ config.Docker[ns] = registryNamespace{Lookaside: ns, LookasideStaging: ns + "+w"}
+ }
+
+ for _, c := range []struct{ input, expected string }{
+ {"example.com/ns1/ns2/repo:notlatest", "example.com/ns1/ns2/repo:notlatest"},
+ {"example.com/ns1/ns2/repo:unmatched", "example.com/ns1/ns2/repo"},
+ {"example.com/ns1/ns2/notrepo:notlatest", "example.com/ns1/ns2"},
+ {"example.com/ns1/notns2/repo:notlatest", "example.com/ns1"},
+ {"example.com/notns1/ns2/repo:notlatest", "example.com"},
+ {"unknown.example.com/busybox", "=default"},
+ {"localhost:5000/busybox", "localhost:5000"},
+ {"localhost/busybox", "localhost"},
+ {"localhost:9999/busybox", "=default"},
+ } {
+ dr := dockerRefFromString(t, "//"+c.input)
+
+ res := config.signatureTopLevel(dr, false)
+ assert.Equal(t, c.expected, res, c.input)
+ res = config.signatureTopLevel(dr, true) // test that forWriting is correctly propagated
+ assert.Equal(t, c.expected+"+w", res, c.input)
+ }
+
+ config = registryConfiguration{
+ Docker: map[string]registryNamespace{
+ "unmatched": {Lookaside: "a", LookasideStaging: "b"},
+ },
+ }
+ dr := dockerRefFromString(t, "//thisisnotmatched")
+ res := config.signatureTopLevel(dr, false)
+ assert.Equal(t, "", res)
+ res = config.signatureTopLevel(dr, true)
+ assert.Equal(t, "", res)
+}
+
+func TestRegistryNamespaceSignatureTopLevel(t *testing.T) {
+ for _, c := range []struct {
+ ns registryNamespace
+ forWriting bool
+ expected string
+ }{
+ {registryNamespace{LookasideStaging: "a", Lookaside: "b"}, true, "a"},
+ {registryNamespace{LookasideStaging: "a", Lookaside: "b"}, false, "b"},
+ {registryNamespace{Lookaside: "b"}, true, "b"},
+ {registryNamespace{Lookaside: "b"}, false, "b"},
+ {registryNamespace{LookasideStaging: "a"}, true, "a"},
+ {registryNamespace{LookasideStaging: "a"}, false, ""},
+ {registryNamespace{}, true, ""},
+ {registryNamespace{}, false, ""},
+
+ {registryNamespace{LookasideStaging: "a", Lookaside: "b", SigStoreStaging: "c", SigStore: "d"}, true, "a"},
+ {registryNamespace{Lookaside: "b", SigStoreStaging: "c", SigStore: "d"}, true, "c"},
+ {registryNamespace{Lookaside: "b", SigStore: "d"}, true, "b"},
+ {registryNamespace{SigStore: "d"}, true, "d"},
+
+ {registryNamespace{LookasideStaging: "a", Lookaside: "b", SigStoreStaging: "c", SigStore: "d"}, false, "b"},
+ {registryNamespace{Lookaside: "b", SigStoreStaging: "c", SigStore: "d"}, false, "b"},
+ {registryNamespace{Lookaside: "b", SigStore: "d"}, false, "b"},
+ {registryNamespace{SigStore: "d"}, false, "d"},
+ } {
+ res := c.ns.signatureTopLevel(c.forWriting)
+ assert.Equal(t, c.expected, res, fmt.Sprintf("%#v %v", c.ns, c.forWriting))
+ }
+}
+
+func TestLookasideStorageURL(t *testing.T) {
+ const mdInput = "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ const mdMapped = "sha256=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+
+ for _, c := range []struct {
+ base string
+ index int
+ expected string
+ }{
+ {"file:///tmp", 0, "file:///tmp@" + mdMapped + "/signature-1"},
+ {"file:///tmp", 1, "file:///tmp@" + mdMapped + "/signature-2"},
+ {"https://localhost:5555/root", 0, "https://localhost:5555/root@" + mdMapped + "/signature-1"},
+ {"https://localhost:5555/root", 1, "https://localhost:5555/root@" + mdMapped + "/signature-2"},
+ {"http://localhost:5555/root", 0, "http://localhost:5555/root@" + mdMapped + "/signature-1"},
+ {"http://localhost:5555/root", 1, "http://localhost:5555/root@" + mdMapped + "/signature-2"},
+ } {
+ baseURL, err := url.Parse(c.base)
+ require.NoError(t, err)
+ expectedURL, err := url.Parse(c.expected)
+ require.NoError(t, err)
+ res := lookasideStorageURL(baseURL, mdInput, c.index)
+ assert.Equal(t, expectedURL, res, c.expected)
+ }
+}
+
+func TestBuiltinDefaultLookasideStorageDir(t *testing.T) {
+ base := builtinDefaultLookasideStorageDir(0)
+ assert.NotNil(t, base)
+ assert.Equal(t, "file://"+defaultDockerDir, base.String())
+
+ base = builtinDefaultLookasideStorageDir(1000)
+ assert.NotNil(t, base)
+ assert.Equal(t, "file://"+filepath.Join(os.Getenv("HOME"), defaultUserDockerDir), base.String())
+}
diff --git a/docker/tarfile/dest.go b/docker/tarfile/dest.go
new file mode 100644
index 0000000..4547654
--- /dev/null
+++ b/docker/tarfile/dest.go
@@ -0,0 +1,119 @@
+package tarfile
+
+import (
+ "context"
+ "io"
+
+ internal "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+type Destination struct {
+ internal *internal.Destination
+ archive *internal.Writer
+}
+
+// NewDestination returns a tarfile.Destination for the specified io.Writer.
+// Deprecated: please use NewDestinationWithContext instead
+func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
+ return NewDestinationWithContext(nil, dest, ref)
+}
+
+// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
+func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
+ archive := internal.NewWriter(dest)
+ return &Destination{
+ internal: internal.NewDestination(sys, archive, "[An external docker/tarfile caller]", ref),
+ archive: archive,
+ }
+}
+
+// AddRepoTags adds the specified tags to the destination's repoTags.
+func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
+ d.internal.AddRepoTags(tags)
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, any MIME type can be tried to upload.
+func (d *Destination) SupportedManifestMIMETypes() []string {
+ return d.internal.SupportedManifestMIMETypes()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures(ctx context.Context) error {
+ return d.internal.SupportsSignatures(ctx)
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+ return d.internal.AcceptsForeignLayerURLs()
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *Destination) MustMatchRuntimeOS() bool {
+ return d.internal.MustMatchRuntimeOS()
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *Destination) IgnoresEmbeddedDockerReference() bool {
+ return d.internal.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *Destination) HasThreadSafePutBlob() bool {
+ return d.internal.HasThreadSafePutBlob()
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.internal.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.internal.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ return d.internal.PutManifest(ctx, m, instanceDigest)
+}
+
+// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
+func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ return d.internal.PutSignatures(ctx, signatures, instanceDigest)
+}
+
+// Commit finishes writing data to the underlying io.Writer.
+// It is the caller's responsibility to close it, if necessary.
+func (d *Destination) Commit(ctx context.Context) error {
+ return d.archive.Close()
+}
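
As a rough sketch of how this wrapper is wired up (the output path and reference are placeholders, and a real caller would go on to stream blobs and a manifest before Commit):

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/docker/tarfile"
)

func main() {
	// Placeholder output file for the docker-archive style tarball.
	out, err := os.Create("/tmp/busybox.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	named, err := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	dest := tarfile.NewDestinationWithContext(nil, out, named.(reference.NamedTagged))
	fmt.Println("thread-safe PutBlob:", dest.HasThreadSafePutBlob())
	// PutBlob/PutManifest calls would follow here, then dest.Commit(ctx) to flush the tar stream.
}
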
diff --git a/docker/tarfile/doc.go b/docker/tarfile/doc.go
new file mode 100644
index 0000000..4ea5369
--- /dev/null
+++ b/docker/tarfile/doc.go
@@ -0,0 +1,3 @@
+// Package tarfile is an internal implementation detail of some transports.
+// Do not use outside of the github.com/containers/image repo!
+package tarfile
diff --git a/docker/tarfile/src.go b/docker/tarfile/src.go
new file mode 100644
index 0000000..067716c
--- /dev/null
+++ b/docker/tarfile/src.go
@@ -0,0 +1,104 @@
+package tarfile
+
+import (
+ "context"
+ "io"
+
+ internal "github.com/containers/image/v5/docker/internal/tarfile"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+)
+
+// Source is a partial implementation of types.ImageSource for reading from tarPath.
+// Most users should use this via implementations of ImageReference from docker/archive or docker/daemon.
+type Source struct {
+ internal *internal.Source
+}
+
+// NewSourceFromFile returns a tarfile.Source for the specified path.
+// Deprecated: Please use NewSourceFromFileWithContext, which allows you to configure the temporary directory
+// for big files through SystemContext.BigFilesTemporaryDir.
+func NewSourceFromFile(path string) (*Source, error) {
+ return NewSourceFromFileWithContext(nil, path)
+}
+
+// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
+func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
+ archive, err := internal.NewReaderFromFile(sys, path)
+ if err != nil {
+ return nil, err
+ }
+ src := internal.NewSource(archive, true, "[An external docker/tarfile caller]", nil, -1)
+ return &Source{internal: src}, nil
+}
+
+// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewSourceFromStream returns.
+// Deprecated: Please use NewSourceFromStreamWithSystemContext, which allows you to configure
+// the temporary directory for big files through SystemContext.BigFilesTemporaryDir.
+func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
+ return NewSourceFromStreamWithSystemContext(nil, inputStream)
+}
+
+// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewSourceFromStreamWithSystemContext returns.
+func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
+ archive, err := internal.NewReaderFromStream(sys, inputStream)
+ if err != nil {
+ return nil, err
+ }
+ src := internal.NewSource(archive, true, "[An external docker/tarfile caller]", nil, -1)
+ return &Source{internal: src}, nil
+}
+
+// Close removes resources associated with an initialized Source, if any.
+func (s *Source) Close() error {
+ return s.internal.Close()
+}
+
+// LoadTarManifest loads and decodes the manifest.json
+func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
+ return s.internal.TarManifest(), nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary instances.
+func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ return s.internal.GetManifest(ctx, instanceDigest)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *Source) HasThreadSafeGetBlob() bool {
+ return s.internal.HasThreadSafeGetBlob()
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ return s.internal.GetBlob(ctx, info, cache)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
+func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ return s.internal.GetSignatures(ctx, instanceDigest)
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *Source) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return s.internal.LayerInfosForCopy(ctx, instanceDigest)
+}
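
A brief usage sketch, with the tarball path as a placeholder; it only reads the top-level manifest, which is typically enough to sanity-check an archive:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker/tarfile"
)

func main() {
	// Placeholder path to a docker-save style tarball.
	src, err := tarfile.NewSourceFromFileWithContext(nil, "/tmp/busybox.tar")
	if err != nil {
		panic(err)
	}
	defer src.Close()

	manifest, mimeType, err := src.GetManifest(context.Background(), nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("manifest: %d bytes, MIME type %q\n", len(manifest), mimeType)
}
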
diff --git a/docker/tarfile/types.go b/docker/tarfile/types.go
new file mode 100644
index 0000000..0f14389
--- /dev/null
+++ b/docker/tarfile/types.go
@@ -0,0 +1,8 @@
+package tarfile
+
+import (
+ internal "github.com/containers/image/v5/docker/internal/tarfile"
+)
+
+// ManifestItem is an element of the array stored in the top-level manifest.json file.
+type ManifestItem = internal.ManifestItem // All public members from the internal package remain accessible.
diff --git a/docker/wwwauthenticate.go b/docker/wwwauthenticate.go
new file mode 100644
index 0000000..6bcb835
--- /dev/null
+++ b/docker/wwwauthenticate.go
@@ -0,0 +1,172 @@
+package docker
+
+// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 7235.
+type challenge struct {
+ // Scheme is the auth-scheme according to RFC 7235
+ Scheme string
+
+ // Parameters are the auth-params according to RFC 7235
+ Parameters map[string]string
+}
+
+// Octet types from RFC 7230.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+ challenges := []challenge{}
+ for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+ v, p := parseValueAndParams(h)
+ if v != "" {
+ challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+ }
+ }
+ return challenges
+}
+
+// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
+func parseAuthScope(scopeStr string) (*authScope, error) {
+ if parts := strings.Split(scopeStr, ":"); len(parts) == 3 {
+ return &authScope{
+ resourceType: parts[0],
+ remoteName: parts[1],
+ actions: parts[2],
+ }, nil
+ }
+ return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr)
+}
+
+// NOTE: This is not a fully compliant parser per RFC 7235:
+// Most notably it does not support more than one challenge within a single header.
+// Some of the whitespace parsing also seems noncompliant.
+// But it is clearly better than what we used to have…
+func parseValueAndParams(header string) (value string, params map[string]string) {
+ params = make(map[string]string)
+ value, s := expectToken(header)
+ if value == "" {
+ return
+ }
+ value = strings.ToLower(value)
+ s = "," + skipSpace(s)
+ for strings.HasPrefix(s, ",") {
+ var pkey string
+ pkey, s = expectToken(skipSpace(s[1:]))
+ if pkey == "" {
+ return
+ }
+ if !strings.HasPrefix(s, "=") {
+ return
+ }
+ var pvalue string
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ if pvalue == "" {
+ return
+ }
+ pkey = strings.ToLower(pkey)
+ params[pkey] = pvalue
+ s = skipSpace(s)
+ }
+ return
+}
+
+// skipSpace returns s with any leading whitespace (per the octetTypes table above) removed.
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+// expectToken splits s into a leading run of token characters (per the octetTypes table above) and the remainder.
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+// expectTokenOrQuoted consumes either a token or a double-quoted string (processing backslash escapes) from the start of s.
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i++; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
diff --git a/docker/wwwauthenticate_test.go b/docker/wwwauthenticate_test.go
new file mode 100644
index 0000000..d11f6fb
--- /dev/null
+++ b/docker/wwwauthenticate_test.go
@@ -0,0 +1,45 @@
+package docker
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// This is just a smoke test for the common expected header formats,
+// by no means comprehensive.
+func TestParseValueAndParams(t *testing.T) {
+ for _, c := range []struct {
+ input string
+ scope string
+ params map[string]string
+ }{
+ {
+ `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/busybox:pull"`,
+ "bearer",
+ map[string]string{
+ "realm": "https://auth.docker.io/token",
+ "service": "registry.docker.io",
+ "scope": "repository:library/busybox:pull",
+ },
+ },
+ {
+ `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/busybox:pull,push"`,
+ "bearer",
+ map[string]string{
+ "realm": "https://auth.docker.io/token",
+ "service": "registry.docker.io",
+ "scope": "repository:library/busybox:pull,push",
+ },
+ },
+ {
+ `Bearer realm="http://127.0.0.1:5000/openshift/token"`,
+ "bearer",
+ map[string]string{"realm": "http://127.0.0.1:5000/openshift/token"},
+ },
+ } {
+ scope, params := parseValueAndParams(c.input)
+ assert.Equal(t, c.scope, scope, c.input)
+ assert.Equal(t, c.params, params, c.input)
+ }
+}
diff --git a/docs/atomic-signature-embedded-json.json b/docs/atomic-signature-embedded-json.json
new file mode 100644
index 0000000..5a7c899
--- /dev/null
+++ b/docs/atomic-signature-embedded-json.json
@@ -0,0 +1,66 @@
+{
+ "title": "JSON embedded in an atomic container signature",
+ "description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containers/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image consumers.\n",
+ "type": "object",
+ "required": [
+ "critical",
+ "optional"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "critical": {
+ "type": "object",
+ "required": [
+ "type",
+ "image",
+ "identity"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "atomic container signature"
+ ]
+ },
+ "image": {
+ "type": "object",
+ "required": [
+ "docker-manifest-digest"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "docker-manifest-digest": {
+ "type": "string"
+ }
+ }
+ },
+ "identity": {
+ "type": "object",
+ "required": [
+ "docker-reference"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "docker-reference": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "optional": {
+ "type": "object",
+ "description": "All members are optional, but if they are included, they must be valid.",
+ "additionalProperties": true,
+ "properties": {
+ "creator": {
+ "type": "string"
+ },
+ "timestamp": {
+ "type": "integer"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/containers-auth.json.5.md b/docs/containers-auth.json.5.md
new file mode 100644
index 0000000..c5e22b0
--- /dev/null
+++ b/docs/containers-auth.json.5.md
@@ -0,0 +1,105 @@
+% containers-auth.json 5
+
+# NAME
+containers-auth.json - syntax for the registry authentication file
+
+# DESCRIPTION
+
+A file in JSON format controlling authentication against container image registries.
+The primary (read/write) file is stored at `${XDG_RUNTIME_DIR}/containers/auth.json` on Linux;
+on Windows and macOS, at `$HOME/.config/containers/auth.json`.
+
+When searching for the credential for a registry, the following files will be read in sequence until a valid credential is found:
+first the primary (read/write) file, or the explicit override using an option of the calling application.
+If credentials are not present there,
+the search continues in `${XDG_CONFIG_HOME}/containers/auth.json` (usually `~/.config/containers/auth.json`), `$HOME/.docker/config.json`, and `$HOME/.dockercfg`.
+
+Except for the primary (read/write) file, the other files are read-only unless the user, using an option of the calling application, explicitly points at one of them as an override.
+
+
+## FORMAT
+
+The auth.json file stores, or references, credentials that allow the user to authenticate
+to container image registries.
+It is primarily managed by a `login` command from a container tool such as `podman login`,
+`buildah login`, or `skopeo login`.
+
+Each entry contains a single hostname (e.g., `docker.io`) or a namespace (e.g., `quay.io/user/image`) as a key,
+and credentials in the form of a base64-encoded string as value of `auth`. The
+base64-encoded string contains a concatenation of the username, a colon, and the
+password.
+
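+For example, for a hypothetical account with username `username` and password `password` (illustrative values only, not real credentials), the `auth` value is the base64 encoding of the string `username:password`, and the resulting entry could look like:
+
+```
+{
+  "auths": {
+    "docker.io": {
+      "auth": "dXNlcm5hbWU6cGFzc3dvcmQ="
+    }
+  }
+}
+```
+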
+When checking for available credentials, the relevant repository is matched
+against available keys in its hierarchical order, going from most-specific to least-specific.
+For example, an image pull for `my-registry.local/namespace/user/image:latest` will
+result in a lookup in `auth.json` in the following order:
+
+- `my-registry.local/namespace/user/image`
+- `my-registry.local/namespace/user`
+- `my-registry.local/namespace`
+- `my-registry.local`
+
+This way it is possible to set up multiple credentials for a single registry
+which can be distinguished by their path.
+
+The following example shows the values found in auth.json after the user logged in to
+their accounts on quay.io and docker.io:
+
+```
+{
+ "auths": {
+ "docker.io": {
+ "auth": "erfi7sYi89234xJUqaqxgmzcnQ2rRFWM5aJX0EC="
+ },
+ "quay.io": {
+ "auth": "juQAqGmz5eR1ipzx8Evn6KGdw8fEa1w5MWczmgY="
+ }
+ }
+}
+```
+
+This example demonstrates how to use multiple paths for a single registry, while
+preserving a fallback for `my-registry.local`:
+
+```
+{
+ "auths": {
+ "my-registry.local/foo/bar/image": {
+ "auth": "…"
+ },
+ "my-registry.local/foo": {
+ "auth": "…"
+ },
+ "my-registry.local": {
+ "auth": "…"
+ }
+ }
+}
+```
+
+An entry can be removed by using a `logout` command from a container
+tool such as `podman logout` or `buildah logout`.
+
+In addition, credential helpers can be configured for specific registries, and the credentials-helper
+software can be used to manage the credentials more securely than storing only base64-encoded credentials in `auth.json`.
+
+When the credential helper is in use on a Linux platform, the auth.json file would contain keys that specify the registry domain, and values that specify the suffix of the program to use (i.e. everything after `docker-credential-`). For example:
+
+```
+{
+ "auths": {
+ "localhost:5001": {}
+ },
+ "credHelpers": {
+ "registry.example.com": "secretservice"
+ }
+}
+```
+
+For more information on credential helpers, please reference the [GitHub docker-credential-helpers project](https://github.com/docker/docker-credential-helpers/releases).
+
+# SEE ALSO
+ buildah-login(1), buildah-logout(1), podman-login(1), podman-logout(1), skopeo-login(1), skopeo-logout(1)
+
+# HISTORY
+Feb 2020, Originally compiled by Tom Sweeney <tsweeney@redhat.com>
diff --git a/docs/containers-certs.d.5.md b/docs/containers-certs.d.5.md
new file mode 100644
index 0000000..828ff76
--- /dev/null
+++ b/docs/containers-certs.d.5.md
@@ -0,0 +1,28 @@
+% containers-certs.d 5 Directory for storing custom container-registry TLS configurations
+
+# NAME
+containers-certs.d - Directory for storing custom container-registry TLS configurations
+
+# DESCRIPTION
+A custom TLS configuration for a container registry can be set up by creating a directory under `$HOME/.config/containers/certs.d` or `/etc/containers/certs.d`.
+The name of the directory must correspond to the `host:port` of the registry (e.g., `my-registry.com:5000`).
+
+## Directory Structure
+A certs directory can contain one or more files with the following extensions:
+
+* `*.crt` files with this extension will be interpreted as CA certificates
+* `*.cert` files with this extension will be interpreted as client certificates
+* `*.key` files with this extension will be interpreted as client keys
+
+Note that the client certificate-key pair will be selected by the file name (e.g., `client.{cert,key}`).
+An exemplary setup for a registry running at `my-registry.com:5000` may look as follows:
+```
+/etc/containers/certs.d/ <- Certificate directory
+└── my-registry.com:5000 <- Hostname:port
+ ├── client.cert <- Client certificate
+ ├── client.key <- Client key
+ └── ca.crt <- Certificate authority that signed the registry certificate
+```
+
+# HISTORY
+Feb 2019, Originally compiled by Valentin Rothberg <rothberg@redhat.com>
diff --git a/docs/containers-policy.json.5.md b/docs/containers-policy.json.5.md
new file mode 100644
index 0000000..909d04a
--- /dev/null
+++ b/docs/containers-policy.json.5.md
@@ -0,0 +1,493 @@
+% CONTAINERS-POLICY.JSON 5 policy.json Man Page
+% Miloslav Trmač
+% September 2016
+
+# NAME
+containers-policy.json - syntax for the signature verification policy file
+
+## DESCRIPTION
+
+Signature verification policy files are used to specify policy, e.g. trusted keys,
+applicable when deciding whether to accept an image, or individual signatures of that image, as valid.
+
+By default, the policy is read from `$HOME/.config/containers/policy.json`, if it exists, otherwise from `/etc/containers/policy.json`; applications performing verification may allow using a different policy instead.
+
+## FORMAT
+
+The signature verification policy file, usually called `policy.json`,
+uses a JSON format. Unlike some other JSON files, its parsing is fairly strict:
+unrecognized, duplicated or otherwise invalid fields cause the entire file,
+and usually the entire operation, to be rejected.
+
+The purpose of the policy file is to define a set of *policy requirements* for a container image,
+usually depending on its location (where it is being pulled from) or otherwise defined identity.
+
+Policy requirements can be defined for:
+
+- An individual *scope* in a *transport*.
+ The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`),
+ and *scope* values are defined by each transport; see below for more details.
+
+ Usually, a scope can be defined to match a single image, and various prefixes of
+ such a most specific scope define namespaces of matching images.
+
+- A default policy for a single transport, expressed using an empty string as a scope.
+
+- A global default policy.
+
+If multiple policy requirements match a given image, only the requirements from the most specific match apply;
+the more general policy requirements definitions are ignored.
+
+This is expressed in JSON using the top-level syntax
+```js
+{
+ "default": [/* policy requirements: global default */]
+ "transports": {
+ transport_name: {
+ "": [/* policy requirements: default for transport $transport_name */],
+ scope_1: [/* policy requirements: default for $scope_1 in $transport_name */],
+ scope_2: [/*…*/]
+ /*…*/
+ },
+ transport_name_2: {/*…*/}
+ /*…*/
+ }
+}
+```
+
+The global `default` set of policy requirements is mandatory; all of the other fields
+(`transports` itself, any specific transport, the transport-specific default, etc.) are optional.
+
+<!-- NOTE: Keep this in sync with transports/transports.go! -->
+## Supported transports and their scopes
+
+See containers-transports(5) for general documentation about the transports and their reference syntax.
+
+### `atomic:`
+
+The deprecated `atomic:` transport refers to images in an Atomic Registry.
+
+Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]],
+i.e. either specifying a complete name of a tagged image, or a prefix denoting
+a host/namespace/image stream, or a wildcarded expression starting with `*.` for matching all
+subdomains. For wildcarded subdomain matching, `*.example.com` is a valid case, but `example*.*.com` is not.
+
+*Note:* The _hostname_ and _port_ refer to the container registry host and port (the one used
+e.g. for `docker pull`), _not_ to the OpenShift API host and port.
+
+### `containers-storage:`
+
+Supported scopes have the form `[`_storage-specifier_`]`_image-scope_.
+
+`[`_storage-specifier_`]` is usually `[`_graph-driver-name_`@`_graph-root_`]`, e.g. `[overlay@/var/lib/containers/storage]`.
+
+_image-scope_ matching the individual image is
+- a named Docker reference *in the fully expanded form*, either using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`)
+- and/or (depending on which one the user’s input provides) `@`_image-id_
+
+More general scopes are prefixes of individual-image scopes, and specify a less-precisely-specified image, or a repository
+(by omitting first the image ID, if any; then the digest, if any; and finally a tag, if any),
+a repository namespace, or a registry host (by only specifying the host name and possibly a port number).
+
+Finally, two full-store specifiers matching all images in the store are valid scopes:
+- `[`_graph-driver-name_`@`_graph-root_`]` and
+- `[`_graph-root_`]`
+
+Note that some tools like Podman and Buildah hard-code overrides of the signature verification policy for “push” operations,
+allowing these operations regardless of configuration in `policy.json`.
+
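+As a minimal sketch (reusing the graph root shown above as an illustration, not as a recommended policy), a policy entry scoped to everything in one local store could look like:
+
+```json
+{
+    "default": [{"type": "reject"}],
+    "transports": {
+        "containers-storage": {
+            "[overlay@/var/lib/containers/storage]": [{"type": "insecureAcceptAnything"}]
+        }
+    }
+}
+```
+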
+### `dir:`
+
+The `dir:` transport refers to images stored in local directories.
+
+Supported scopes are paths of directories (either containing a single image or
+subdirectories possibly containing images).
+
+*Note:*
+- The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
+- The top-level scope `"/"` is forbidden; use the transport default scope `""`,
+ for consistency with other transports.
+
+### `docker:`
+
+The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2".
+
+Scopes matching individual images are named Docker references *in the fully expanded form*, either
+using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
+
+More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
+a repository namespace, or a registry host (by only specifying the host name and possibly a port number)
+or a wildcarded expression starting with `*.`, for matching all subdomains (not including a port number). For wildcarded subdomain
+matching, `*.example.com` is a valid case, but `example*.*.com` is not.
+
+### `docker-archive:`
+
+Only the default `""` scope is supported.
+
+### `docker-daemon:`
+
+For references using the _algo:digest_ format (referring to an image ID), only the default `""` scope is used.
+
+For images using a named reference, scopes matching individual images are *in the fully expanded form*, either
+using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
+
+More general named scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
+a repository namespace, or a registry host (by only specifying the host name and possibly a port number)
+or a wildcarded expression starting with `*.`, for matching all subdomains (not including a port number). For wildcarded subdomain
+matching, `*.example.com` is a valid case, but `example*.*.com` is not.
+
+### `oci:`
+
+The `oci:` transport refers to images in directories compliant with "Open Container Image Layout Specification".
+
+Supported scopes are paths to directories
+(either containing an OCI layout, or subdirectories possibly containing OCI layout directories).
+The _reference_ annotation value, if any, is not used.
+
+*Note:*
+- The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
+- The top-level scope `"/"` is forbidden; use the transport default scope `""`,
+ for consistency with other transports.
+
+### `oci-archive:`
+
+Supported scopes are paths to OCI archives, and their parent directories
+(either containing a single archive, or subdirectories possibly containing archives).
+The _reference_ annotation value, if any, is not used.
+
+*Note:*
+- The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
+- The top-level scope `"/"` is forbidden; use the transport default scope `""`,
+ for consistency with other transports.
+
+### `ostree`:
+
+Supported scopes have the form _repo-path_`:`_image-scope_; _repo-path_ is the path to the OSTree repository.
+
+_image-scope_ is the _docker_reference_ part of the reference, with a `:latest` tag implied if no tag is present,
+and parent namespaces of the _docker_reference_ value (by omitting the tag, or a prefix specifying a higher-level namespace).
+
+*Note:*
+- The _repo-path_ must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
+
+### `sif:`
+
+Supported scopes are paths to Singularity images, and their parent directories
+(either containing images, or subdirectories possibly containing images).
+
+*Note:*
+- The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
+- The top-level scope `"/"` is forbidden; use the transport default scope `""`,
+ for consistency with other transports.
+
+### `tarball:`
+
+The `tarball:` transport is an implementation detail of some import workflows. Only the default `""` scope is supported.
+
+## Policy Requirements
+
+Using the mechanisms above, a set of policy requirements is looked up. The policy requirements
+are represented as a JSON array of individual requirement objects. For an image to be accepted,
+*all* of the requirements must be satisfied simultaneously.
+
+The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author);
+in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object.
+
+The following requirement objects are supported:
+
+### `insecureAcceptAnything`
+
+A simple requirement with the following syntax
+
+```json
+{"type":"insecureAcceptAnything"}
+```
+
+This requirement accepts any image (but note that other requirements in the array still apply).
+
+When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though.
+
+This is useful primarily for policy scopes where no signature verification is required;
+because the array of policy requirements must not be empty, this requirement is used
+to represent the lack of requirements explicitly.
+
+### `reject`
+
+A simple requirement with the following syntax:
+
+```json
+{"type":"reject"}
+```
+
+This requirement rejects every image, and every signature.
+
+### `signedBy`
+
+This requirement requires an image to be signed using “simple signing” with an expected identity, or accepts a signature if it is using an expected identity and key.
+
+```js
+{
+ "type": "signedBy",
+ "keyType": "GPGKeys", /* The only currently supported value */
+ "keyPath": "/path/to/local/keyring/file",
+ "keyPaths": ["/path/to/local/keyring/file1","/path/to/local/keyring/file2"…],
+ "keyData": "base64-encoded-keyring-data",
+ "signedIdentity": identity_requirement
+}
+```
+<!-- Later: other keyType values -->
+
+Exactly one of `keyPath`, `keyPaths` and `keyData` must be present, containing a GPG keyring of one or more public keys. Only signatures made by these keys are accepted.
+
+The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image.
+One of the following alternatives is supported:
+
+- The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail.
+
+ ```json
+ {"type":"matchExact"}
+ ```
+- If the image identity carries a tag, the identity in the signature must exactly match;
+ if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag).
+
+ (Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.)
+
+ ```json
+ {"type":"matchRepoDigestOrExact"}
+ ```
+- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version.
+
+ ```json
+ {"type":"matchRepository"}
+ ```
+- The identity in the signature must exactly match a specified identity.
+ This is useful e.g. when locally mirroring images signed using their public identity.
+
+ ```js
+ {
+ "type": "exactReference",
+ "dockerReference": docker_reference_value
+ }
+ ```
+- The identity in the signature must be in the same repository as a specified identity.
+ This combines the properties of `matchRepository` and `exactReference`.
+
+ ```js
+ {
+ "type": "exactRepository",
+ "dockerRepository": docker_repository_value
+ }
+ ```
+- Prefix remapping:
+
+ If the image identity matches the specified prefix, that prefix is replaced by the specified “signed prefix”
+ (otherwise it is used as unchanged and no remapping takes place);
+ matching then follows the `matchRepoDigestOrExact` semantics documented above
+ (i.e. if the image identity carries a tag, the identity in the signature must exactly match,
+ if it uses a digest reference, the repository must match).
+
+ The `prefix` and `signedPrefix` values can be either host[:port] values
+ (matching exactly the same host[:port] string),
+ repository namespaces, or repositories (i.e. they must not contain tags/digests),
+ and match as prefixes *of the fully expanded form*.
+ For example, use `docker.io/library/busybox` (*not* `busybox`) to specify that single repository,
+ or `docker.io/library` (not an empty string) to specify the parent namespace of `docker.io/library/busybox`==`busybox`.
+
+ The `prefix` value is usually the same as the scope containing the parent `signedBy` requirement.
+
+ ```js
+ {
+ "type": "remapIdentity",
+ "prefix": prefix,
+ "signedPrefix": prefix,
+ }
+ ```
+
+If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`.
+
+*Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can only be used if a Docker-like image identity is
+provided by the transport. In particular, the `dir:` and `oci:` transports can only be
+used with `exactReference` or `exactRepository`.
+
+<!-- ### `signedBaseLayer` -->
+
+
+### `sigstoreSigned`
+
+This requirement requires an image to be signed using a sigstore signature with an expected identity and key.
+
+```js
+{
+ "type": "sigstoreSigned",
+ "keyPath": "/path/to/local/public/key/file",
+ "keyData": "base64-encoded-public-key-data",
+ "fulcio": {
+ "caPath": "/path/to/local/CA/file",
+ "caData": "base64-encoded-CA-data",
+ "oidcIssuer": "https://expected.OIDC.issuer/",
+ "subjectEmail", "expected-signing-user@example.com",
+ },
+ "rekorPublicKeyPath": "/path/to/local/public/key/file",
+ "rekorPublicKeyData": "base64-encoded-public-key-data",
+ "signedIdentity": identity_requirement
+}
+```
+Exactly one of `keyPath`, `keyData` and `fulcio` must be present.
+
+If `keyPath` or `keyData` is present, it contains a sigstore public key.
+Only signatures made by this key are accepted.
+
+If `fulcio` is present, the signature must be based on a Fulcio-issued certificate.
+One of `caPath` and `caData` must be specified, containing the public key of the Fulcio instance.
+Both `oidcIssuer` and `subjectEmail` are mandatory,
+exactly specifying the expected identity provider,
+and the identity of the user obtaining the Fulcio certificate.
+
+At most one of `rekorPublicKeyPath` and `rekorPublicKeyData` can be present;
+it is mandatory if `fulcio` is specified.
+If a Rekor public key is specified,
+the signature must have been uploaded to a Rekor server
+and the signature must contain an (offline-verifiable) “signed entry timestamp”
+proving the existence of the Rekor log record,
+signed by the provided public key.
+
+The `signedIdentity` field has the same semantics as in the `signedBy` requirement described above.
+Note that `cosign`-created signatures only contain a repository, so only `matchRepository` and `exactRepository` can be used to accept them (and that does not protect against substitution of a signed image with an unexpected tag).
+
+To use this with images hosted on image registries, the `use-sigstore-attachments` option needs to be enabled for the relevant registry or repository in the client's containers-registries.d(5).
+
+## Examples
+
+It is *strongly* recommended to set the `default` policy to `reject`, and then
+selectively allow individual transports and scopes as desired.
+
+### A reasonably locked-down system
+
+(Note that the `/*`…`*/` comments are not valid in JSON, and must not be used in real policies.)
+
+```js
+{
+ "default": [{"type": "reject"}], /* Reject anything not explicitly allowed */
+ "transports": {
+ "docker": {
+ /* Allow installing images from a specific repository namespace, without cryptographic verification.
+ This namespace includes images like openshift/hello-openshift and openshift/origin. */
+ "docker.io/openshift": [{"type": "insecureAcceptAnything"}],
+ /* Similarly, allow installing the “official” busybox images. Note how the fully expanded
+ form, with the explicit /library/, must be used. */
+ "docker.io/library/busybox": [{"type": "insecureAcceptAnything"}],
+ /* Allow installing images from all subdomains */
+ "*.temporary-project.example.com": [{"type": "insecureAcceptAnything"}],
+ /* A sigstore-signed repository */
+ "hostname:5000/myns/sigstore-signed-with-full-references": [
+ {
+ "type": "sigstoreSigned",
+ "keyPath": "/path/to/sigstore-pubkey.pub"
+ }
+ ],
+ /* A sigstore-signed repository using the community Fulcio+Rekor servers.
+
+ The community servers’ public keys can be obtained from
+ https://github.com/sigstore/sigstore/tree/main/pkg/tuf/repository/targets . */
+ "hostname:5000/myns/sigstore-signed-fulcio-rekor": [
+ {
+ "type": "sigstoreSigned",
+ "fulcio": {
+ "caPath": "/path/to/fulcio_v1.crt.pem",
+ "oidcIssuer": "https://github.com/login/oauth",
+ "subjectEmail": "test-user@example.com"
+ },
+ "rekorPublicKeyPath": "/path/to/rekor.pub",
+ }
+ ],
+ /* A sigstore-signed repository, accepts signatures by /usr/bin/cosign */
+ "hostname:5000/myns/sigstore-signed-allows-malicious-tag-substitution": [
+ {
+ "type": "sigstoreSigned",
+ "keyPath": "/path/to/sigstore-pubkey.pub",
+ "signedIdentity": {"type": "matchRepository"}
+ }
+ ],
+ /* A sigstore-signed repository using the community Fulcio+Rekor servers,
+ accepts signatures by /usr/bin/cosign.
+
+ The community servers’ public keys can be obtained from
+ https://github.com/sigstore/sigstore/tree/main/pkg/tuf/repository/targets . */
+ "hostname:5000/myns/sigstore-signed-fulcio-rekor- allows-malicious-tag-substitution": [
+ {
+ "type": "sigstoreSigned",
+ "fulcio": {
+ "caPath": "/path/to/fulcio_v1.crt.pem",
+ "oidcIssuer": "https://github.com/login/oauth",
+ "subjectEmail": "test-user@example.com"
+ },
+ "rekorPublicKeyPath": "/path/to/rekor.pub",
+ "signedIdentity": { "type": "matchRepository" }
+ }
+ ]
+ /* Other docker: images use the global default policy and are rejected */
+ },
+ "dir": {
+ "": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */
+ },
+ "atomic": {
+ /* The common case: using a known key for a repository or set of repositories */
+ "hostname:5000/myns/official": [
+ {
+ "type": "signedBy",
+ "keyType": "GPGKeys",
+ "keyPath": "/path/to/official-pubkey.gpg"
+ }
+ ],
+ /* A more complex example, for a repository which contains a mirror of a third-party product,
+ which must be signed-off by local IT */
+ "hostname:5000/vendor/product": [
+ { /* Require the image to be signed by the original vendor, using the vendor's repository location. */
+ "type": "signedBy",
+ "keyType": "GPGKeys",
+ "keyPath": "/path/to/vendor-pubkey.gpg",
+ "signedIdentity": {
+ "type": "exactRepository",
+ "dockerRepository": "vendor-hostname/product/repository"
+ }
+ },
+ { /* Require the image to _also_ be signed by a local reviewer. */
+ "type": "signedBy",
+ "keyType": "GPGKeys",
+ "keyPath": "/path/to/reviewer-pubkey.gpg"
+ }
+ ],
+ /* A way to mirror many repositories from a single vendor */
+ "private-mirror:5000/vendor-mirror": [
+ { /* Require the image to be signed by the original vendor, using the vendor's repository location.
+ For example, private-mirror:5000/vendor-mirror/productA/image1:latest needs to be signed as
+ vendor.example/productA/image1:latest . */
+ "type": "signedBy",
+ "keyType": "GPGKeys",
+ "keyPath": "/path/to/vendor-pubkey.gpg",
+ "signedIdentity": {
+ "type": "remapIdentity",
+ "prefix": "private-mirror:5000/vendor-mirror",
+ "signedPrefix": "vendor.example.com"
+ }
+ }
+ ]
+ }
+ }
+}
+```
+
+### Completely disable security, allow all images, do not trust any signatures
+
+```json
+{
+ "default": [{"type": "insecureAcceptAnything"}]
+}
+```
+## SEE ALSO
+ atomic(1)
+
+## HISTORY
+August 2018, Rename to containers-policy.json(5) by Valentin Rothberg <vrothberg@suse.com>
+
+September 2016, Originally compiled by Miloslav Trmač <mitr@redhat.com>
diff --git a/docs/containers-registries.conf.5.md b/docs/containers-registries.conf.5.md
new file mode 100644
index 0000000..0263b79
--- /dev/null
+++ b/docs/containers-registries.conf.5.md
@@ -0,0 +1,323 @@
+% CONTAINERS-REGISTRIES.CONF 5 System-wide registry configuration file
+% Brent Baude
+% Aug 2017
+
+# NAME
+containers-registries.conf - Syntax of System Registry Configuration File
+
+# DESCRIPTION
+The CONTAINERS-REGISTRIES configuration file is a system-wide configuration
+file for container image registries. The file format is TOML.
+
+Container engines will use `$HOME/.config/containers/registries.conf` if it exists; otherwise they will use `/etc/containers/registries.conf`.
+
+### GLOBAL SETTINGS
+
+`unqualified-search-registries`
+: An array of _host_[`:`_port_] registries to try when pulling an unqualified image, in order.
+
+`credential-helpers`
+: An array of default credential helpers used as external credential stores. Note that "containers-auth.json" is a reserved value to use auth files as specified in containers-auth.json(5). The credential helpers are set to `["containers-auth.json"]` if none are specified.
+
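+A minimal illustration of these two global settings (the hostnames are examples only) might be:
+
+```
+unqualified-search-registries = ["registry.example.com", "quay.io"]
+credential-helpers = ["containers-auth.json"]
+```
+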
+### NAMESPACED `[[registry]]` SETTINGS
+
+The bulk of the configuration is represented as an array of `[[registry]]`
+TOML tables; the settings may therefore differ among different registries
+as well as among different namespaces/repositories within a registry.
+
+#### Choosing a `[[registry]]` TOML table
+
+Given an image name, a single `[[registry]]` TOML table is chosen based on its `prefix` field.
+
+`prefix`: A prefix of the user-specified image name, i.e. using one of the following formats:
+ - _host_[`:`_port_]
+ - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]
+ - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_
+ - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_(`:`_tag_|`@`_digest_)
+ - [`*.`]_host_
+
+The user-specified image name must start with the specified `prefix` (and continue
+with the appropriate separator) for a particular `[[registry]]` TOML table to be
+considered; (only) the TOML table with the longest match is used. It can
+also include wildcarded subdomains in the format `*.example.com`.
+The wildcard should only be present at the beginning as shown in the formats
+above. Other cases will not work. For example, `*.example.com` is valid but
+`example.*.com`, `*.example.com/foo` and `*.example.com:5000/foo/bar:baz` are not.
+Note that `*` matches an arbitrary number of subdomains. `*.example.com` will hence
+match `bar.example.com`, `foo.bar.example.com` and so on.
+
+As a special case, the `prefix` field can be missing; if so, it defaults to the value
+of the `location` field (described below).
+
+#### Per-namespace settings
+
+`insecure`
+: `true` or `false`.
+By default, container runtimes require TLS when retrieving images from a registry.
+If `insecure` is set to `true`, unencrypted HTTP as well as TLS connections with untrusted
+certificates are allowed.
+
+`blocked`
+: `true` or `false`.
+If `true`, pulling images with matching names is forbidden.
+
+#### Remapping and mirroring registries
+
+The user-specified image reference is, primarily, a "logical" image name, always used for naming
+the image. By default, the image reference also directly specifies the registry and repository
+to use, but the following options can be used to redirect the underlying accesses
+to different registry servers or locations (e.g. to support configurations with no access to the
+internet without having to change `Dockerfile`s, or to add redundancy).
+
+`location`
+: Accepts the same format as the `prefix` field, and specifies the physical location
+of the `prefix`-rooted namespace.
+
+By default, this is equal to `prefix` (in which case `prefix` can be omitted and the
+`[[registry]]` TOML table can only specify `location`).
+
+Example: Given
+```
+prefix = "example.com/foo"
+location = "internal-registry-for-example.com/bar"
+```
+requests for the image `example.com/foo/myimage:latest` will actually work with the
+`internal-registry-for-example.com/bar/myimage:latest` image.
+
+With a `prefix` containing a wildcard in the format `"*.example.com"` for subdomain matching,
+the `location` can be empty. In such a case,
+prefix matching will occur, but no reference rewrite will occur; the
+original requested image string will be used as-is. Other settings like
+`insecure` / `blocked` / `mirrors` will still be applied to matching images.
+
+Example: Given
+```
+prefix = "*.example.com"
+```
+requests for the image `blah.example.com/foo/myimage:latest` will be used
+as-is, but other settings like `insecure` / `blocked` / `mirrors` will still be applied to matching images.
+
+`mirror`
+: An array of TOML tables specifying (possibly-partial) mirrors for the
+`prefix`-rooted namespace (i.e., the current `[[registry]]` TOML table).
+
+The mirrors are attempted in the specified order; the first one that can be
+contacted and contains the image will be used (and if none of the mirrors contains the image,
+the primary location specified by the `registry.location` field, or using the unmodified
+user-specified reference, is tried last).
+
+Each TOML table in the `mirror` array can contain the following fields:
+- `location`: same semantics
+as specified in the `[[registry]]` TOML table
+- `insecure`: same semantics
+as specified in the `[[registry]]` TOML table
+- `pull-from-mirror`: `all`, `digest-only` or `tag-only`. If `digest-only`, mirrors will only be used for digest pulls. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from; restricting mirrors to pulls by digest avoids that issue. If `tag-only`, mirrors will only be used for tag pulls; a mirror that is more up-to-date but more expensive, and therefore less likely to be out of sync when tags move, should not be used unnecessarily for digest references. The default is `all` (or leaving the field empty): mirrors will be used for both digest pulls and tag pulls unless `mirror-by-digest-only` is set for the primary registry.
+Note that this per-mirror setting is allowed only when `mirror-by-digest-only` is not configured for the primary registry; see the illustrative configuration below.
+
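+For illustration, a registry with a mirror restricted to digest pulls via `pull-from-mirror = "digest-only"` (the hostnames are examples only) could be configured as:
+
+```
+[[registry]]
+prefix = "registry.example.com"
+location = "registry.example.com"
+
+[[registry.mirror]]
+location = "mirror.example.net/registry"
+pull-from-mirror = "digest-only"
+```
+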
+`mirror-by-digest-only`
+: `true` or `false`.
+If `true`, mirrors will only be used during pulling if the image reference includes a digest.
+Note that if all mirrors are configured to be digest-only, images referenced by a tag will only use the primary
+registry.
+If all mirrors are configured to be tag-only, images referenced by a digest will only use the primary
+registry.
+
+Referencing an image by digest ensures that the same image is always used
+(whereas referencing an image by a tag may cause different registries to return
+different images if the tag mapping is out of sync).
+
+
+*Note*: Redirection and mirrors are currently processed only when reading a single image,
+not when pushing to a registry nor when doing any other kind of lookup/search on a registry.
+This may change in the future.
+
+#### Short-Name Aliasing
+The use of unqualified-search registries entails an ambiguity, as it is
+unclear from which registry a given image, referenced by a short name,
+may be pulled.
+
+As mentioned in the note at the end of this man page, using short names is
+subject to the risk of hitting squatted registry namespaces. If the
+unqualified-search registries are set to `["registry1.com", "registry2.com"]`
+an attacker may take over a namespace of registry1.com such that an image may
+be pulled from registry1.com instead of the intended source registry2.com.
+
+While it is highly recommended to always use fully-qualified image references,
+existing deployments using short names may not be easily changed. To
+circumvent the aforementioned ambiguity, so-called short-name aliases can be
+configured that point to a fully-qualified image
+reference.
+
+Short-name aliases can be configured in the `[aliases]` table in the form of
+`"name"="value"` with the left-hand `name` being the short name (e.g., "image")
+and the right-hand `value` being the fully-qualified image reference (e.g.,
+"registry.com/namespace/image"). Note that neither "name" nor "value" can
+include a tag or digest. Moreover, "name" must be a short name and hence
+cannot include a registry domain or refer to localhost.
+
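+A minimal `[aliases]` entry might look like this (the alias and its target are illustrative):
+
+```
+[aliases]
+"busybox" = "docker.io/library/busybox"
+```
+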
+When pulling a short name, the configured aliases table will be used for
+resolving the short name. If a matching alias is found, it will be used
+without further consulting the unqualified-search registries list. If no
+matching alias is found, the behavior can be controlled via the
+`short-name-mode` option as described below.
+
+Note that tags and digests are stripped off a user-specified short name for
+alias resolution. Hence, "image", "image:tag" and "image@digest" all resolve
+to the same alias (i.e., "image"). Stripped off tags and digests are later
+appended to the resolved alias.
+
+Further note that drop-in configuration files (see containers-registries.conf.d(5))
+can override aliases in the specific loading order of the files. If the "value" of
+an alias is empty (i.e., ""), the alias will be erased. However, a given
+"name" may only be specified once in a single config file.
+
+
+#### Short-Name Aliasing: Modes
+
+The `short-name-mode` option supports three modes to control the behaviour of
+short-name resolution.
+
+* `enforcing`: If only one unqualified-search registry is set, use it as there
+ is no ambiguity. If there is more than one registry and the user program is
+ running in a terminal (i.e., stdout & stdin are a TTY), prompt the user to
+ select one of the specified search registries. If the program is not running
+ in a terminal, the ambiguity cannot be resolved which will lead to an error.
+
+* `permissive`: Behaves as enforcing but does not lead to an error if the
+ program is not running in a terminal. Instead, fallback to using all
+ unqualified-search registries.
+
+* `disabled`: Use all unqualified-search registries without prompting.
+
+If `short-name-mode` is not specified at all or left empty, the default is the
+`permissive` mode. If the user-specified short name was not aliased already,
+the `enforcing` and `permissive` modes will, if the user was prompted, record a new alias
+after a successful pull. Note that the recorded alias will be written to
+`/var/cache/containers/short-name-aliases.conf` for root, to have a clear
+separation between possibly human-edited registries.conf files and the
+machine-generated `short-name-aliases.conf`. Note that `$HOME/.cache` is used
+for rootless users. If an alias is specified in a
+`registries.conf` file and also the machine-generated
+`short-name-aliases.conf`, the `short-name-aliases.conf` file has precedence.
+
+#### Normalization of docker.io references
+
+The Docker Hub `docker.io` is handled in a special way: every push and pull
+operation gets internally normalized with `/library` if no other specific
+namespace is defined (for example on `docker.io/namespace/image`).
+
+(Note that the above-described normalization happens to match the behavior of
+Docker.)
+
+This means that a pull of `docker.io/alpine` will be internally translated to
+`docker.io/library/alpine`. A pull of `docker.io/user/alpine` will not be
+rewritten because this is already the correct remote path.
+
+Therefore, to remap or mirror the `docker.io` images in the (implied) `/library`
+namespace (or that whole namespace), the prefix and location fields in this
+configuration file must explicitly include that `/library` namespace. For
+example `prefix = "docker.io/library/alpine"` and not `prefix =
+"docker.io/alpine"`. The latter would match the `docker.io/alpine/*`
+repositories but not the `docker.io/[library/]alpine` image.
+
+### EXAMPLE
+
+```
+unqualified-search-registries = ["example.com"]
+
+[[registry]]
+prefix = "example.com/foo"
+insecure = false
+blocked = false
+location = "internal-registry-for-example.com/bar"
+
+[[registry.mirror]]
+location = "example-mirror-0.local/mirror-for-foo"
+
+[[registry.mirror]]
+location = "example-mirror-1.local/mirrors/foo"
+insecure = true
+
+[[registry]]
+location = "registry.com"
+
+[[registry.mirror]]
+location = "mirror.registry.com"
+```
+Given the above, a pull of `example.com/foo/image:latest` will try:
+
+1. `example-mirror-0.local/mirror-for-foo/image:latest`
+2. `example-mirror-1.local/mirrors/foo/image:latest`
+3. `internal-registry-for-example.com/bar/image:latest`
+
+in order, and use the first one that exists.
+
+Note that a mirror is associated only with the current `[[registry]]` TOML table. If using the example above, pulling the image `registry.com/image:latest` will hence only reach out to `mirror.registry.com`, and the mirrors associated with `example.com/foo` will not be considered.
+
+## VERSION 1 FORMAT - DEPRECATED
+VERSION 1 format is still supported but it does not support
+using registry mirrors, longest-prefix matches, or location rewriting.
+
+The TOML format is used to build a simple list of registries under three
+categories: `registries.search`, `registries.insecure`, and `registries.block`.
+You can list multiple registries using a comma separated list.
+
+Search registries are used when the caller of a container runtime does not fully specify the
+container image that they want to execute. These registries are prepended onto the front
+of the specified container image until the named image is found at a registry.
+
+Note that insecure registries can be used for any registry, not just the registries listed
+under search.
+
+The `registries.insecure` and `registries.block` lists have the same meaning as the
+`insecure` and `blocked` fields in the current version.
+
+### EXAMPLE
+The following example configuration defines two searchable registries, one
+insecure registry, and two blocked registries.
+
+```
+[registries.search]
+registries = ['registry1.com', 'registry2.com']
+
+[registries.insecure]
+registries = ['registry3.com']
+
+[registries.block]
+registries = ['registry.untrusted.com', 'registry.unsafe.com']
+```
+
+# NOTE: RISK OF USING UNQUALIFIED IMAGE NAMES
+We recommend always using fully qualified image names including the registry
+server (full dns name), namespace, image name, and tag
+(e.g., registry.redhat.io/ubi8/ubi:latest). When using short names, there is
+always an inherent risk that the image being pulled could be spoofed. For
+example, a user wants to pull an image named `foobar` from a registry and
+expects it to come from myregistry.com. If myregistry.com is not first in the
+search list, an attacker could place a different `foobar` image at a registry
+earlier in the search list. The user would accidentally pull and run the
+attacker's image and code rather than the intended content. We recommend only
+adding registries which are completely trusted, i.e. registries which don't
+allow unknown or anonymous users to create accounts with arbitrary names. This
+will prevent an image from being spoofed, squatted or otherwise made insecure.
+If it is necessary to use one of these registries, it should be added at the
+end of the list.
+
+It is recommended to use fully-qualified images for pulling, as
+the registry to pull from is then unambiguous. Pulling by digest
+(i.e., quay.io/repository/name@digest) further eliminates the ambiguity of
+tags.
+
+# SEE ALSO
+ containers-auth.json(5) containers-certs.d(5)
+
+# HISTORY
+Dec 2019, Warning added for unqualified image names by Tom Sweeney <tsweeney@redhat.com>
+
+Mar 2019, Added additional configuration format by Sascha Grunert <sgrunert@suse.com>
+
+Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com>
+
+Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>
+
+Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
diff --git a/docs/containers-registries.conf.d.5.md b/docs/containers-registries.conf.d.5.md
new file mode 100644
index 0000000..563ccfd
--- /dev/null
+++ b/docs/containers-registries.conf.d.5.md
@@ -0,0 +1,37 @@
+% CONTAINERS-REGISTRIES.CONF.D 5
+% Valentin Rothberg
+% Mar 2020
+
+# NAME
+containers-registries.conf.d - directory for drop-in registries.conf files
+
+# DESCRIPTION
+CONTAINERS-REGISTRIES.CONF.D is a system-wide directory for drop-in
+configuration files in the `containers-registries.conf(5)` format.
+
+By default, the directory is located at `/etc/containers/registries.conf.d`.
+
+# CONFIGURATION PRECEDENCE
+
+Once the main configuration at `/etc/containers/registries.conf` is loaded, the
+files in `/etc/containers/registries.conf.d` are loaded in alpha-numerical
+order. Then the conf files in `$HOME/.config/containers/registries.conf.d` are loaded in alpha-numerical order, if they exist. If `$HOME/.config/containers/registries.conf` is loaded, only the conf files under `$HOME/.config/containers/registries.conf.d` are loaded in alpha-numerical order.
+Specified fields in a conf file will overwrite any previous setting. Note
+that only files with the `.conf` suffix are loaded; other files and
+sub-directories are ignored.
+
+For instance, setting the `unqualified-search-registries` in
+`/etc/containers/registries.conf.d/myregistries.conf` will overwrite previous
+settings in `/etc/containers/registries.conf`. The `[[registry]]` tables are merged
+by overwriting existing items if the prefixes are identical, while new ones are
+added.
+
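+For example, a drop-in file (the path and registry name are illustrative) could contain:
+
+```
+# /etc/containers/registries.conf.d/50-unqualified-search.conf
+unqualified-search-registries = ["registry.example.com"]
+```
+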
+All drop-in configuration files must be specified in version 2 of the
+`containers-registries.conf(5)` format.
+
+# SEE ALSO
+`containers-registries.conf(5)`
+
+# HISTORY
+
+Mar 2020, Originally compiled by Valentin Rothberg <rothberg@redhat.com>
diff --git a/docs/containers-registries.d.5.md b/docs/containers-registries.d.5.md
new file mode 100644
index 0000000..04434de
--- /dev/null
+++ b/docs/containers-registries.d.5.md
@@ -0,0 +1,140 @@
+% containers-registries.d 5 Registries.d Man Page
+% Miloslav Trmač
+% August 2016
+
+# NAME
+containers-registries.d - Directory for various registries configurations
+
+# DESCRIPTION
+
+The registries configuration directory contains configuration for various registries
+(servers storing remote container images), and for content stored in them,
+so that the configuration does not have to be provided in command-line options over and over for every command,
+and so that it can be shared by all users of containers/image.
+
+By default, the registries configuration directory is `$HOME/.config/containers/registries.d` if it exists, otherwise `/etc/containers/registries.d` (unless overridden at compile-time);
+applications may allow using a different directory instead.
+
+## Directory Structure
+
+The directory may contain any number of files with the extension `.yaml`,
+each using the YAML format. Other than the mandatory extension, names of the files
+don’t matter.
+
+The contents of these files are merged together; to have a well-defined and easy to understand
+behavior, there can be only one configuration section describing a single namespace within a registry
+(in particular there can be at most one `default-docker` section across all files,
+and there can be at most one instance of any key under the `docker` section;
+these sections are documented later).
+
+Thus, it is forbidden to have two conflicting configurations for a single registry or scope,
+and it is also forbidden to split a configuration for a single registry or scope across
+more than one file (even if they are not semantically in conflict).
+
+## Registries, Scopes and Search Order
+
+Each YAML file must contain a “YAML mapping” (key-value pairs). Two top-level keys are defined:
+
+- `default-docker` is the _configuration section_ (as documented below)
+ for registries implementing "Docker Registry HTTP API V2".
+
+ This key is optional.
+
+- `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2",
+ or namespaces and individual images within these registries, as keys;
+ the value assigned to any such key is a _configuration section_.
+
+ This key is optional.
+
+ Scopes matching individual images are named Docker references *in the fully expanded form*, either
+ using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
+
+ More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
+ a repository namespace, or a registry host (and a port if it differs from the default).
+
+ Note that if a registry is accessed using a hostname+port configuration, the port-less hostname
+ is _not_ used as parent scope.
+
+When searching for a configuration to apply for an individual container image, only
+the configuration for the most-precisely matching scope is used; configuration using
+more general scopes is ignored. For example, if _any_ configuration exists for
+`docker.io/library/busybox`, the configuration for `docker.io` is ignored
+(even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`).
+
+### Built-in Defaults
+
+If no `docker` section can be found for the container image, and no `default-docker` section is configured:
+
+- The default directory, `/var/lib/containers/sigstore` for root and `$HOME/.local/share/containers/sigstore` for unprivileged users, will be used for reading and writing signatures.
+- Sigstore attachments will not be read/written.
+
+## Individual Configuration Sections
+
+A single configuration section is selected for a container image using the process
+described above. The configuration section is a YAML mapping, with the following keys:
+
+<!-- `sigstore` and `sigstore-staging` are deprecated and intentionally not documented here. -->
+
+- `lookaside-staging` defines a URL of the signature storage, used for editing it (adding or deleting signatures).
+
+ This key is optional; if it is missing, `lookaside` below is used.
+
+- `lookaside` defines a URL of the signature storage.
+ This URL is used for reading existing signatures,
+ and if `lookaside-staging` does not exist, also for adding or removing them.
+
+ This key is optional; if it is missing, no signature storage is defined (no signatures
+ are downloaded along with images, and adding new signatures is possible only if `lookaside-staging` is defined).
+
+- `use-sigstore-attachments` specifies whether sigstore image attachments (signatures, attestations and the like) are going to be read/written along with the image.
+ If disabled, the images are treated as if no attachments exist; attempts to write attachments fail.
+
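+For instance, enabling `use-sigstore-attachments` for a single repository (a minimal sketch, with an illustrative registry name) could look like:
+
+```yaml
+docker:
+  registry.example.com/project:
+    use-sigstore-attachments: true
+```
+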
+## Examples
+
+### Using Containers from Various Origins
+
+The following demonstrates how to consume and run images from various registries and namespaces:
+
+```yaml
+docker:
+ registry.database-supplier.com:
+ lookaside: https://lookaside.database-supplier.com
+ distribution.great-middleware.org:
+ lookaside: https://security-team.great-middleware.org/lookaside
+ docker.io/web-framework:
+ lookaside: https://lookaside.web-framework.io:8080
+```
+
+### Developing and Signing Containers, Staging Signatures
+
+For developers in `example.com`:
+
+- Consume most container images using the public servers also used by clients.
+- Use a separate signature storage for container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures.
+- Craft an individual exception for a single branch a specific developer is working on locally.
+
+```yaml
+docker:
+ registry.example.com:
+ lookaside: https://registry-lookaside.example.com
+ registry.example.com/mydepartment:
+ lookaside: https://lookaside.mydepartment.example.com
+ lookaside-staging: file:///mnt/mydepartment/lookaside-staging
+ registry.example.com/mydepartment/myproject:mybranch:
+ lookaside: http://localhost:4242/lookaside
+ lookaside-staging: file:///home/useraccount/webroot/lookaside
+```
+
+### A Global Default
+
+If a company publishes its products using a different domain, and different registry hostname for each of them, it is still possible to use a single signature storage server
+without listing each domain individually. This is expected to rarely happen, usually only for staging new signatures.
+
+```yaml
+default-docker:
+ lookaside-staging: file:///mnt/company/common-lookaside-staging
+```
+
+# AUTHORS
+
+Miloslav Trmač <mitr@redhat.com>
diff --git a/docs/containers-signature.5.md b/docs/containers-signature.5.md
new file mode 100644
index 0000000..cc6f9c3
--- /dev/null
+++ b/docs/containers-signature.5.md
@@ -0,0 +1,246 @@
+% container-signature 5 Container signature format
+% Miloslav Trmač
+% March 2017
+
+# NAME
+container-signature - Container signature format
+
+# DESCRIPTION
+This document describes the format of container signatures,
+as implemented by the `github.com/containers/image/signature` package.
+
+Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
+(preferably through the higher-level `signature.PolicyContext` interface)
+without having to care about the details of the format described below.
+This documentation exists primarily for maintainers of the package
+and to allow independent reimplementations.
+
+## High-level overview
+
+The signature provides an end-to-end authenticated claim that a container image
+has been approved by a specific party (e.g. the creator of the image as their work,
+an automated build system as a result of an automated build,
+a company IT department approving the image for production) under a specified _identity_
+(e.g. an OS base image / specific application, with a specific version).
+
+A container signature consists of a cryptographic signature which identifies
+and authenticates who signed the image, and carries as a signed payload a JSON document.
+The JSON document identifies the image being signed, claims a specific identity of the
+image and if applicable, contains other information about the image.
+
+The signatures do not modify the container image (the layers, configuration, manifest, …);
+e.g. their presence does not change the manifest digest used to identify the image in
+docker/distribution servers; rather, the signatures are associated with an immutable image.
+An image can have any number of signatures so signature distribution systems SHOULD support
+associating more than one signature with an image.
+
+## The cryptographic signature
+
+As distributed, the container signature is a blob which contains a cryptographic signature
+in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the
+JSON document and a signature of the JSON document; it is not a “detached signature” with
+independent blobs containing the JSON document and a cryptographic signature).
+
+Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880),
+but others may be added in the future. (The blob does not contain metadata identifying the
+cryptographic signature format. It is expected that most formats are sufficiently self-describing
+that this is not necessary and the configured expected public key provides another indication
+of the expected cryptographic signature format. Such metadata may be added in the future for
+newly added cryptographic signature formats, if necessary.)
+
+Consumers of container signatures SHOULD verify the cryptographic signature
+against one or more trusted public keys
+(e.g. defined in a [policy.json signature verification policy file](containers-policy.json.5.md))
+before parsing or processing the JSON payload in _any_ way,
+in particular they SHOULD stop processing the container signature
+if the cryptographic signature verification fails, without even starting to process the JSON payload.
+
+(Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature,
+and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs,
+e.g. to list the authors and image identities of signatures associated with a single container image;
+if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted
+or in any way usable for making policy decisions about the image.)
+
+### OpenPGP signature verification
+
+When verifying a cryptographic signature in the OpenPGP format,
+the consumer MUST verify at least the following aspects of the signature
+(like the `github.com/containers/image/signature` package does):
+
+- The blob MUST be a “Signed Message” as defined in RFC 4880 section 11.3.
+ (e.g. it MUST NOT be an unsigned “Literal Message”,
+ a “Cleartext Signature” as defined in RFC 4880 section 7,
+ or any other non-signature format).
+- The signature MUST have been made by an expected key trusted for the purpose (and the specific container image).
+- The signature MUST be correctly formed and pass the cryptographic validation.
+- The signature MUST correctly authenticate the included JSON payload
+ (in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated).
+- The signature MUST NOT be expired.
+
+The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected.
+
+## JSON processing and forward compatibility
+
+The payload of the cryptographic signature is a JSON document (RFC 7159).
+Consumers SHOULD parse it very strictly,
+refusing any signature which violates the expected format (e.g. missing members, incorrect member types)
+or can be interpreted ambiguously (e.g. a duplicated member in a JSON object).
+
+Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized
+to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below)
+and if the semantics of the invalid document, as created by such an implementation, is clear.
+
+The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
+each a JSON object.
+
+The `critical` object MUST contain a `type` member identifying the document as a container signature
+(as defined [below](#criticaltype))
+and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.
+
+To ensure forward compatibility (allowing older signature consumers to correctly
+accept or reject signatures created at a later date, with possible extensions to this format),
+consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects,
+contain _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected.
+At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members.
+
+For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object,
+and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported.
+Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid.
+
+## JSON data format
+
+An example of the full format follows, with detailed description below.
+To reiterate, consumers of the signature SHOULD perform successful cryptographic verification,
+and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above.
+
+```json
+{
+  "critical": {
+    "type": "atomic container signature",
+    "image": {
+      "docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
+    },
+    "identity": {
+      "docker-reference": "docker.io/library/busybox:latest"
+    }
+  },
+  "optional": {
+    "creator": "some software package v1.0.1-35",
+    "timestamp": 1483228800
+  }
+}
+```
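+
+As a rough illustration of the parsing rules above, the following hypothetical Go sketch (not the actual `signature` package code) uses `encoding/json` with `DisallowUnknownFields` to reject unexpected members of the top-level and `critical` objects while accepting unknown members of `optional`. Note that `encoding/json` by itself neither rejects duplicated members nor detects missing ones, so a real reimplementation needs additional checks.
+
+```go
+package sigformat
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+)
+
+// Known members of the "critical" object; DisallowUnknownFields below makes
+// the decoder reject any member which is not listed here.
+type criticalImage struct {
+    DockerManifestDigest string `json:"docker-manifest-digest"`
+}
+type criticalIdentity struct {
+    DockerReference string `json:"docker-reference"`
+}
+type critical struct {
+    Type     string           `json:"type"`
+    Image    criticalImage    `json:"image"`
+    Identity criticalIdentity `json:"identity"`
+}
+
+// "optional" is decoded into a map, so unrecognized members are accepted.
+type payload struct {
+    Critical critical       `json:"critical"`
+    Optional map[string]any `json:"optional"`
+}
+
+func parseSignaturePayload(blob []byte) (*payload, error) {
+    dec := json.NewDecoder(bytes.NewReader(blob))
+    dec.DisallowUnknownFields() // reject unexpected members in the top-level and "critical" objects
+    var p payload
+    if err := dec.Decode(&p); err != nil {
+        return nil, err
+    }
+    if p.Critical.Type != "atomic container signature" {
+        return nil, fmt.Errorf("unrecognized signature type %q", p.Critical.Type)
+    }
+    return &p, nil
+}
+```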
+
+### `critical`
+
+This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature.
+
+Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
+
+### `critical.type`
+
+This MUST be a JSON string, with a value exactly equal to `atomic container signature` (three words, including the spaces).
+
+Signature consumers MUST reject signatures which do not have this member, or in which this member does not have exactly the expected value.
+
+(The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future;
+if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`,
+not by this document.)
+
+### `critical.image`
+
+This MUST be a JSON object which identifies the container image this signature applies to.
+
+Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
+
+(Currently only the `docker-manifest-digest` way of identifying a container image is defined;
+alternatives to this may be defined in the future,
+but existing consumers are required to reject signatures which use formats they do not support.)
+
+### `critical.image.docker-manifest-digest`
+
+This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format.
+
+The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system.
+
+The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way
+(e.g. parsing the manifest further or downloading layers of the image).
+
+Implementation notes:
+* A single container image manifest may have several valid manifest digest values, using different algorithms.
+* For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests,
+the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob.
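+
+For illustration, a reimplementation might check this member against the raw manifest bytes roughly as follows. This is a hypothetical sketch using the `github.com/opencontainers/go-digest` module, and it does not handle the schema 1 special case noted above.
+
+```go
+package sigformat
+
+import (
+    "fmt"
+
+    "github.com/opencontainers/go-digest"
+)
+
+// matchesManifest returns nil if signedDigest (the value of
+// critical.image.docker-manifest-digest) matches the raw manifest bytes.
+func matchesManifest(signedDigest string, manifest []byte) error {
+    d, err := digest.Parse(signedDigest) // validates the "algo:value" syntax
+    if err != nil {
+        return err
+    }
+    // Recompute the digest of the manifest using the same algorithm;
+    // a single manifest can have several valid digests, one per algorithm.
+    if actual := d.Algorithm().FromBytes(manifest); actual != d {
+        return fmt.Errorf("manifest digest %s does not match signed digest %s", actual, d)
+    }
+    return nil
+}
+```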
+
+### `critical.identity`
+
+This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with version information),
+as asserted by the author of the signature.
+
+Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
+
+(Currently only the `docker-reference` way of claiming an image identity/purpose is defined;
+alternatives to this may be defined in the future,
+but existing consumers are required to reject signatures which use formats they do not support.)
+
+### `critical.identity.docker-reference`
+
+This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format,
+and using the same normalization semantics (where e.g. `busybox:latest` is equivalent to `docker.io/library/busybox:latest`).
+If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning,
+the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces).
+
+The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image
+(again, accounting for the `docker/distribution/reference` normalization semantics).
+
+In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image.
+However, depending on the specific application, users or system administrators may accept less specific matches
+(e.g. ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest),
+or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image
+(e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images).
+The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](containers-policy.json.5.md).
+
+The `critical.identity.docker-reference` value SHOULD contain either a tag or digest;
+in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](containers-policy.json.5.md#signedby).)
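+
+For illustration only, the normalization can be performed with the `github.com/distribution/reference` module; this hypothetical sketch shows how a short reference expands to the fully explicit form that SHOULD be used here:
+
+```go
+package sigformat
+
+import "github.com/distribution/reference"
+
+// normalizedIdentity expands a possibly short docker reference, e.g.
+// "busybox:latest", into the fully explicit form that SHOULD be used in
+// critical.identity.docker-reference: "docker.io/library/busybox:latest".
+func normalizedIdentity(s string) (string, error) {
+    ref, err := reference.ParseNormalizedNamed(s)
+    if err != nil {
+        return "", err
+    }
+    return ref.String(), nil
+}
+```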
+
+### `optional`
+
+This MUST be a JSON object.
+
+Consumers SHOULD accept any members with unrecognized names in the `optional` object,
+and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported.
+Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid.
+
+### `optional.creator`
+
+If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature
+(identifying the low-level software implementation; not the top-level caller).
+
+The contents of this string are not defined in detail; however, each implementation creating container signatures:
+
+- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
+- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
+  changes whenever the format or semantics of the generated signature changes in any way;
+  it SHOULD NOT be possible for two implementations which use a different format or semantics to have the same `optional.creator` value
+- SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp),
+  and which makes it easy enough to recognize a range of versions of a specific implementation
+  (e.g. the version of the implementation SHOULD NOT be only a git hash, because git hashes do not have an easily defined ordering;
+  the string should contain a version number, or at least the date of the commit).
+
+Consumers of container signatures MAY recognize specific values or sets of values of `optional.creator`
+(perhaps augmented with `optional.timestamp`),
+and MAY change their processing of the signature based on these values
+(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
+as long as the semantics of the invalid document, as created by such an implementation, is clear.
+
+If consumers of signatures do change their behavior based on the `optional.creator` value,
+they SHOULD take care that the way they process the signatures is not inconsistent with
+strictly validating signature consumers.
+(I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value
+if other implementations would completely reject the signature,
+but it would be very undesirable for the two kinds of implementations to accept the signature in different
+and inconsistent situations.)
+
+### `optional.timestamp`
+
+If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created
+as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC).
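+
+For example, the value `1483228800` used in the example above corresponds to 2017-01-01T00:00:00Z; a consumer might convert the value like this (a minimal sketch):
+
+```go
+package sigformat
+
+import "time"
+
+// signatureTime converts an optional.timestamp value (seconds since the
+// UNIX epoch, representable as a 64-bit integer) into a time.Time;
+// e.g. signatureTime(1483228800) is 2017-01-01T00:00:00Z.
+func signatureTime(ts int64) time.Time {
+    return time.Unix(ts, 0).UTC()
+}
+```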
diff --git a/docs/containers-sigstore-signing-params.yaml.5.md b/docs/containers-sigstore-signing-params.yaml.5.md
new file mode 100644
index 0000000..f081cdb
--- /dev/null
+++ b/docs/containers-sigstore-signing-params.yaml.5.md
@@ -0,0 +1,117 @@
+% CONTAINERS-SIGSTORE-SIGNING-PARAMS.YAML 5 sigstore signing parameters Man Page
+% Miloslav Trmač
+% January 2023
+
+# NAME
+containers-sigstore-signing-params.yaml - syntax for the sigstore signing parameter file
+
+# DESCRIPTION
+
+Sigstore signing parameter files are used to store options that may be required to create sigstore signatures.
+There is no default location for these files; they are user-managed, and used as inputs to a container image signing operation,
+e.g. `skopeo copy --sign-by-sigstore=`_param-file_`.yaml` or `podman push --sign-by-sigstore=`_param-file_`.yaml`.
+
+## FORMAT
+
+Sigstore signing parameter files use YAML.
+
+Many parameters are optional, but the file must specify enough to create a signature;
+in particular either a private key, or Fulcio.
+
+### Signing with Private Keys
+
+- `privateKeyFile:` _path_
+
+ Create a signature using a private key at _path_.
+ Existence of this field triggers the use of a private key.
+
+- `privateKeyPassphraseFile:` _passphrasePath_
+
+ Read the passphrase required to use `privateKeyFile` from _passphrasePath_.
+ Optional: if this is not set, the user must provide the passphrase interactively.
+
+### Signing with Fulcio-generated Certificates
+
+Instead of a static private key, the signing process generates a short-lived key pair
+and requests a Fulcio server to issue a certificate for that key pair,
+based on the user authenticating to an OpenID Connect provider.
+
+To specify Fulcio, include a `fulcio` sub-object with one or more of the following keys.
+A Rekor server must also be specified.
+
+- `fulcioURL:` _URL_
+
+ Required. URL of the Fulcio server to use.
+
+- `oidcMode:` `interactive` | `deviceGrant` | `staticToken`
+
+ Required. Specifies how to obtain the necessary OpenID Connect credential.
+
+ `interactive` opens a web browser on the same machine, or if that is not possible,
+ asks the user to open a browser manually and to type in the provided code.
+ It requires the user to be able to directly interact with the signing process.
+
+ `deviceGrant` uses a device authorization grant flow (RFC 8628).
+ It requires the user to be able to read text printed by the signing process, and to act on it reasonably promptly.
+
+ `staticToken` provides a pre-existing OpenID Connect “ID token”, which must have been obtained separately.
+
+- `oidcIssuerURL:` _URL_
+
+ Required for `oidcMode:` `interactive` or `deviceGrant`. URL of an OpenID Connect issuer server to authenticate with.
+
+- `oidcClientID:` _client ID_
+
+ Used for `oidcMode:` `interactive` or `deviceGrant` to identify the client when contacting the issuer.
+ Optional but likely to be necessary in those cases.
+
+- `oidcClientSecret:` _client secret_
+
+ Used for `oidcMode:` `interactive` or `deviceGrant` to authenticate the client when contacting the issuer.
+ Optional.
+
+- `oidcIDToken:` _token_
+
+ Required for `oidcMode: staticToken`.
+ An OpenID Connect ID token that identifies the user (and authorizes certificate issuance).
+
+### Recording the Signature to a Rekor Transparency Server
+
+This can be combined with either a private key or Fulcio.
+It is, practically speaking, required for Fulcio; it is optional when a static private key is used, but necessary for
+interoperability with the default configuration of `cosign`.
+
+- `rekorURL:` _URL_
+
+ URL of the Rekor server to use.
+
+# EXAMPLES
+
+### Sign Using a Pre-existing Private Key
+
+Uses the “community infrastructure” Rekor server.
+
+```yaml
+privateKeyFile: "/home/user/sigstore/private-key.key"
+privateKeyPassphraseFile: "/mnt/user/sigstore-private-key"
+rekorURL: "https://rekor.sigstore.dev"
+```
+
+### Sign Using a Fulcio-Issued Certificate
+
+Uses the “community infrastructure” Fulcio and Rekor servers,
+and the Dex OIDC issuer which delegates to other major issuers like Google and GitHub.
+
+Other configurations will very likely need to also provide an OIDC client secret.
+
+```yaml
+fulcio:
+ fulcioURL: "https://fulcio.sigstore.dev"
+ oidcMode: "interactive"
+ oidcIssuerURL: "https://oauth2.sigstore.dev/auth"
+ oidcClientID: "sigstore"
+rekorURL: "https://rekor.sigstore.dev"
+```
+
+# SEE ALSO
+ skopeo(1), podman(1)
diff --git a/docs/containers-transports.5.md b/docs/containers-transports.5.md
new file mode 100644
index 0000000..481bdb7
--- /dev/null
+++ b/docs/containers-transports.5.md
@@ -0,0 +1,138 @@
+% CONTAINERS-TRANSPORTS 5 Containers Transports Man Page
+% Valentin Rothberg
+% April 2019
+
+## NAME
+
+containers-transports - description of supported transports for copying and storing container images
+
+## DESCRIPTION
+
+Tools which use the containers/image library, including skopeo(1), buildah(1), podman(1), all share a common syntax for referring to container images in various locations.
+The general form of the syntax is _transport:details_, where the details depend on the specified transport; the supported transports are documented below.
+
+The semantics of the image names ultimately depend on the environment where
+they are evaluated. For example: if evaluated on a remote server, image names
+might refer to paths on that server; relative paths are relative to the current
+directory of the image consumer.
+
+<!-- atomic: is deprecated and not documented here. -->
+
+### **containers-storage**:[**[**storage-specifier**]**]{image-id|docker-reference[@image-id]}
+
+An image located in a local containers storage.
+The format of _docker-reference_ is described in detail in the **docker** transport.
+
+The _storage-specifier_ allows for referencing storage locations on the file system and has the format `[[driver@]root[+run-root][:options]]` where the optional `driver` refers to the storage driver (e.g., overlay or btrfs) and where `root` is an absolute path to the storage's root directory.
+The optional `run-root` can be used to specify the run directory of the storage where all temporary writable content is stored.
+The optional `options` are a comma-separated list of driver-specific options.
+Please refer to containers-storage.conf(5) for further information on the drivers and supported options.
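+
+For illustration (the paths are hypothetical), a reference selecting the overlay driver with an explicit storage root and run directory might look like:
+
+```
+containers-storage:[overlay@/var/lib/containers/storage+/run/containers/storage]docker.io/library/alpine:latest
+```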
+
+### **dir:**_path_
+
+An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files.
+This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
+
+### **docker://**_docker-reference_
+
+An image in a registry implementing the "Docker Registry HTTP API V2".
+By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using podman-login(1).
+If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1).
+The containers-registries.conf(5) further allows for configuring various settings of a registry.
+
+Note that a _docker-reference_ has the following format: _name_[**:**_tag_ | **@**_digest_].
+While the docker transport does not support both a tag and a digest at the same time, some formats like containers-storage do.
+Digests can also be used in an image destination as long as the manifest matches the provided digest.
+
+The docker transport supports pushing images without a tag or digest to a registry when the image name is suffixed with **@@unknown-digest@@**. The _name_**@@unknown-digest@@** reference format cannot be used with a reference that has a tag or digest.
+The digest of images can be explored with skopeo-inspect(1).
+
+If `name` does not contain a slash, it is treated as `docker.io/library/name`.
+Otherwise, the component before the first slash is checked to see whether it is recognized as a `hostname[:port]` (i.e., it contains either a `.` or a `:`, or the component is exactly `localhost`).
+If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`.
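+
+For instance, the following (illustrative) references are interpreted as shown:
+
+```
+alpine                            -> docker.io/library/alpine
+docker/cli                        -> docker.io/docker/cli
+localhost/alpine                  -> localhost/alpine
+registry.example.com:5000/ns/app  -> registry.example.com:5000/ns/app
+```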
+
+### **docker-archive:**_path[:{docker-reference|@source-index}]_
+
+An image stored in a docker-save(1) formatted file.
+_docker-reference_ must not contain a digest.
+Alternatively, for reading archives, @_source-index_ is a zero-based index in the archive manifest
+(to access untagged images).
+If neither _docker-reference_ nor @_source-index_ is specified when reading an archive, the archive must contain exactly one image.
+
+The _path_ can refer to a stream, e.g. `docker-archive:/dev/stdin`.
+
+### **docker-daemon:**_docker-reference|algo:digest_
+
+An image stored in the docker daemon's internal storage.
+The image must be specified as a _docker-reference_ or in an alternative _algo:digest_ format when being used as an image source.
+The _algo:digest_ refers to the image ID reported by docker-inspect(1).
+
+### **oci:**_path[:reference]_
+
+An image in a directory structure compliant with the "Open Container Image Layout Specification" at _path_.
+
+The _path_ value terminates at the first `:` character; any further `:` characters are not separators, but a part of _reference_.
+The _reference_ is used to set, or match, the `org.opencontainers.image.ref.name` annotation in the top-level index.
+If _reference_ is not specified when reading an image, the directory must contain exactly one image.
+
+### **oci-archive:**_path[:reference]_
+
+An image in a tar(1) archive with contents compliant with the "Open Container Image Layout Specification" at _path_.
+
+The _path_ value terminates at the first `:` character; any further `:` characters are not separators, but a part of _reference_.
+The _reference_ is used to set, or match, the `org.opencontainers.image.ref.name` annotation in the top-level index.
+If _reference_ is not specified when reading an archive, the archive must contain exactly one image.
+
+### **ostree:**_docker-reference[@/absolute/repo/path]_
+
+An image in the local ostree(1) repository.
+_/absolute/repo/path_ defaults to _/ostree/repo_.
+
+### **sif:**_path_
+
+An image using the Singularity image format at _path_.
+
+Only reading images is supported, and not all scripts can be represented in the OCI format.
+
+<!-- tarball: can only usefully be used from Go callers who call tarballReference.ConfigUpdate, and is not documented here. -->
+
+## Examples
+
+The following examples demonstrate how some of the containers transports can be used.
+The examples use skopeo-copy(1) for copying container images.
+
+**Copying an image from one registry to another**:
+```
+$ skopeo copy docker://docker.io/library/alpine:latest docker://localhost:5000/alpine:latest
+```
+
+**Copying an image from a running Docker daemon to a directory in the OCI layout**:
+```
+$ mkdir alpine-oci
+$ skopeo copy docker-daemon:alpine:latest oci:alpine-oci
+$ tree alpine-oci
+alpine-oci/
+├── blobs
+│   └── sha256
+│       ├── 83ef92b73cf4595aa7fe214ec6747228283d585f373d8f6bc08d66bebab531b7
+│       ├── 9a6259e911dcd0a53535a25a9760ad8f2eded3528e0ad5604c4488624795cecc
+│       └── ff8df268d29ccbe81cdf0a173076dcfbbea4bb2b6df1dd26766a73cb7b4ae6f7
+├── index.json
+└── oci-layout
+
+2 directories, 5 files
+```
+
+**Copying an image from a registry to the local storage**:
+```
+$ skopeo copy docker://docker.io/library/alpine:latest containers-storage:alpine:latest
+```
+
+## SEE ALSO
+
+docker-login(1), docker-save(1), ostree(1), podman-login(1), skopeo-copy(1), skopeo-inspect(1), tar(1), containers-registries.conf(5), containers-storage.conf(5)
+
+## AUTHORS
+
+Miloslav Trmač <mitr@redhat.com>
+Valentin Rothberg <rothberg@redhat.com>
diff --git a/docs/signature-protocols.md b/docs/signature-protocols.md
new file mode 100644
index 0000000..334c589
--- /dev/null
+++ b/docs/signature-protocols.md
@@ -0,0 +1,136 @@
+# Signature access protocols
+
+The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image.
+Some image transports (local storage formats and remote protocols) implement these signatures natively
+or trivially; for others, the protocol extensions described below are necessary.
+
+## docker/distribution registries—separate storage
+
+### Usage
+
+Any existing docker/distribution registry, whether or not it natively supports signatures,
+can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](containers-registries.d.md).
+`registries.d` can be configured to use one storage URL for a whole docker/distribution server,
+or separate URLs for smaller namespaces or individual repositories within the server
+(which e.g. allows image authors to manage their own signature storage while publishing
+the images on the public `docker.io` server).
+
+The signature storage URL defines a root of a path hierarchy.
+It can be either a `file:///…` URL, pointing to a local directory structure,
+or a `http`/`https` URL, pointing to a remote server.
+`file:///` signature storage can be both read and written; `http`/`https` storage only supports reading.
+
+The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be
+a simple static web server serving a directory structure created by writing to a `file:///` signature storage.
+(This of course does not prevent other server implementations,
+e.g. an HTTP server reading signatures from a database.)
+
+The usual workflow for producing and distributing images using the separate storage mechanism
+is to configure the repository in `registries.d` with a `lookaside-staging` URL pointing to a private
+`file:///` staging area, and a `lookaside` URL pointing to a public web server.
+To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`),
+and then copy the created directory structure from the `file:///` staging area
+to a subdirectory of a webroot of the public web server so that they are accessible using the public `lookaside` URL.
+The author would also instruct consumers of the image (or provide them with a `registries.d` configuration file)
+to set up a `lookaside` URL pointing to the public web server.
+
+### Path structure
+
+Given a _base_ signature storage URL configured in `registries.d` as mentioned above,
+and a container image stored in a docker/distribution registry using the _fully-expanded_ name
+_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`,
+_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`),
+signatures are accessed using URLs of the form
+> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_
+
+where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest
+(i.e. even if the user referenced the image using a tag,
+the signature storage is always disambiguated using digest references).
+Note that in the URLs used for signatures,
+_digest-algo_ and _digest-value_ are separated using the `=` character,
+not `:` like when accessing the manifest using the docker/distribution API.
+
+Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1.
+Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1,
+and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist.
+Similarly, to add one more signature to an image, find the first _index_ which does not exist, and
+then store the new signature using that _index_ value.
+
+There is no way to list existing signatures other than iterating through the successive _index_ values,
+and no way to download all of the signatures at once.
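+
+A reimplementation might construct these URLs roughly as follows; this is a minimal, hypothetical sketch with illustrative names, not the actual containers/image lookaside code:
+
+```go
+package lookaside
+
+import (
+    "fmt"
+    "strings"
+)
+
+// signatureURL builds the URL of the index-th signature (1-based) for an image
+// with the fully-expanded repository name repo (e.g. "library/busybox", without
+// the registry hostname) and a manifest digest in the usual "algo:value" form.
+func signatureURL(base, repo, manifestDigest string, index int) string {
+    // In signature URLs, the digest algorithm and value are separated by "=",
+    // not by ":" as in the docker/distribution API.
+    d := strings.Replace(manifestDigest, ":", "=", 1)
+    return fmt.Sprintf("%s/%s@%s/signature-%d", strings.TrimSuffix(base, "/"), repo, d, index)
+}
+```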
+
+### Examples
+
+For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e`
+(or as `busybox:latest` if the `latest` tag points to a manifest with the same digest),
+and with a `registries.d` configuration specifying a `lookaside` URL `https://example.com/lookaside` for the same image,
+the following URLs would be accessed to download all signatures:
+> - `https://example.com/lookaside/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1`
+> - `https://example.com/lookaside/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2`
+> - …
+
+For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same
+`lookaside` URL, the signatures would be available at
+> `https://example.com/lookaside/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1`
+
+and so on.
+
+## (OpenShift) docker/distribution API extension
+
+As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides
+an extension of the docker/distribution API which allows simpler access to the signatures,
+using only the docker/distribution API endpoint.
+
+This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint,
+and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well),
+and it is the preferred way to implement signature storage in registries.
+
+See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.
+
+To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…`
+path to read an array of signatures. Use only the signature objects
+which have `version` equal to `2` and `type` equal to `atomic`, and read the signature from `content`;
+ignore the other fields of the signature object.
+
+To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`,
+and `content` set to the signature. Also set `name` to a unique name with the form
+_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL),
+and _per-image-name_ is any unique identifier.
+
+To add more than one signature, add them one at a time. This API does not allow deleting signatures.
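+
+For illustration, a minimal client-side sketch of reading signatures through this extension might look like the following. It is hypothetical: the endpoint placeholder, the `signatures` wrapper object, and the assumption that `content` is base64-encoded in the JSON body follow the containers/image client rather than this document, so consult the upstream API documentation for the authoritative schema.
+
+```go
+package extsignatures
+
+import (
+    "encoding/json"
+    "fmt"
+    "io"
+    "net/http"
+)
+
+// extensionSignature mirrors the signature objects described above.
+type extensionSignature struct {
+    Version int    `json:"version"` // use only version == 2
+    Name    string `json:"name"`
+    Type    string `json:"type"`    // use only type == "atomic"
+    Content []byte `json:"content"` // the signature blob (assumed base64-encoded on the wire)
+}
+
+type extensionSignatureList struct {
+    Signatures []extensionSignature `json:"signatures"`
+}
+
+// readSignatures fetches and filters the signatures for one manifest digest.
+// url is the full …/extensions/v2/…/signatures/… endpoint for that digest.
+func readSignatures(client *http.Client, url string) ([][]byte, error) {
+    resp, err := client.Get(url)
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+    if resp.StatusCode != http.StatusOK {
+        return nil, fmt.Errorf("unexpected status %s", resp.Status)
+    }
+    body, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return nil, err
+    }
+    var list extensionSignatureList
+    if err := json.Unmarshal(body, &list); err != nil {
+        return nil, err
+    }
+    var sigs [][]byte
+    for _, s := range list.Signatures {
+        if s.Version == 2 && s.Type == "atomic" { // ignore anything else
+            sigs = append(sigs, s.Content)
+        }
+    }
+    return sigs, nil
+}
+```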
+
+Note that because signatures are stored within the cluster-wide image objects
+(i.e. different namespaces cannot associate different sets of signatures with the same image),
+updating signatures requires cluster-wide access to the `imagesignatures` resource
+(by default available to the `system:image-signer` role).
+
+## OpenShift-embedded registries
+
+The OpenShift-embedded registry implements the ordinary docker/distribution API,
+and it also exposes images through the OpenShift REST API (available through the “API master” servers).
+
+Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension),
+which is easier to set up and should usually be preferred.
+Continue reading for details on using older versions of OpenShift.
+
+As of https://github.com/openshift/origin/pull/9181,
+signatures are exposed through the OpenShift API
+(i.e. to access the complete image, it is necessary to use both APIs,
+in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints).
+
+To read the signature, any user with access to an image can use the `imagestreamimages` namespaced
+resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects
+which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of
+the `ImageSignature` object.
+
+To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource,
+with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form
+_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”),
+and _per-image-name_ is any unique identifier.
+
+Note that because signatures are stored within the cluster-wide image objects
+(i.e. different namespaces cannot associate different sets of signatures with the same image),
+updating signatures requires cluster-wide access to the `imagesignatures` resource
+(by default available to the `system:image-signer` role),
+and deleting signatures is strongly discouraged
+(it deletes the signature from all namespaces which contain the same image).
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..56f6c54
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,144 @@
+module github.com/containers/image/v5
+
+go 1.19
+
+require (
+ dario.cat/mergo v1.0.0
+ github.com/BurntSushi/toml v1.3.2
+ github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
+ github.com/containers/ocicrypt v1.1.9
+ github.com/containers/storage v1.51.0
+ github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46
+ github.com/distribution/reference v0.5.0
+ github.com/docker/cli v24.0.7+incompatible
+ github.com/docker/distribution v2.8.3+incompatible
+ github.com/docker/docker v24.0.7+incompatible
+ github.com/docker/docker-credential-helpers v0.8.0
+ github.com/docker/go-connections v0.4.0
+ github.com/go-openapi/strfmt v0.21.7
+ github.com/go-openapi/swag v0.22.4
+ github.com/hashicorp/go-multierror v1.1.1
+ github.com/hashicorp/go-retryablehttp v0.7.5
+ github.com/klauspost/compress v1.17.3
+ github.com/klauspost/pgzip v1.2.6
+ github.com/manifoldco/promptui v0.9.0
+ github.com/mattn/go-sqlite3 v1.14.18
+ github.com/opencontainers/go-digest v1.0.0
+ github.com/opencontainers/image-spec v1.1.0-rc5
+ github.com/opencontainers/selinux v1.11.0
+ github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f
+ github.com/otiai10/copy v1.14.0
+ github.com/proglottis/gpgme v0.1.3
+ github.com/secure-systems-lab/go-securesystemslib v0.7.0
+ github.com/sigstore/fulcio v1.4.3
+ github.com/sigstore/rekor v1.2.2
+ github.com/sigstore/sigstore v1.7.5
+ github.com/sirupsen/logrus v1.9.3
+ github.com/stretchr/testify v1.8.4
+ github.com/sylabs/sif/v2 v2.15.0
+ github.com/ulikunitz/xz v0.5.11
+ github.com/vbatts/tar-split v0.11.5
+ github.com/vbauerster/mpb/v8 v8.6.2
+ github.com/xeipuuv/gojsonschema v1.2.0
+ go.etcd.io/bbolt v1.3.8
+ golang.org/x/crypto v0.15.0
+ golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+ golang.org/x/oauth2 v0.14.0
+ golang.org/x/sync v0.5.0
+ golang.org/x/term v0.14.0
+ gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+ github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect
+ github.com/VividCortex/ewma v1.2.0 // indirect
+ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
+ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/chzyer/readline v1.5.1 // indirect
+ github.com/containerd/cgroups/v3 v3.0.2 // indirect
+ github.com/containerd/containerd v1.7.0 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
+ github.com/coreos/go-oidc/v3 v3.7.0 // indirect
+ github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/docker/go-metrics v0.0.1 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
+ github.com/go-jose/go-jose/v3 v3.0.1 // indirect
+ github.com/go-logr/logr v1.2.4 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-openapi/analysis v0.21.4 // indirect
+ github.com/go-openapi/errors v0.20.4 // indirect
+ github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
+ github.com/go-openapi/loads v0.21.2 // indirect
+ github.com/go-openapi/runtime v0.26.0 // indirect
+ github.com/go-openapi/spec v0.20.9 // indirect
+ github.com/go-openapi/validate v0.22.1 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ github.com/google/go-containerregistry v0.16.1 // indirect
+ github.com/google/go-intervals v0.0.2 // indirect
+ github.com/google/uuid v1.3.1 // indirect
+ github.com/gorilla/mux v1.8.0 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
+ github.com/mattn/go-shellwords v1.0.12 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/miekg/pkcs11 v1.1.1 // indirect
+ github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/moby/sys/mountinfo v0.7.1 // indirect
+ github.com/moby/term v0.5.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/morikuni/aec v1.0.0 // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
+ github.com/opencontainers/runc v1.1.10 // indirect
+ github.com/opencontainers/runtime-spec v1.1.0 // indirect
+ github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_golang v1.17.0 // indirect
+ github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/common v0.44.0 // indirect
+ github.com/prometheus/procfs v0.11.1 // indirect
+ github.com/rivo/uniseg v0.4.4 // indirect
+ github.com/segmentio/ksuid v1.0.4 // indirect
+ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
+ github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
+ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
+ github.com/tchap/go-patricia/v2 v2.3.1 // indirect
+ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+ github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+ go.mongodb.org/mongo-driver v1.11.3 // indirect
+ go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.opentelemetry.io/otel v1.16.0 // indirect
+ go.opentelemetry.io/otel/metric v1.16.0 // indirect
+ go.opentelemetry.io/otel/trace v1.16.0 // indirect
+ golang.org/x/mod v0.13.0 // indirect
+ golang.org/x/net v0.18.0 // indirect
+ golang.org/x/sys v0.14.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.14.0 // indirect
+ google.golang.org/appengine v1.6.8 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
+ google.golang.org/grpc v1.58.3 // indirect
+ google.golang.org/protobuf v1.31.0 // indirect
+ gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gotest.tools/v3 v3.5.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..5a0a8e2
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,644 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
+github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/hcsshim v0.12.0-rc.0 h1:wX/F5huJxH9APBkhKSEAqaiZsuBvbbDnyBROZAqsSaY=
+github.com/Microsoft/hcsshim v0.12.0-rc.0/go.mod h1:rvOnw3YlfoNnEp45wReUngvsXbwRW+AFQ10GVjG1kMU=
+github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A=
+github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
+github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
+github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
+github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
+github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
+github.com/containers/storage v1.50.3-0.20231101112703-6e72f11598fb h1:g1IJUHmHZuHa1YPvIiYjWrhysb+qEiiImA8p8mENhiE=
+github.com/containers/storage v1.50.3-0.20231101112703-6e72f11598fb/go.mod h1:LpKczONfqahkVHFdZGPUg/xYZVjd/qqisRu0TkO4u8k=
+github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
+github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
+github.com/coreos/go-oidc/v3 v3.7.0 h1:FTdj0uexT4diYIPlF4yoFVI5MRO1r5+SEcIpEw9vC0o=
+github.com/coreos/go-oidc/v3 v3.7.0/go.mod h1:yQzSCqBnK3e6Fs5l+f5i0F8Kwf0zpH9bPEsbY00KanM=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
+github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
+github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
+github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
+github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
+github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
+github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
+github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
+github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
+github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
+github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
+github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
+github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
+github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
+github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
+github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8=
+github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
+github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
+github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
+github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
+github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
+github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-rod/rod v0.114.4 h1:FpkNFukjCuZLwnoLs+S9aCL95o/EMec6M+41UmvQay8=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
+github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
+github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
+github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
+github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
+github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
+github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
+github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
+github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
+github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
+github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
+github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
+github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
+github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
+github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
+github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
+github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
+github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
+github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
+github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
+github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
+github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
+github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
+github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
+github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
+github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
+github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
+github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw=
+github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
+github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
+github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
+github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
+github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
+github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
+github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s=
+github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
+github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
+go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
+go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
+go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
+go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
+go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
+go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
+go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
+go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
+go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
+go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
+go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
+go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
+golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
+golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
+golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
+golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
+gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
+gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh
new file mode 100755
index 0000000..f046d28
--- /dev/null
+++ b/hack/get_ci_vm.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+
+#
+# For help and usage information, simply execute the script w/o any arguments.
+#
+# This script is intended to be run by Red Hat image developers who need
+# to debug problems specifically related to Cirrus-CI automated testing.
+# It requires that you have been granted prior access to create VMs in
+# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
+# with supervision upon request.
+
+set -e
+
+SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
+SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
+REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
+
+# Help detect if we were called by get_ci_vm container
+GET_CI_VM="${GET_CI_VM:-0}"
+in_get_ci_vm() {
+ if ((GET_CI_VM==0)); then
+ echo "Error: $1 is not intended for use in this context"
+ exit 2
+ fi
+}
+
+# get_ci_vm APIv1 container entrypoint calls into this script
+# to obtain the required repo-specific configuration options.
+if [[ "$1" == "--config" ]]; then
+ in_get_ci_vm "$1"
+ cat <<EOF
+DESTDIR="/var/tmp/go/src/github.com/containers/image"
+UPSTREAM_REPO="https://github.com/containers/image.git"
+CI_ENVFILE="/etc/ci_environment"
+GCLOUD_PROJECT="skopeo"
+GCLOUD_IMGPROJECT="libpod-218412"
+GCLOUD_CFG="image"
+GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-f}"
+GCLOUD_CPUS="2"
+GCLOUD_MEMORY="4Gb"
+GCLOUD_DISK="200"
+EOF
+elif [[ "$1" == "--setup" ]]; then
+ in_get_ci_vm "$1"
+ # get_ci_vm container entrypoint calls us with this option on the
+ # Cirrus-CI environment instance, to perform repo.-specific setup.
+ echo "+ Executing setup" > /dev/stderr
+ ${GOSRC}/${SCRIPT_BASE}/runner.sh setup
+else
+ # Create and access VM for specified Cirrus-CI task
+ mkdir -p $HOME/.config/gcloud/ssh
+ podman run -it --rm \
+ --tz=local \
+ -e NAME="$USER" \
+ -e SRCDIR=/src \
+ -e GCLOUD_ZONE="$GCLOUD_ZONE" \
+ -e DEBUG="${DEBUG:-0}" \
+ -v $REPO_DIRPATH:/src:O \
+ -v $HOME/.config/gcloud:/root/.config/gcloud:z \
+ -v $HOME/.config/gcloud/ssh:/root/.ssh:z \
+ quay.io/libpod/get_ci_vm:latest "$@"
+fi
diff --git a/hack/validate.sh b/hack/validate.sh
new file mode 100755
index 0000000..078d34f
--- /dev/null
+++ b/hack/validate.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+set -eo pipefail
+
+eval $(go env)
+PATH="$GOPATH/bin:$PATH"
+
+die() { echo "Error: ${1:-No message provided}" > /dev/stderr; exit 1; }
+
+# Always run from the repository root
+cd $(dirname "${BASH_SOURCE[0]}")/../
+
+if [[ -z $(type -P gofmt) ]]; then
+ die "Unable to find 'gofmt' binary in \$PATH: $PATH"
+fi
+
+echo "Executing go vet"
+GO111MODULE="on" go vet -tags="$BUILDTAGS" ./...
+
+echo "Executing gofmt"
+OUTPUT=$(gofmt -s -l . | sed -e '/^vendor/d')
+if [[ ! -z "$OUTPUT" ]]; then
+ die "Please fix the formatting of the following files:
+$OUTPUT"
+fi
diff --git a/image/docker_schema2.go b/image/docker_schema2.go
new file mode 100644
index 0000000..e5a3b89
--- /dev/null
+++ b/image/docker_schema2.go
@@ -0,0 +1,14 @@
+package image
+
+import (
+ "github.com/containers/image/v5/internal/image"
+)
+
+// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes).
+// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
+// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
+// in registries, so let’s use the same values.
+var GzippedEmptyLayer = image.GzippedEmptyLayer
+
+// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
+const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest
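The two re-exported identifiers above can be cross-checked against each other. A minimal sketch (not part of this patch), assuming the package is imported as github.com/containers/image/v5/image:

package main

import (
	"fmt"

	"github.com/containers/image/v5/image"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Recompute the digest of the canned gzip-compressed empty tar layer and
	// compare it against the exported constant; the two should match.
	computed := digest.FromBytes(image.GzippedEmptyLayer)
	fmt.Println(computed == image.GzippedEmptyLayerDigest)
}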
diff --git a/image/sourced.go b/image/sourced.go
new file mode 100644
index 0000000..2b7f6b1
--- /dev/null
+++ b/image/sourced.go
@@ -0,0 +1,37 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/types"
+)
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// close the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+ return image.FromSource(ctx, sys, src)
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
+ return image.FromUnparsedImage(ctx, sys, unparsed)
+}
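As a rough illustration of the ownership and Close() contract documented for FromSource, here is a hedged sketch (not part of this patch; the docker transport and the alpine reference are only example values):

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := context.Background()
	var sys *types.SystemContext // nil selects library defaults

	ref, err := docker.ParseReference("//docker.io/library/alpine:latest")
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		panic(err)
	}
	img, err := image.FromSource(ctx, sys, src) // takes ownership of src
	if err != nil {
		src.Close() // conservative: close src ourselves if FromSource failed
		panic(err)
	}
	defer img.Close() // closes the underlying ImageSource as well

	manifest, mimeType, err := img.Manifest(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s manifest, %d bytes\n", mimeType, len(manifest))
}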
diff --git a/image/unparsed.go b/image/unparsed.go
new file mode 100644
index 0000000..f2ebb92
--- /dev/null
+++ b/image/unparsed.go
@@ -0,0 +1,41 @@
+package image
+
+import (
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/unparsedimage"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+type UnparsedImage = image.UnparsedImage
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+ return image.UnparsedInstance(src, instanceDigest)
+}
+
+// unparsedWithRef wraps a private.UnparsedImage, claiming replacementRef as its reference.
+type unparsedWithRef struct {
+ private.UnparsedImage
+ ref types.ImageReference
+}
+
+func (uwr *unparsedWithRef) Reference() types.ImageReference {
+ return uwr.ref
+}
+
+// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be replacementRef.
+// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image
+// based on a remote-registry policy.
+func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage {
+ return &unparsedWithRef{
+ UnparsedImage: unparsedimage.FromPublic(wrappedInstance),
+ ref: replacementRef,
+ }
+}
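To make the intended use of UnparsedInstanceWithReference a bit more concrete, a hedged sketch follows (the helper name and the localRef/remoteRef parameters are illustrative only, not part of this patch):

package example

import (
	"context"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
)

// unparsedAsRemote opens locally stored image data (localRef) but presents it
// under remoteRef, so signature/policy checks keyed on the remote name apply
// to the local copy. The caller remains responsible for closing the returned
// ImageSource.
func unparsedAsRemote(ctx context.Context, sys *types.SystemContext,
	localRef, remoteRef types.ImageReference) (types.UnparsedImage, types.ImageSource, error) {
	src, err := localRef.NewImageSource(ctx, sys)
	if err != nil {
		return nil, nil, err
	}
	unparsed := image.UnparsedInstance(src, nil) // nil: use the primary manifest instance
	return image.UnparsedInstanceWithReference(unparsed, remoteRef), src, nil
}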
diff --git a/internal/blobinfocache/blobinfocache.go b/internal/blobinfocache/blobinfocache.go
new file mode 100644
index 0000000..2767c39
--- /dev/null
+++ b/internal/blobinfocache/blobinfocache.go
@@ -0,0 +1,70 @@
+package blobinfocache
+
+import (
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+)
+
+// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original
+// object if it implements BlobInfoCache2, or a wrapper which discards compression information
+// if it only implements BlobInfoCache.
+func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 {
+ if bic2, ok := bic.(BlobInfoCache2); ok {
+ return bic2
+ }
+ return &v1OnlyBlobInfoCache{
+ BlobInfoCache: bic,
+ }
+}
+
+type v1OnlyBlobInfoCache struct {
+ types.BlobInfoCache
+}
+
+func (bic *v1OnlyBlobInfoCache) Open() {
+}
+
+func (bic *v1OnlyBlobInfoCache) Close() {
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+}
+
+func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
+ return nil
+}
+
+// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of
+// types.BICReplacementCandidate, dropping compression information.
+func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate {
+ candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates))
+ for _, c := range v2candidates {
+ candidates = append(candidates, types.BICReplacementCandidate{
+ Digest: c.Digest,
+ Location: c.Location,
+ })
+ }
+ return candidates
+}
+
+// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
+// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
+// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
+// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
+// upon success.
+func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
+ switch compressorName {
+ case Uncompressed:
+ return types.Decompress, nil, nil
+ case UnknownCompression:
+ return types.PreserveOriginal, nil, nil
+ default:
+ algo, err := compression.AlgorithmByName(compressorName)
+ if err == nil {
+ return types.Compress, &algo, nil
+ }
+ return types.PreserveOriginal, nil, err
+ }
+}
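+
+// A hedged sketch of how a TryReusingBlob() implementation might consume this helper
+// (candidate and blobInfo are hypothetical variables):
+//
+//	op, algo, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
+//	if err != nil {
+//		return false, types.BlobInfo{}, err
+//	}
+//	blobInfo.CompressionOperation = op
+//	blobInfo.CompressionAlgorithm = algo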
diff --git a/internal/blobinfocache/types.go b/internal/blobinfocache/types.go
new file mode 100644
index 0000000..429d682
--- /dev/null
+++ b/internal/blobinfocache/types.go
@@ -0,0 +1,53 @@
+package blobinfocache
+
+import (
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ // Uncompressed is the value we store in a blob info cache to indicate that we know that
+ // the blob in the corresponding location is not compressed.
+ Uncompressed = "uncompressed"
+ // UnknownCompression is the value we store in a blob info cache to indicate that we don't
+ // know if the blob in the corresponding location is compressed (and if so, how) or not.
+ UnknownCompression = "unknown"
+)
+
+// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind
+// of compression was applied to the blobs it keeps information about.
+type BlobInfoCache2 interface {
+ types.BlobInfoCache
+
+ // Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+ // Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+ Open()
+ // Close destroys state created by Open().
+ Close()
+
+ // RecordDigestCompressorName records a compressor for the blob with the specified digest,
+ // or Uncompressed or UnknownCompression.
+ // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
+ // digest just because some remote author claims so (e.g. because a manifest says so);
+ // otherwise the cache could be poisoned and cause us to make incorrect edits to type
+ // information in a manifest.
+ RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+	// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+	// that could possibly be reused within the specified transport scope (if they still
+ // exist, which is not guaranteed).
+ //
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+ // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+ // up variants of the blob which have the same uncompressed digest.
+ //
+ // The CompressorName fields in returned data must never be UnknownCompression.
+ CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
+}
+
+// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
+type BICReplacementCandidate2 struct {
+ Digest digest.Digest
+ CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
+ UnknownLocation bool // is true when `Location` for this blob is not set
+ Location types.BICLocationReference // not set if UnknownLocation is set to `true`
+}
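+
+// A usage sketch of the contract above (cache and the lookup arguments are hypothetical):
+// Open()/Close() bracket a batch of lookups, and every returned candidate carries a
+// usable compressor name.
+//
+//	cache.Open()
+//	defer cache.Close()
+//	for _, c := range cache.CandidateLocations2(transport, scope, blobDigest, true) {
+//		// c.CompressorName is a compression algorithm name or Uncompressed; it is never UnknownCompression.
+//	}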
diff --git a/internal/image/common_test.go b/internal/image/common_test.go
new file mode 100644
index 0000000..d66fb41
--- /dev/null
+++ b/internal/image/common_test.go
@@ -0,0 +1,53 @@
+package image
+
+import (
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "testing"
+
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+)
+
+// assertJSONEqualsFixture tests that jsonBytes is structurally equal to fixture,
+// possibly ignoring ignoreFields
+func assertJSONEqualsFixture(t *testing.T, jsonBytes []byte, fixture string, ignoreFields ...string) {
+ var contents map[string]any
+ err := json.Unmarshal(jsonBytes, &contents)
+ require.NoError(t, err)
+
+ fixtureBytes, err := os.ReadFile(filepath.Join("fixtures", fixture))
+ require.NoError(t, err)
+ var fixtureContents map[string]any
+
+ err = json.Unmarshal(fixtureBytes, &fixtureContents)
+ require.NoError(t, err)
+ for _, f := range ignoreFields {
+ delete(contents, f)
+ delete(fixtureContents, f)
+ }
+ assert.Equal(t, fixtureContents, contents)
+}
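+
+// Typical call, as used by the serialization tests below: compare a generated manifest
+// against a fixture while ignoring the "signatures" field.
+//
+//	assertJSONEqualsFixture(t, serialized, "schema1.json", "signatures")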
+
+// layerInfosWithCryptoOperation returns a copy of input where CryptoOperation is set to op
+func layerInfosWithCryptoOperation(input []types.BlobInfo, op types.LayerCrypto) []types.BlobInfo {
+ res := slices.Clone(input)
+ for i := range res {
+ res[i].CryptoOperation = op
+ }
+ return res
+}
+
+// layerInfosWithCompressionEdits returns a copy of input where CompressionOperation and CompressionAlgorithm are set to op and algo, respectively
+func layerInfosWithCompressionEdits(input []types.BlobInfo, op types.LayerCompression, algo *compressiontypes.Algorithm) []types.BlobInfo {
+ res := slices.Clone(input)
+ for i := range res {
+ res[i].CompressionOperation = op
+ res[i].CompressionAlgorithm = algo
+ }
+ return res
+}
diff --git a/internal/image/docker_list.go b/internal/image/docker_list.go
new file mode 100644
index 0000000..617a451
--- /dev/null
+++ b/internal/image/docker_list.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/types"
+)
+
+func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+ list, err := manifest.Schema2ListFromManifest(manblob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing schema2 manifest list: %w", err)
+ }
+ targetManifestDigest, err := list.ChooseInstance(sys)
+ if err != nil {
+ return nil, fmt.Errorf("choosing image instance: %w", err)
+ }
+ manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+ if err != nil {
+ return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err)
+ }
+
+ matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+ if err != nil {
+ return nil, fmt.Errorf("computing manifest digest: %w", err)
+ }
+ if !matches {
+ return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ }
+
+ return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
diff --git a/internal/image/docker_schema1.go b/internal/image/docker_schema1.go
new file mode 100644
index 0000000..3ef8e14
--- /dev/null
+++ b/internal/image/docker_schema1.go
@@ -0,0 +1,257 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type manifestSchema1 struct {
+ m *manifest.Schema1
+}
+
+func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestSchema1{m: m}, nil
+}
+
+// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) {
+ m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestSchema1{m: m}, nil
+}
+
+func (m *manifestSchema1) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestSchema1) manifestMIMEType() string {
+ return manifest.DockerV2Schema1SignedMediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) {
+ return nil, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{})
+ if err != nil {
+ return nil, err
+ }
+ return v2s2.OCIConfig(ctx)
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
+ return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ // This is a bit convoluted: We can’t just have a "get embedded docker reference" method
+ // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually
+ // embed a full docker/distribution reference, but only the repo name and tag (without the host name).
+ // So we would have to provide a “return repo without host name, and tag” getter for the generic code,
+ // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the
+ // generic copy code needs to know about is reference.Named and that a manifest may need updating
+ // for some destinations.
+ name := reference.Path(ref)
+ var tag string
+ if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+ tag = tagged.Tag()
+ } else {
+ tag = ""
+ }
+ return m.m.Name != name || m.m.Tag != tag
+}
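+
+// A worked example of the matching above, mirroring the cases exercised in the tests
+// (for a manifest with Name "rhosp12/openstack-nova-api" and Tag "latest"):
+//
+//	ref, _ := reference.ParseNormalizedNamed("example.com/rhosp12/openstack-nova-api:latest")
+//	_ = m.EmbeddedDockerReferenceConflicts(ref) // false: the host name is ignored, path and tag match
+//	ref, _ = reference.ParseNormalizedNamed("rhosp12/openstack-nova-api:notlatest")
+//	_ = m.EmbeddedDockerReferenceConflicts(ref) // true: the tag differs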
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) {
+ return m.m.Inspect(nil)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest)
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+
+	// We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so,
+ // handle conversions between them by doing nothing.
+ if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType {
+ converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+ imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
+ manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if converted != nil {
+ return converted, nil
+ }
+ }
+
+ // No conversion required, update manifest
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ if options.EmbeddedDockerReference != nil {
+ copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+ if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
+ copy.m.Tag = tagged.Tag()
+ } else {
+ copy.m.Tag = ""
+ }
+ }
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema1 object.
+//
+// We need this function only because a function returning a concrete implementation of the genericManifest
+// interface is not automatically assignable to a function type returning the genericManifest interface.
+func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ return m.convertToManifestSchema2(ctx, options)
+}
+
+// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema1 object.
+//
+// Based on github.com/docker/docker/distribution/pull_v2.go
+func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
+ uploadedLayerInfos := options.InformationOnly.LayerInfos
+ layerDiffIDs := options.InformationOnly.LayerDiffIDs
+
+ if len(m.m.ExtractedV1Compatibility) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
+ }
+ if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
+ return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
+ }
+ if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
+ return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+ }
+ if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
+ return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+ }
+
+ var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
+ if options.LayerInfos != nil {
+ if len(options.LayerInfos) != len(m.m.FSLayers) {
+ return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
+ len(options.LayerInfos), len(m.m.FSLayers))
+ }
+ convertedLayerUpdates = []types.BlobInfo{}
+ }
+
+ // Build a list of the diffIDs for the non-empty layers.
+ diffIDs := []digest.Digest{}
+ var layers []manifest.Schema2Descriptor
+ for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- {
+ v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index
+
+ if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway {
+ var size int64
+ if uploadedLayerInfos != nil {
+ size = uploadedLayerInfos[v2Index].Size
+ }
+ var d digest.Digest
+ if layerDiffIDs != nil {
+ d = layerDiffIDs[v2Index]
+ }
+ layers = append(layers, manifest.Schema2Descriptor{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: size,
+ Digest: m.m.FSLayers[v1Index].BlobSum,
+ })
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index])
+ }
+ diffIDs = append(diffIDs, d)
+ }
+ }
+ configJSON, err := m.m.ToSchema2Config(diffIDs)
+ if err != nil {
+ return nil, err
+ }
+ configDescriptor := manifest.Schema2Descriptor{
+ MediaType: "application/vnd.docker.container.image.v1+json",
+ Size: int64(len(configJSON)),
+ Digest: digest.FromBytes(configJSON),
+ }
+
+ if options.LayerInfos != nil {
+ options.LayerInfos = convertedLayerUpdates
+ }
+ return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil
+}
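+
+// Index mapping illustration for the loop above (hypothetical 3-entry manifest):
+// schema1 lists fsLayers newest-first while schema2 lists layers root-first, so
+// v1Index 2, 1, 0 maps to v2Index 0, 1, 2; entries marked ThrowAway contribute
+// neither a layer descriptor nor a diffID.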
+
+// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema1 object.
+func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest
+ m2, err := m.convertToManifestSchema2(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return m2.convertToManifestOCI1(ctx, options)
+}
+
+// SupportsEncryption returns whether encryption is supported for the manifest type
+func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
+ return false
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool {
+ return true // There are no MIME types in the manifest, so we must assume a valid image.
+}
diff --git a/internal/image/docker_schema1_test.go b/internal/image/docker_schema1_test.go
new file mode 100644
index 0000000..ee3f311
--- /dev/null
+++ b/internal/image/docker_schema1_test.go
@@ -0,0 +1,722 @@
+package image
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+)
+
+var schema1FixtureLayerInfos = []types.BlobInfo{
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: 74876245,
+ Digest: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4",
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: 1239,
+ Digest: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a",
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: 78339724,
+ Digest: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e",
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: 76857203,
+ Digest: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6",
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: 25923380,
+ Digest: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788",
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: 23511300,
+ Digest: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d",
+ },
+}
+
+var schema1FixtureLayerDiffIDs = []digest.Digest{
+ "sha256:e1d829eddb62dc49f1c56dbf8acd0c71299b3996115399de853a9d66d81b822f",
+ "sha256:02404b4d7e5d89b1383ca346b4462b199128aa4b238c5a2b2c186004ac148ba8",
+ "sha256:45fad80a4b1cec165c421eb570dec312d825bd8fac362e255028fa3f2169148d",
+ "sha256:7ddef8efd44586e54880ec4797458eac87b368544c438d7e7c63fbc0d9a7ae97",
+ "sha256:b56b16b6407ba1b86252e7e50f98f142cf6844fab42e4495d56ebb7ce559e2af",
+ "sha256:9bd63850e406167b4751f5050f6dc0ebd789bb5ef5e5c6c31ed062bda8c063e8",
+}
+
+var schema1WithThrowawaysFixtureLayerInfos = []types.BlobInfo{
+ {Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: 51354364},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: 150},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: 11739507},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: 8841833},
+ {Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: 291},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+ {Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))},
+}
+
+var schema1WithThrowawaysFixtureLayerDiffIDs = []digest.Digest{
+ "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab",
+ GzippedEmptyLayerDigest,
+ GzippedEmptyLayerDigest,
+ GzippedEmptyLayerDigest,
+ "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c",
+ GzippedEmptyLayerDigest,
+ "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56",
+ GzippedEmptyLayerDigest,
+ GzippedEmptyLayerDigest,
+ GzippedEmptyLayerDigest,
+ GzippedEmptyLayerDigest,
+ "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9",
+ "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b",
+ GzippedEmptyLayerDigest,
+ GzippedEmptyLayerDigest,
+}
+
+func manifestSchema1FromFixture(t *testing.T, fixture string) genericManifest {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", fixture))
+ require.NoError(t, err)
+
+ m, err := manifestSchema1FromManifest(manifest)
+ require.NoError(t, err)
+ return m
+}
+
+func manifestSchema1FromComponentsLikeFixture(t *testing.T) genericManifest {
+ ref, err := reference.ParseNormalizedNamed("rhosp12/openstack-nova-api:latest")
+ require.NoError(t, err)
+ m, err := manifestSchema1FromComponents(ref, []manifest.Schema1FSLayers{
+ {BlobSum: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"},
+ {BlobSum: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788"},
+ {BlobSum: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6"},
+ {BlobSum: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e"},
+ {BlobSum: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a"},
+ {BlobSum: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4"},
+ }, []manifest.Schema1History{
+		{V1Compatibility: "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"kolla_start\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"3bf9afe371220b1eb1c57bec39b5a99ba976c36c92d964a1c014584f95f51e33\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"container_config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"USER [nova]\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"sha256:274ce4dcbeb09fa173a5d50203ae5cec28f456d1b8b59477b47a42bd74d068bf\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"created\":\"2018-01-25T00:37:48.268558Z\",\"docker_version\":\"1.12.6\",\"id\":\"486cbbaf6c6f7d890f9368c86eda3f4ebe3ae982b75098037eb3c3cc6f0e0cdf\",\"os\":\"linux\",\"parent\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\"}"},
+ {V1Compatibility: "{\"id\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\",\"parent\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"created\":\"2018-01-24T23:08:25.300741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"},
+ {V1Compatibility: "{\"id\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"parent\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"created\":\"2018-01-24T22:00:57.807862Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"},
+ {V1Compatibility: "{\"id\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"parent\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"created\":\"2018-01-24T21:40:32.494686Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"},
+ {V1Compatibility: "{\"id\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"parent\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"created\":\"2017-11-21T16:49:37.292899Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'\"]},\"author\":\"Red Hat, Inc.\"}"},
+ {V1Compatibility: "{\"id\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"comment\":\"Imported from -\",\"created\":\"2017-11-21T16:47:27.755341705Z\",\"container_config\":{\"Cmd\":[\"\"]}}"},
+ }, "amd64")
+ require.NoError(t, err)
+ return m
+}
+
+func TestManifestSchema1FromManifest(t *testing.T) {
+ // This just tests that the JSON can be loaded; we test that the parsed
+ // values are correctly returned in tests for the individual getter methods.
+ _ = manifestSchema1FromFixture(t, "schema1.json")
+
+ // FIXME: Detailed coverage of manifest.Schema1FromManifest failures
+ _, err := manifestSchema1FromManifest([]byte{})
+ assert.Error(t, err)
+}
+
+func TestManifestSchema1FromComponents(t *testing.T) {
+ // This just smoke-tests that the manifest can be created; we test that the parsed
+ // values are correctly returned in tests for the individual getter methods.
+ _ = manifestSchema1FromComponentsLikeFixture(t)
+
+ // Error on invalid input
+ _, err := manifestSchema1FromComponents(nil, []manifest.Schema1FSLayers{}, []manifest.Schema1History{}, "amd64")
+ assert.Error(t, err)
+}
+
+func TestManifestSchema1Serialize(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ serialized, err := m.serialize()
+ require.NoError(t, err)
+ // Drop "signatures" which is generated by AddDummyV2S1Signature
+ // We would ideally like to compare “serialized” with some transformation of
+ // the original fixture, but the ordering of fields in JSON maps is undefined, so this is
+ // easier.
+ assertJSONEqualsFixture(t, serialized, "schema1.json", "signatures")
+ }
+}
+
+func TestManifestSchema1ManifestMIMEType(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, m.manifestMIMEType())
+ }
+}
+
+func TestManifestSchema1ConfigInfo(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ assert.Equal(t, types.BlobInfo{Digest: ""}, m.ConfigInfo())
+ }
+}
+
+func TestManifestSchema1ConfigBlob(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ blob, err := m.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assert.Nil(t, blob)
+ }
+}
+
+func TestManifestSchema1OCIConfig(t *testing.T) {
+ m := manifestSchema1FromFixture(t, "schema1-for-oci-config.json")
+ configOCI, err := m.OCIConfig(context.Background())
+ require.NoError(t, err)
+ // FIXME: A more comprehensive test?
+ assert.Equal(t, "/pause", configOCI.Config.Entrypoint[0])
+}
+
+func TestManifestSchema1LayerInfo(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4",
+ Size: -1,
+ },
+ {
+ Digest: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a",
+ Size: -1,
+ },
+ {
+ Digest: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e",
+ Size: -1,
+ },
+ {
+ Digest: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6",
+ Size: -1,
+ },
+ {
+ Digest: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788",
+ Size: -1,
+ },
+ {
+ Digest: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d",
+ Size: -1,
+ },
+ }, m.LayerInfos())
+ }
+}
+
+func TestManifestSchema1EmbeddedDockerReferenceConflicts(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ for name, expected := range map[string]bool{
+ "rhosp12/openstack-nova-api:latest": false, // Exactly the embedded reference
+ "example.com/rhosp12/openstack-nova-api:latest": false, // A different host name, but path and tag match
+ "docker.io:3333/rhosp12/openstack-nova-api:latest": false, // A different port, but path and tag match
+ "busybox": true, // Entirely different, minimal
+ "example.com:5555/ns/repo:tag": true, // Entirely different, maximal
+ "rhosp12/openstack-nova-api": true, // Missing tag
+ "rhosp12/openstack-nova-api:notlatest": true, // Different tag
+ "notrhosp12/openstack-nova-api:latest": true, // Different namespace
+ "rhosp12/notopenstack-nova-api:latest": true, // Different repo
+ } {
+ ref, err := reference.ParseNormalizedNamed(name)
+ require.NoError(t, err, name)
+ conflicts := m.EmbeddedDockerReferenceConflicts(ref)
+ assert.Equal(t, expected, conflicts, name)
+ }
+ }
+}
+
+func TestManifestSchema1Inspect(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ ii, err := m.Inspect(context.Background())
+ require.NoError(t, err)
+ created := time.Date(2018, 1, 25, 0, 37, 48, 268558000, time.UTC)
+ var emptyAnnotations map[string]string
+ assert.Equal(t, types.ImageInspectInfo{
+ Tag: "latest",
+ Created: &created,
+ DockerVersion: "1.12.6",
+ Labels: map[string]string{
+ "Kolla-SHA": "5.0.0-39-g6f1b947b",
+ "architecture": "x86_64",
+ "authoritative-source-url": "registry.access.redhat.com",
+ "build-date": "2018-01-25T00:32:27.807261",
+ "com.redhat.build-host": "ip-10-29-120-186.ec2.internal",
+ "com.redhat.component": "openstack-nova-api-docker",
+ "description": "Red Hat OpenStack Platform 12.0 nova-api",
+ "distribution-scope": "public",
+ "io.k8s.description": "Red Hat OpenStack Platform 12.0 nova-api",
+ "io.k8s.display-name": "Red Hat OpenStack Platform 12.0 nova-api",
+ "io.openshift.tags": "rhosp osp openstack osp-12.0",
+ "kolla_version": "stable/pike",
+ "name": "rhosp12/openstack-nova-api",
+ "release": "20180124.1",
+ "summary": "Red Hat OpenStack Platform 12.0 nova-api",
+ "tripleo-common_version": "7.6.3-23-g4891cfe",
+ "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1",
+ "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98",
+ "vcs-type": "git",
+ "vendor": "Red Hat, Inc.",
+ "version": "12.0",
+ "version-release": "12.0-20180124.1",
+ },
+ Architecture: "amd64",
+ Os: "linux",
+ Layers: []string{
+ "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4",
+ "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a",
+ "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e",
+ "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6",
+ "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788",
+ "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d",
+ },
+ LayersData: []types.ImageInspectLayer{{
+ MIMEType: "",
+ Digest: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4",
+ Size: -1,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "",
+ Digest: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a",
+ Size: -1,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "",
+ Digest: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e",
+ Size: -1,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "",
+ Digest: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6",
+ Size: -1,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "",
+ Digest: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788",
+ Size: -1,
+ Annotations: emptyAnnotations,
+ },
+ {
+ MIMEType: "",
+ Digest: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d",
+ Size: -1,
+ Annotations: emptyAnnotations,
+ },
+ },
+ Author: "",
+ Env: []string{
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "container=oci",
+ "KOLLA_BASE_DISTRO=rhel",
+ "KOLLA_INSTALL_TYPE=binary",
+ "KOLLA_INSTALL_METATYPE=rhos",
+ "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ ",
+ },
+ }, *ii)
+ }
+}
+
+func TestManifestSchema1UpdatedImageNeedsLayerDiffIDs(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ for mt, expected := range map[string]bool{
+ "": false,
+ manifest.DockerV2Schema1MediaType: false,
+ manifest.DockerV2Schema1SignedMediaType: false,
+ manifest.DockerV2Schema2MediaType: true,
+ imgspecv1.MediaTypeImageManifest: true,
+ } {
+ needsDiffIDs := m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{
+ ManifestMIMEType: mt,
+ })
+ assert.Equal(t, expected, needsDiffIDs, mt)
+ }
+ }
+}
+
+func TestManifestSchema1UpdatedImage(t *testing.T) {
+ original := manifestSchema1FromFixture(t, "schema1.json")
+
+ // LayerInfos:
+ layerInfos := append(slices.Clone(original.LayerInfos()[1:]), original.LayerInfos()[0])
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: layerInfos,
+ })
+ require.NoError(t, err)
+ assert.Equal(t, layerInfos, res.LayerInfos())
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: append(layerInfos, layerInfos[0]),
+ })
+ assert.Error(t, err)
+
+ // EmbeddedDockerReference:
+ for _, refName := range []string{
+ "busybox",
+ "busybox:notlatest",
+ "rhosp12/openstack-nova-api:latest",
+ } {
+ embeddedRef, err := reference.ParseNormalizedNamed(refName)
+ require.NoError(t, err)
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ EmbeddedDockerReference: embeddedRef,
+ })
+ require.NoError(t, err)
+ // The previous embedded docker reference now does not match.
+ nonEmbeddedRef, err := reference.ParseNormalizedNamed("rhosp12/openstack-nova-api:latest")
+ require.NoError(t, err)
+ conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef)
+ assert.Equal(t, refName != "rhosp12/openstack-nova-api:latest", conflicts)
+ }
+
+ // ManifestMIMEType:
+ // Only smoke-test the valid conversions, detailed tests are below. (This also verifies that “original” is not affected.)
+ for _, mime := range []string{
+ manifest.DockerV2Schema2MediaType,
+ imgspecv1.MediaTypeImageManifest,
+ } {
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: mime,
+ InformationOnly: types.ManifestUpdateInformation{
+ LayerInfos: schema1FixtureLayerInfos,
+ LayerDiffIDs: schema1FixtureLayerDiffIDs,
+ },
+ })
+ assert.NoError(t, err, mime)
+ }
+ for _, mime := range []string{
+ "this is invalid",
+ } {
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: mime,
+ })
+ assert.Error(t, err, mime)
+ }
+
+	// original hasn’t been changed:
+ m2 := manifestSchema1FromFixture(t, "schema1.json")
+ typedOriginal, ok := original.(*manifestSchema1)
+ require.True(t, ok)
+ typedM2, ok := m2.(*manifestSchema1)
+ require.True(t, ok)
+ assert.Equal(t, *typedM2, *typedOriginal)
+}
+
+func TestManifestSchema1ConvertToSchema2(t *testing.T) {
+ original := manifestSchema1FromFixture(t, "schema1.json")
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ LayerInfos: schema1FixtureLayerInfos,
+ LayerDiffIDs: schema1FixtureLayerDiffIDs,
+ },
+ })
+ require.NoError(t, err)
+
+ convertedJSON, mt, err := res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema2MediaType, mt)
+ // Ignore "config": we don’t want to hard-code a specific digest and size of the marshaled config here.
+ assertJSONEqualsFixture(t, convertedJSON, "schema1-to-schema2.json", "config")
+
+ convertedConfig, err := res.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assertJSONEqualsFixture(t, convertedConfig, "schema1-to-schema2-config.json")
+
+ // Conversion to schema2 together with changing LayerInfos works as expected (which requires
+ // handling schema1 throwaway layers):
+ // Use the recorded result of converting the schema2 fixture to schema1, because that one
+	// (unlike schema1.json) contains throwaway layers.
+ original = manifestSchema1FromFixture(t, "schema2-to-schema1-by-docker.json")
+ updatedLayers, updatedLayersCopy := modifiedLayerInfos(t, schema1WithThrowawaysFixtureLayerInfos)
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ LayerInfos: updatedLayers,
+ LayerDiffIDs: schema1WithThrowawaysFixtureLayerDiffIDs,
+ },
+ })
+ require.NoError(t, err)
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema2MediaType, mt)
+ // Layers have been updated as expected
+ originalSrc := newSchema2ImageSource(t, "httpd:latest")
+ s2Manifest, err := manifestSchema2FromManifest(originalSrc, convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5ba",
+ Size: 51354365,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680d",
+ Size: 151,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a8",
+ Size: 11739506,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25908",
+ Size: 8841832,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fb",
+ Size: 290,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ }, s2Manifest.LayerInfos())
+
+ // Conversion to schema2 with encryption fails
+ encryptedLayers := layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt)
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: encryptedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ LayerInfos: updatedLayers,
+ LayerDiffIDs: schema1WithThrowawaysFixtureLayerDiffIDs,
+ },
+ })
+ assert.Error(t, err)
+
+ // FIXME? Test also the various failure cases, if only to see that we don't crash?
+}
+
+func TestManifestSchema1ConvertToManifestOCI1(t *testing.T) {
+ original := manifestSchema1FromFixture(t, "schema1.json")
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ InformationOnly: types.ManifestUpdateInformation{
+ LayerInfos: schema1FixtureLayerInfos,
+ LayerDiffIDs: schema1FixtureLayerDiffIDs,
+ },
+ })
+ require.NoError(t, err)
+
+ convertedJSON, mt, err := res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt)
+ // Ignore "config": we don’t want to hard-code a specific digest and size of the marshaled config here.
+ assertJSONEqualsFixture(t, convertedJSON, "schema1-to-oci1.json", "config")
+
+ convertedConfig, err := res.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assertJSONEqualsFixture(t, convertedConfig, "schema1-to-oci1-config.json")
+
+ // Conversion to OCI together with changing LayerInfos works as expected (which requires
+ // handling schema1 throwaway layers):
+ // Use the recorded result of converting the schema2 fixture to schema1, because that one
+	// (unlike schema1.json) contains throwaway layers.
+ original = manifestSchema1FromFixture(t, "schema2-to-schema1-by-docker.json")
+ updatedLayers, updatedLayersCopy := modifiedLayerInfos(t, schema1WithThrowawaysFixtureLayerInfos)
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ InformationOnly: types.ManifestUpdateInformation{ // FIXME: deduplicate this data
+ LayerInfos: updatedLayers,
+ LayerDiffIDs: schema1WithThrowawaysFixtureLayerDiffIDs,
+ },
+ })
+ require.NoError(t, err)
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt)
+ // Layers have been updated as expected
+ originalSrc := newSchema2ImageSource(t, "httpd:latest")
+ ociManifest, err := manifestOCI1FromManifest(originalSrc, convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5ba",
+ Size: 51354365,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680d",
+ Size: 151,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a8",
+ Size: 11739506,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25908",
+ Size: 8841832,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fb",
+ Size: 290,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ },
+ }, ociManifest.LayerInfos())
+
+ // Conversion to OCI with encryption is possible.
+ encryptedLayers := layerInfosWithCryptoOperation(schema1WithThrowawaysFixtureLayerInfos, types.Encrypt)
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: encryptedLayers,
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ InformationOnly: types.ManifestUpdateInformation{
+ LayerInfos: encryptedLayers,
+ LayerDiffIDs: schema1WithThrowawaysFixtureLayerDiffIDs,
+ },
+ })
+ require.NoError(t, err)
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt)
+ // Layers have been updated as expected
+ ociManifest, err = manifestOCI1FromManifest(originalSrc, convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ }, ociManifest.LayerInfos())
+
+ // FIXME? Test also the various failure cases, if only to see that we don't crash?
+}
+
+func TestConvertSchema1ToManifestOCIWithAnnotations(t *testing.T) {
+	// Test that when converting an image from schema 1 (which doesn't support certain fields like
+	// URLs, annotations, etc.) to an OCI image (which does support those fields),
+	// UpdatedImage propagates those fields to the converted manifest.
+
+ original := manifestSchema1FromFixture(t, "schema1.json")
+ layerInfoOverwrites := []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ URLs: []string{
+ "https://layer.url",
+ },
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ Annotations: map[string]string{
+ "test-annotation-2": "two",
+ },
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ }
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ LayerInfos: layerInfoOverwrites,
+ InformationOnly: types.ManifestUpdateInformation{
+ LayerInfos: schema1FixtureLayerInfos,
+ LayerDiffIDs: schema1FixtureLayerDiffIDs,
+ },
+ })
+ require.NoError(t, err)
+ assert.Equal(t, res.LayerInfos(), layerInfoOverwrites)
+
+	// Converting to schema2 with the same edits does not propagate the OCI-only fields (the resulting layer infos differ from the requested overwrites)
+ original = manifestSchema1FromFixture(t, "schema1.json")
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ LayerInfos: layerInfoOverwrites,
+ InformationOnly: types.ManifestUpdateInformation{
+ LayerInfos: schema1FixtureLayerInfos,
+ LayerDiffIDs: schema1FixtureLayerDiffIDs,
+ },
+ })
+ require.NoError(t, err)
+ assert.NotEqual(t, res.LayerInfos(), layerInfoOverwrites)
+}
+
+func TestManifestSchema1CanChangeLayerCompression(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema1FromFixture(t, "schema1.json"),
+ manifestSchema1FromComponentsLikeFixture(t),
+ } {
+ assert.True(t, m.CanChangeLayerCompression(""))
+ }
+}
diff --git a/internal/image/docker_schema2.go b/internal/image/docker_schema2.go
new file mode 100644
index 0000000..c3234c3
--- /dev/null
+++ b/internal/image/docker_schema2.go
@@ -0,0 +1,413 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
+// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
+// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
+// in registries, so let’s use the same values.
+//
+// This is publicly visible as c/image/image.GzippedEmptyLayer.
+var GzippedEmptyLayer = []byte{
+ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
+ 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
+}
+
+// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
+//
+// This is publicly visible as c/image/image.GzippedEmptyLayerDigest.
+const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
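+
+// Illustrative consistency check (not executed anywhere in this file): the digest
+// constant above is simply the SHA-256 digest of GzippedEmptyLayer.
+//
+//	if digest.FromBytes(GzippedEmptyLayer) != GzippedEmptyLayerDigest {
+//		panic("GzippedEmptyLayerDigest is inconsistent with GzippedEmptyLayer")
+//	}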
+
+type manifestSchema2 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ m *manifest.Schema2
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema2FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestSchema2{
+ src: src,
+ m: m,
+ }, nil
+}
+
+// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
+func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
+ return &manifestSchema2{
+ src: src,
+ configBlob: configBlob,
+ m: manifest.Schema2FromComponents(config, layers),
+ }
+}
+
+func (m *manifestSchema2) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestSchema2) manifestMIMEType() string {
+ return m.m.MediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ configBlob, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+ // than OCI v1. This unmarshal makes sure we drop docker v2s2
+ // fields that aren't needed in OCI v1.
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(configBlob, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+ }
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.ConfigDescriptor.Digest {
+ return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
+ return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
+ }
+ return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the CompressionOperation and CompressionAlgorithm specified in one or more
+// options.LayerInfos items is anything other than gzip.
+func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.Schema2Clone(m.m),
+ }
+
+ converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+ manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
+ manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+ imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if converted != nil {
+ return converted, nil
+ }
+
+ // No conversion required, update manifest
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+ return memoryImageFromManifest(&copy), nil
+}
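+
+// Usage sketch (illustrative, mirroring the tests in docker_schema2_test.go below):
+//
+//	converted, err := m.UpdatedImage(ctx, types.ManifestUpdateOptions{
+//		ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+//	})
+//
+// converts to an OCI manifest; converting to schema1 additionally requires
+// options.InformationOnly.Destination, since the gzipped empty layer may need
+// to be uploaded there.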
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+ return imgspecv1.Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
+ configOCI, err := m.OCIConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+ configOCIBytes, err := json.Marshal(configOCI)
+ if err != nil {
+ return nil, err
+ }
+
+ config := imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: int64(len(configOCIBytes)),
+ Digest: digest.FromBytes(configOCIBytes),
+ }
+
+ layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+ for idx := range layers {
+ layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+ switch m.m.LayersDescriptors[idx].MediaType {
+ case manifest.DockerV2Schema2ForeignLayerMediaType:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema2LayerMediaType:
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+ default:
+ return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
+ }
+ }
+
+ return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
+}
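+
+// In short, the switch above maps layer media types as follows; anything else is rejected:
+//
+//	DockerV2Schema2LayerMediaType            -> MediaTypeImageLayerGzip
+//	DockerV2SchemaLayerMediaTypeUncompressed -> MediaTypeImageLayer
+//	DockerV2Schema2ForeignLayerMediaType     -> MediaTypeImageLayerNonDistributable
+//	DockerV2Schema2ForeignLayerMediaTypeGzip -> MediaTypeImageLayerNonDistributableGzip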
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
+//
+// Based on docker/distribution/manifest/schema1/config_builder.go
+func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ dest := options.InformationOnly.Destination
+
+ var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
+ if options.LayerInfos != nil {
+ if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
+ return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
+ len(options.LayerInfos), len(m.m.LayersDescriptors))
+ }
+ convertedLayerUpdates = []types.BlobInfo{}
+ }
+
+ configBytes, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ imageConfig := &manifest.Schema2Image{}
+ if err := json.Unmarshal(configBytes, imageConfig); err != nil {
+ return nil, err
+ }
+
+ // Build fsLayers and History, discarding all configs. We will patch in the top-level config later.
+ fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
+ history := make([]manifest.Schema1History, len(imageConfig.History))
+ nonemptyLayerIndex := 0
+ var parentV1ID string // Set in the loop
+ v1ID := ""
+ haveGzippedEmptyLayer := false
+ if len(imageConfig.History) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
+ }
+ for v2Index, historyEntry := range imageConfig.History {
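+ // The config history runs oldest-to-newest, while schema1 fsLayers/history are
+ // ordered newest-to-oldest; v1Index below reverses the index accordingly.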
+ parentV1ID = v1ID
+ v1Index := len(imageConfig.History) - 1 - v2Index
+
+ var blobDigest digest.Digest
+ if historyEntry.EmptyLayer {
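+ // Schema1 needs a concrete blob for every history entry; empty entries map to the
+ // fixed gzip-compressed empty tar (GzippedEmptyLayer / GzippedEmptyLayerDigest).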
+ emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
+
+ if !haveGzippedEmptyLayer {
+ logrus.Debugf("Uploading empty layer during conversion to schema 1")
+ // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+ // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
+ if err != nil {
+ return nil, fmt.Errorf("uploading empty layer: %w", err)
+ }
+ if info.Digest != emptyLayerBlobInfo.Digest {
+ return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
+ }
+ haveGzippedEmptyLayer = true
+ }
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
+ }
+ blobDigest = emptyLayerBlobInfo.Digest
+ } else {
+ if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
+ return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
+ }
+ if options.LayerInfos != nil {
+ convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
+ }
+ blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
+ nonemptyLayerIndex++
+ }
+
+ // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
+ if err != nil {
+ return nil, err
+ }
+ v1ID = v
+
+ fakeImage := manifest.Schema1V1Compatibility{
+ ID: v1ID,
+ Parent: parentV1ID,
+ Comment: historyEntry.Comment,
+ Created: historyEntry.Created,
+ Author: historyEntry.Author,
+ ThrowAway: historyEntry.EmptyLayer,
+ }
+ fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
+ v1CompatibilityBytes, err := json.Marshal(&fakeImage)
+ if err != nil {
+ return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
+ }
+
+ fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
+ history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
+ // Note that parentV1ID of the top layer is preserved when exiting this loop
+ }
+
+ // Now patch in real configuration for the top layer (v1Index == 0)
+ v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
+ if err != nil {
+ return nil, err
+ }
+ v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
+ if err != nil {
+ return nil, err
+ }
+ history[0].V1Compatibility = string(v1Config)
+
+ if options.LayerInfos != nil {
+ options.LayerInfos = convertedLayerUpdates
+ }
+ m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
+ if err != nil {
+ return nil, err // This should never happen, we should have created all the components correctly.
+ }
+ return m1, nil
+}
+
+func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
+ if err := blobDigest.Validate(); err != nil {
+ return "", err
+ }
+ parts := append([]string{blobDigest.Hex()}, others...)
+ v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
+ return hex.EncodeToString(v1IDHash[:]), nil
+}
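+
+// Worked example (hypothetical values): for blobDigest "sha256:<hex>" and a parent ID p,
+// the result is hex(sha256("<hex> " + p)): the digest’s hex part and any further
+// components joined by single spaces and hashed with SHA-256.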
+
+func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+ // Preserve everything we don't specifically know about.
+ // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+ rawContents := map[string]*json.RawMessage{}
+ if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+ return nil, err
+ }
+ delete(rawContents, "rootfs")
+ delete(rawContents, "history")
+
+ updates := map[string]any{"id": v1ID}
+ if parentV1ID != "" {
+ updates["parent"] = parentV1ID
+ }
+ if throwaway {
+ updates["throwaway"] = throwaway
+ }
+ for field, value := range updates {
+ encoded, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ rawContents[field] = (*json.RawMessage)(&encoded)
+ }
+ return json.Marshal(rawContents)
+}
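+
+// Sketch of the effect (illustrative): "rootfs" and "history" are dropped from the
+// config JSON, "id" (and optionally "parent" and "throwaway") are injected, and every
+// other field value passes through unmodified via the json.RawMessage round-trip.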
+
+// SupportsEncryption returns whether encryption is supported for the manifest type.
+func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
+ return false
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool {
+ return m.m.CanChangeLayerCompression(mimeType)
+}
diff --git a/internal/image/docker_schema2_test.go b/internal/image/docker_schema2_test.go
new file mode 100644
index 0000000..cf3b7f4
--- /dev/null
+++ b/internal/image/docker_schema2_test.go
@@ -0,0 +1,726 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/testing/mocks"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+)
+
+const commonFixtureConfigDigest = "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
+
+func manifestSchema2FromFixture(t *testing.T, src types.ImageSource, fixture string, mustFail bool) genericManifest {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", fixture))
+ require.NoError(t, err)
+
+ m, err := manifestSchema2FromManifest(src, manifest)
+ if mustFail {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ return m
+}
+
+func manifestSchema2FromComponentsLikeFixture(configBlob []byte) genericManifest {
+ return manifestSchema2FromComponents(manifest.Schema2Descriptor{
+ MediaType: "application/octet-stream",
+ Size: 5940,
+ Digest: commonFixtureConfigDigest,
+ }, nil, configBlob, []manifest.Schema2Descriptor{
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ },
+ })
+}
+
+func TestManifestSchema2FromManifest(t *testing.T) {
+ // This just tests that the JSON can be loaded; we test that the parsed
+ // values are correctly returned in tests for the individual getter methods.
+ _ = manifestSchema2FromFixture(t, mocks.ForbiddenImageSource{}, "schema2.json", false)
+
+ _, err := manifestSchema2FromManifest(nil, []byte{})
+ assert.Error(t, err)
+}
+
+func TestManifestSchema2FromComponents(t *testing.T) {
+ // This just smoke-tests that the manifest can be created; we test that the parsed
+ // values are correctly returned in tests for the individual getter methods.
+ _ = manifestSchema2FromComponentsLikeFixture(nil)
+}
+
+func TestManifestSchema2Serialize(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema2FromFixture(t, mocks.ForbiddenImageSource{}, "schema2.json", false),
+ manifestSchema2FromComponentsLikeFixture(nil),
+ } {
+ serialized, err := m.serialize()
+ require.NoError(t, err)
+ // We would ideally like to compare “serialized” with some transformation of
+ // the original fixture, but the ordering of fields in JSON maps is undefined, so this is
+ // easier.
+ assertJSONEqualsFixture(t, serialized, "schema2.json")
+ }
+}
+
+func TestManifestSchema2ManifestMIMEType(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema2FromFixture(t, mocks.ForbiddenImageSource{}, "schema2.json", false),
+ manifestSchema2FromComponentsLikeFixture(nil),
+ } {
+ assert.Equal(t, manifest.DockerV2Schema2MediaType, m.manifestMIMEType())
+ }
+}
+
+func TestManifestSchema2ConfigInfo(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema2FromFixture(t, mocks.ForbiddenImageSource{}, "schema2.json", false),
+ manifestSchema2FromComponentsLikeFixture(nil),
+ } {
+ assert.Equal(t, types.BlobInfo{
+ Size: 5940,
+ Digest: commonFixtureConfigDigest,
+ MediaType: "application/octet-stream",
+ }, m.ConfigInfo())
+ }
+}
+
+// configBlobImageSource allows testing various GetBlob behaviors in .ConfigBlob()
+type configBlobImageSource struct {
+ mocks.ForbiddenImageSource // We inherit almost all of the methods, which just panic()
+ expectedDigest digest.Digest
+ f func() (io.ReadCloser, int64, error)
+}
+
+func (f configBlobImageSource) GetBlob(ctx context.Context, info types.BlobInfo, _ types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if info.Digest != f.expectedDigest {
+ panic("Unexpected digest in GetBlob")
+ }
+ return f.f()
+}
+
+func TestManifestSchema2ConfigBlob(t *testing.T) {
+ realConfigJSON, err := os.ReadFile("fixtures/schema2-config.json")
+ require.NoError(t, err)
+
+ for _, c := range []struct {
+ cbISfn func() (io.ReadCloser, int64, error)
+ blob []byte
+ }{
+ // Success
+ {func() (io.ReadCloser, int64, error) {
+ return io.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil
+ }, realConfigJSON},
+ // Various kinds of failures
+ {nil, nil},
+ {func() (io.ReadCloser, int64, error) {
+ return nil, -1, errors.New("Error returned from GetBlob")
+ }, nil},
+ {func() (io.ReadCloser, int64, error) {
+ reader, writer := io.Pipe()
+ err = writer.CloseWithError(errors.New("Expected error reading input in ConfigBlob"))
+ assert.NoError(t, err)
+ return reader, 1, nil
+ }, nil},
+ {func() (io.ReadCloser, int64, error) {
+ nonmatchingJSON := []byte("This does not match ConfigDescriptor.Digest")
+ return io.NopCloser(bytes.NewReader(nonmatchingJSON)), int64(len(nonmatchingJSON)), nil
+ }, nil},
+ } {
+ var src types.ImageSource
+ if c.cbISfn != nil {
+ src = configBlobImageSource{
+ expectedDigest: commonFixtureConfigDigest,
+ f: c.cbISfn,
+ }
+ } else {
+ src = nil
+ }
+ m := manifestSchema2FromFixture(t, src, "schema2.json", false)
+ blob, err := m.ConfigBlob(context.Background())
+ if c.blob != nil {
+ assert.NoError(t, err)
+ assert.Equal(t, c.blob, blob)
+ } else {
+ assert.Error(t, err)
+ }
+ }
+
+ // Generally configBlob should match ConfigInfo; it doesn’t strictly need to here, and the
+ // mismatch guarantees that ConfigBlob() returns the original stored contents instead
+ // of reading an object from elsewhere.
+ configBlob := []byte("config blob which does not match ConfigInfo")
+ // Create the manifest with the mismatching config blob and verify that ConfigBlob() returns it unchanged.
+ m := manifestSchema2FromComponentsLikeFixture(configBlob)
+ cb, err := m.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, configBlob, cb)
+}
+
+func TestManifestSchema2LayerInfo(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema2FromFixture(t, mocks.ForbiddenImageSource{}, "schema2.json", false),
+ manifestSchema2FromComponentsLikeFixture(nil),
+ } {
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ }, m.LayerInfos())
+ }
+}
+
+func TestManifestSchema2EmbeddedDockerReferenceConflicts(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema2FromFixture(t, mocks.ForbiddenImageSource{}, "schema2.json", false),
+ manifestSchema2FromComponentsLikeFixture(nil),
+ } {
+ for _, name := range []string{"busybox", "example.com:5555/ns/repo:tag"} {
+ ref, err := reference.ParseNormalizedNamed(name)
+ require.NoError(t, err)
+ conflicts := m.EmbeddedDockerReferenceConflicts(ref)
+ assert.False(t, conflicts)
+ }
+ }
+}
+
+func TestManifestSchema2Inspect(t *testing.T) {
+ configJSON, err := os.ReadFile("fixtures/schema2-config.json")
+ require.NoError(t, err)
+
+ m := manifestSchema2FromComponentsLikeFixture(configJSON)
+ ii, err := m.Inspect(context.Background())
+ require.NoError(t, err)
+ created := time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC)
+
+ var emptyAnnotations map[string]string
+ assert.Equal(t, types.ImageInspectInfo{
+ Tag: "",
+ Created: &created,
+ DockerVersion: "1.12.1",
+ Labels: map[string]string{},
+ Architecture: "amd64",
+ Os: "linux",
+ Layers: []string{
+ "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ },
+ LayersData: []types.ImageInspectLayer{{
+ MIMEType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ Annotations: emptyAnnotations,
+ },
+ },
+ Author: "",
+ Env: []string{
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download&filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc",
+ },
+ }, *ii)
+
+ // nil configBlob will trigger an error in m.ConfigBlob()
+ m = manifestSchema2FromComponentsLikeFixture(nil)
+ _, err = m.Inspect(context.Background())
+ assert.Error(t, err)
+
+ m = manifestSchema2FromComponentsLikeFixture([]byte("invalid JSON"))
+ _, err = m.Inspect(context.Background())
+ assert.Error(t, err)
+}
+
+func TestManifestSchema2UpdatedImageNeedsLayerDiffIDs(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema2FromFixture(t, mocks.ForbiddenImageSource{}, "schema2.json", false),
+ manifestSchema2FromComponentsLikeFixture(nil),
+ } {
+ assert.False(t, m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ }))
+ }
+}
+
+// schema2ImageSource is plausible enough for schema conversions in manifestSchema2.UpdatedImage() to work.
+type schema2ImageSource struct {
+ configBlobImageSource
+ ref reference.Named
+}
+
+func (s2is *schema2ImageSource) Reference() types.ImageReference {
+ return refImageReferenceMock{ref: s2is.ref}
+}
+
+// refImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference.
+type refImageReferenceMock struct {
+ mocks.ForbiddenImageReference // We inherit almost all of the methods, which just panic()
+ ref reference.Named
+}
+
+func (ref refImageReferenceMock) DockerReference() reference.Named {
+ return ref.ref
+}
+
+func newSchema2ImageSource(t *testing.T, dockerRef string) *schema2ImageSource {
+ realConfigJSON, err := os.ReadFile("fixtures/schema2-config.json")
+ require.NoError(t, err)
+
+ ref, err := reference.ParseNormalizedNamed(dockerRef)
+ require.NoError(t, err)
+
+ return &schema2ImageSource{
+ configBlobImageSource: configBlobImageSource{
+ expectedDigest: commonFixtureConfigDigest,
+ f: func() (io.ReadCloser, int64, error) {
+ return io.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil
+ },
+ },
+ ref: ref,
+ }
+}
+
+type memoryImageDest struct {
+ ref reference.Named
+ storedBlobs map[digest.Digest][]byte
+}
+
+func (d *memoryImageDest) Reference() types.ImageReference {
+ return refImageReferenceMock{ref: d.ref}
+}
+func (d *memoryImageDest) Close() error {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) SupportedManifestMIMETypes() []string {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) SupportsSignatures(ctx context.Context) error {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) DesiredLayerCompression() types.LayerCompression {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) AcceptsForeignLayerURLs() bool {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) MustMatchRuntimeOS() bool {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) IgnoresEmbeddedDockerReference() bool {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) HasThreadSafePutBlob() bool {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ if d.storedBlobs == nil {
+ d.storedBlobs = make(map[digest.Digest][]byte)
+ }
+ if inputInfo.Digest == "" {
+ panic("inputInfo.Digest unexpectedly empty")
+ }
+ contents, err := io.ReadAll(stream)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ d.storedBlobs[inputInfo.Digest] = contents
+ return types.BlobInfo{Digest: inputInfo.Digest, Size: int64(len(contents))}, nil
+}
+func (d *memoryImageDest) TryReusingBlob(context.Context, types.BlobInfo, types.BlobInfoCache, bool) (bool, types.BlobInfo, error) {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) PutManifest(context.Context, []byte, *digest.Digest) error {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) Commit(context.Context, types.UnparsedImage) error {
+ panic("Unexpected call to a mock function")
+}
+
+// modifiedLayerInfos returns two identical (but separately allocated) copies of
+// the layers from input, where the size and digest of each item are predictably modified from the original.
+// (This is used to test ManifestUpdateOptions.LayerInfos handling.)
+func modifiedLayerInfos(t *testing.T, input []types.BlobInfo) ([]types.BlobInfo, []types.BlobInfo) {
+ modified := []types.BlobInfo{}
+ for _, blob := range input {
+ b2 := blob
+ oldDigest, err := hex.DecodeString(b2.Digest.Encoded())
+ require.NoError(t, err)
+ oldDigest[len(oldDigest)-1] ^= 1
+ b2.Digest = digest.NewDigestFromEncoded(b2.Digest.Algorithm(), hex.EncodeToString(oldDigest))
+ b2.Size ^= 1
+ modified = append(modified, b2)
+ }
+
+ copy := slices.Clone(modified)
+ return modified, copy
+}
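+
+// For example (hypothetical values): a layer with digest sha256:…00 and size 150 comes back
+// with digest sha256:…01 and size 151; the lowest bit of the digest’s last byte and of the
+// size are flipped, so the values stay well-formed but no longer match the original.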
+
+func TestManifestSchema2UpdatedImage(t *testing.T) {
+ originalSrc := newSchema2ImageSource(t, "httpd:latest")
+ original := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false)
+
+ // LayerInfos:
+ layerInfos := append(slices.Clone(original.LayerInfos()[1:]), original.LayerInfos()[0])
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: layerInfos,
+ })
+ require.NoError(t, err)
+ assert.Equal(t, layerInfos, res.LayerInfos())
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: append(layerInfos, layerInfos[0]),
+ })
+ assert.Error(t, err)
+
+ // EmbeddedDockerReference:
+ // … is ignored
+ embeddedRef, err := reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ EmbeddedDockerReference: embeddedRef,
+ })
+ require.NoError(t, err)
+ nonEmbeddedRef, err := reference.ParseNormalizedNamed("notbusybox:notlatest")
+ require.NoError(t, err)
+ conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef)
+ assert.False(t, conflicts)
+
+ // ManifestMIMEType:
+ // Only smoke-test the valid conversions; detailed tests are below. (This also verifies that “original” is not affected.)
+ for _, mime := range []string{
+ manifest.DockerV2Schema1MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ } {
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: mime,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: &memoryImageDest{ref: originalSrc.ref},
+ },
+ })
+ assert.NoError(t, err, mime)
+ }
+ for _, mime := range []string{
+ manifest.DockerV2Schema2MediaType, // This indicates a confused caller, not a no-op
+ "this is invalid",
+ } {
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: mime,
+ })
+ assert.Error(t, err, mime)
+ }
+
+ // m hasn’t been changed:
+ m2 := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false)
+ typedOriginal, ok := original.(*manifestSchema2)
+ require.True(t, ok)
+ typedM2, ok := m2.(*manifestSchema2)
+ require.True(t, ok)
+ assert.Equal(t, *typedM2, *typedOriginal)
+}
+
+func TestConvertToManifestOCI(t *testing.T) {
+ originalSrc := newSchema2ImageSource(t, "httpd-copy:latest")
+ original := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false)
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ })
+ require.NoError(t, err)
+
+ convertedJSON, mt, err := res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt)
+ assertJSONEqualsFixture(t, convertedJSON, "schema2-to-oci1.json")
+
+ convertedConfig, err := res.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assertJSONEqualsFixture(t, convertedConfig, "schema2-to-oci1-config.json")
+
+ // Conversion to OCI with encryption is possible.
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt),
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ })
+ require.NoError(t, err)
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt)
+ // Layers have been updated as expected
+ ociManifest, err := manifestOCI1FromManifest(originalSrc, convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ },
+ }, ociManifest.LayerInfos())
+}
+
+func TestConvertToManifestOCIAllMediaTypes(t *testing.T) {
+ originalSrc := newSchema2ImageSource(t, "httpd-copy:latest")
+ original := manifestSchema2FromFixture(t, originalSrc, "schema2-all-media-types.json", false)
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ })
+ require.NoError(t, err)
+ convertedJSON, mt, err := res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt)
+ assertJSONEqualsFixture(t, convertedJSON, "schema2-all-media-types-to-oci1.json")
+
+ convertedConfig, err := res.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assertJSONEqualsFixture(t, convertedConfig, "schema2-to-oci1-config.json")
+}
+
+func TestConvertToOCIWithInvalidMIMEType(t *testing.T) {
+ originalSrc := newSchema2ImageSource(t, "httpd-copy:latest")
+ manifestSchema2FromFixture(t, originalSrc, "schema2-invalid-media-type.json", true)
+}
+
+func TestConvertToManifestSchema1(t *testing.T) {
+ originalSrc := newSchema2ImageSource(t, "httpd-copy:latest")
+ original := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false)
+ memoryDest := &memoryImageDest{ref: originalSrc.ref}
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ require.NoError(t, err)
+
+ convertedJSON, mt, err := res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt)
+
+ // schema2-to-schema1-by-docker.json is the result of asking the Docker Hub for a schema1 manifest,
+ // except that we have replaced "name" to verify that the ref from
+ // memoryDest, not from originalSrc, is used.
+ assertJSONEqualsFixture(t, convertedJSON, "schema2-to-schema1-by-docker.json", "signatures")
+
+ assert.Equal(t, GzippedEmptyLayer, memoryDest.storedBlobs[GzippedEmptyLayerDigest])
+
+ // Conversion to schema1 together with changing LayerInfos works as expected (which requires
+ // handling schema1 empty layers):
+ updatedLayers, updatedLayersCopy := modifiedLayerInfos(t, original.LayerInfos())
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ require.NoError(t, err)
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt)
+ // Layers have been updated as expected
+ s1Manifest, err := manifestSchema1FromManifest(convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5ba", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680d", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a8", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25908", Size: -1},
+ {Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fb", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ }, s1Manifest.LayerInfos())
+
+ // Conversion to schema1 with encryption fails
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt),
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ assert.Error(t, err)
+
+ // FIXME? Test also the various failure cases, if only to see that we don't crash?
+}
+
+func TestConvertSchema2ToManifestOCIWithAnnotations(t *testing.T) {
+ // Test that, when converting an image from schema 2 (which doesn't support certain fields like
+ // URLs, annotations, etc.) to an OCI image (which supports those fields),
+ // UpdatedImage propagates those fields to the converted manifest.
+ originalSrc := newSchema2ImageSource(t, "httpd-copy:latest")
+ original := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false)
+ layerInfoOverwrites := []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ URLs: []string{
+ "https://layer.url",
+ },
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ Annotations: map[string]string{
+ "test-annotation-2": "two",
+ },
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ }
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+ LayerInfos: layerInfoOverwrites,
+ })
+ require.NoError(t, err)
+ assert.Equal(t, res.LayerInfos(), layerInfoOverwrites)
+
+ // Doing the same while staying in schema2 should not propagate these fields
+ originalSrc = newSchema2ImageSource(t, "httpd-copy:latest")
+ original = manifestSchema2FromFixture(t, originalSrc, "schema2.json", false)
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: "",
+ LayerInfos: layerInfoOverwrites,
+ })
+ require.NoError(t, err)
+ assert.NotEqual(t, res.LayerInfos(), layerInfoOverwrites)
+}
+
+func TestManifestSchema2CanChangeLayerCompression(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestSchema2FromFixture(t, mocks.ForbiddenImageSource{}, "schema2.json", false),
+ manifestSchema2FromComponentsLikeFixture(nil),
+ } {
+ assert.True(t, m.CanChangeLayerCompression(manifest.DockerV2Schema2LayerMediaType))
+ // Some projects like to use squashfs and other unspecified formats for layers; don’t touch those.
+ assert.False(t, m.CanChangeLayerCompression("a completely unknown and quite possibly invalid MIME type"))
+ }
+}
diff --git a/internal/image/fixtures/oci1-all-media-types-config.json b/internal/image/fixtures/oci1-all-media-types-config.json
new file mode 100644
index 0000000..cd17d26
--- /dev/null
+++ b/internal/image/fixtures/oci1-all-media-types-config.json
@@ -0,0 +1,161 @@
+{
+ "architecture": "amd64",
+ "config": {
+ "Hostname": "383850eeb47b",
+ "Domainname": "",
+ "User": "",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "ExposedPorts": {
+ "80/tcp": {}
+ },
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"
+ ],
+ "Cmd": [
+ "httpd-foreground"
+ ],
+ "ArgsEscaped": true,
+ "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd",
+ "Volumes": null,
+ "WorkingDir": "/usr/local/apache2",
+ "Entrypoint": null,
+ "OnBuild": [],
+ "Labels": {}
+ },
+ "container": "8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69",
+ "container_config": {
+ "Hostname": "383850eeb47b",
+ "Domainname": "",
+ "User": "",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "ExposedPorts": {
+ "80/tcp": {}
+ },
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"
+ ],
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ "#(nop) ",
+ "CMD [\"httpd-foreground\"]"
+ ],
+ "ArgsEscaped": true,
+ "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd",
+ "Volumes": null,
+ "WorkingDir": "/usr/local/apache2",
+ "Entrypoint": null,
+ "OnBuild": [],
+ "Labels": {}
+ },
+ "created": "2016-09-23T23:20:45.78976459Z",
+ "docker_version": "1.12.1",
+ "history": [
+ {
+ "created": "2016-09-23T18:08:50.537223822Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "
+ },
+ {
+ "created": "2016-09-23T18:08:51.133779867Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:40.725768956Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:41.037788416Z",
+ "created_by": "/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:41.990121202Z",
+ "created_by": "/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""
+ },
+ {
+ "created": "2016-09-23T19:16:42.339911155Z",
+ "created_by": "/bin/sh -c #(nop) WORKDIR /usr/local/apache2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:54.948461741Z",
+ "created_by": "/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"
+ },
+ {
+ "created": "2016-09-23T19:16:55.321573403Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:55.629947307Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:19:03.705796801Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:19:04.009782822Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:20:44.585743332Z",
+ "created_by": "/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"
+ },
+ {
+ "created": "2016-09-23T23:20:45.127455562Z",
+ "created_by": "/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "
+ },
+ {
+ "created": "2016-09-23T23:20:45.453934921Z",
+ "created_by": "/bin/sh -c #(nop) EXPOSE 80/tcp",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:20:45.78976459Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"httpd-foreground\"]",
+ "empty_layer": true
+ },
+ {
+ "created": "2023-10-01T02:03:04.56789764Z",
+ "created_by": "/bin/sh echo something > last"
+ }
+ ],
+ "os": "linux",
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab",
+ "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c",
+ "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56",
+ "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9",
+ "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b",
+ "sha256:1111111111111111111111111111111111111111111111111111111111111111"
+ ]
+ }
+} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1-all-media-types-to-schema2-config.json b/internal/image/fixtures/oci1-all-media-types-to-schema2-config.json
new file mode 100644
index 0000000..cd17d26
--- /dev/null
+++ b/internal/image/fixtures/oci1-all-media-types-to-schema2-config.json
@@ -0,0 +1,161 @@
+{
+ "architecture": "amd64",
+ "config": {
+ "Hostname": "383850eeb47b",
+ "Domainname": "",
+ "User": "",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "ExposedPorts": {
+ "80/tcp": {}
+ },
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"
+ ],
+ "Cmd": [
+ "httpd-foreground"
+ ],
+ "ArgsEscaped": true,
+ "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd",
+ "Volumes": null,
+ "WorkingDir": "/usr/local/apache2",
+ "Entrypoint": null,
+ "OnBuild": [],
+ "Labels": {}
+ },
+ "container": "8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69",
+ "container_config": {
+ "Hostname": "383850eeb47b",
+ "Domainname": "",
+ "User": "",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "ExposedPorts": {
+ "80/tcp": {}
+ },
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"
+ ],
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ "#(nop) ",
+ "CMD [\"httpd-foreground\"]"
+ ],
+ "ArgsEscaped": true,
+ "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd",
+ "Volumes": null,
+ "WorkingDir": "/usr/local/apache2",
+ "Entrypoint": null,
+ "OnBuild": [],
+ "Labels": {}
+ },
+ "created": "2016-09-23T23:20:45.78976459Z",
+ "docker_version": "1.12.1",
+ "history": [
+ {
+ "created": "2016-09-23T18:08:50.537223822Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "
+ },
+ {
+ "created": "2016-09-23T18:08:51.133779867Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:40.725768956Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:41.037788416Z",
+ "created_by": "/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:41.990121202Z",
+ "created_by": "/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""
+ },
+ {
+ "created": "2016-09-23T19:16:42.339911155Z",
+ "created_by": "/bin/sh -c #(nop) WORKDIR /usr/local/apache2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:54.948461741Z",
+ "created_by": "/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"
+ },
+ {
+ "created": "2016-09-23T19:16:55.321573403Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:55.629947307Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:19:03.705796801Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:19:04.009782822Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:20:44.585743332Z",
+ "created_by": "/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"
+ },
+ {
+ "created": "2016-09-23T23:20:45.127455562Z",
+ "created_by": "/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "
+ },
+ {
+ "created": "2016-09-23T23:20:45.453934921Z",
+ "created_by": "/bin/sh -c #(nop) EXPOSE 80/tcp",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:20:45.78976459Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"httpd-foreground\"]",
+ "empty_layer": true
+ },
+ {
+ "created": "2023-10-01T02:03:04.56789764Z",
+ "created_by": "/bin/sh echo something > last"
+ }
+ ],
+ "os": "linux",
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab",
+ "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c",
+ "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56",
+ "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9",
+ "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b",
+ "sha256:1111111111111111111111111111111111111111111111111111111111111111"
+ ]
+ }
+} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1-all-media-types-to-schema2.json b/internal/image/fixtures/oci1-all-media-types-to-schema2.json
new file mode 100644
index 0000000..702addf
--- /dev/null
+++ b/internal/image/fixtures/oci1-all-media-types-to-schema2.json
@@ -0,0 +1,41 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 4651,
+ "digest": "sha256:a13a0762ab7bed51a1b49adec0a702b1cd99294fd460a025b465bcfb7b152745"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.zstd",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 152,
+ "digest": "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1-all-media-types.json b/internal/image/fixtures/oci1-all-media-types.json
new file mode 100644
index 0000000..e92fe2c
--- /dev/null
+++ b/internal/image/fixtures/oci1-all-media-types.json
@@ -0,0 +1,41 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 4651,
+ "digest": "sha256:94ac69e4413476d061116c9d05757e46a0afc744e8b9886f75cf7f6f14c78fb3"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+zstd",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 152,
+ "digest": "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1-artifact.json b/internal/image/fixtures/oci1-artifact.json
new file mode 100644
index 0000000..9e54409
--- /dev/null
+++ b/internal/image/fixtures/oci1-artifact.json
@@ -0,0 +1,43 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.custom.artifact.config.v1+json",
+ "size": 5940,
+ "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f",
+ "annotations": {
+ "test-annotation-1": "one"
+ }
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ "urls": ["https://layer.url"]
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ "annotations": {
+ "test-annotation-2": "two"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+}
diff --git a/internal/image/fixtures/oci1-config-extra-fields.json b/internal/image/fixtures/oci1-config-extra-fields.json
new file mode 100644
index 0000000..1d670d5
--- /dev/null
+++ b/internal/image/fixtures/oci1-config-extra-fields.json
@@ -0,0 +1,158 @@
+{
+ "extra-string-field": "string",
+ "extra-object": {"foo":"bar"},
+ "architecture": "amd64",
+ "config": {
+ "Hostname": "383850eeb47b",
+ "Domainname": "",
+ "User": "",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "ExposedPorts": {
+ "80/tcp": {}
+ },
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"
+ ],
+ "Cmd": [
+ "httpd-foreground"
+ ],
+ "ArgsEscaped": true,
+ "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd",
+ "Volumes": null,
+ "WorkingDir": "/usr/local/apache2",
+ "Entrypoint": null,
+ "OnBuild": [],
+ "Labels": {}
+ },
+ "container": "8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69",
+ "container_config": {
+ "Hostname": "383850eeb47b",
+ "Domainname": "",
+ "User": "",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "ExposedPorts": {
+ "80/tcp": {}
+ },
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"
+ ],
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ "#(nop) ",
+ "CMD [\"httpd-foreground\"]"
+ ],
+ "ArgsEscaped": true,
+ "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd",
+ "Volumes": null,
+ "WorkingDir": "/usr/local/apache2",
+ "Entrypoint": null,
+ "OnBuild": [],
+ "Labels": {}
+ },
+ "created": "2016-09-23T23:20:45.78976459Z",
+ "docker_version": "1.12.1",
+ "history": [
+ {
+ "created": "2016-09-23T18:08:50.537223822Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "
+ },
+ {
+ "created": "2016-09-23T18:08:51.133779867Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:40.725768956Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:41.037788416Z",
+ "created_by": "/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:41.990121202Z",
+ "created_by": "/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""
+ },
+ {
+ "created": "2016-09-23T19:16:42.339911155Z",
+ "created_by": "/bin/sh -c #(nop) WORKDIR /usr/local/apache2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:54.948461741Z",
+ "created_by": "/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"
+ },
+ {
+ "created": "2016-09-23T19:16:55.321573403Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:55.629947307Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:19:03.705796801Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:19:04.009782822Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:20:44.585743332Z",
+ "created_by": "/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"
+ },
+ {
+ "created": "2016-09-23T23:20:45.127455562Z",
+ "created_by": "/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "
+ },
+ {
+ "created": "2016-09-23T23:20:45.453934921Z",
+ "created_by": "/bin/sh -c #(nop) EXPOSE 80/tcp",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:20:45.78976459Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"httpd-foreground\"]",
+ "empty_layer": true
+ }
+ ],
+ "os": "linux",
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab",
+ "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c",
+ "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56",
+ "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9",
+ "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"
+ ]
+ }
+} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1-config.json b/internal/image/fixtures/oci1-config.json
new file mode 100644
index 0000000..f49230e
--- /dev/null
+++ b/internal/image/fixtures/oci1-config.json
@@ -0,0 +1 @@
+{"architecture":"amd64","config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["httpd-foreground"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"container":"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69","container_config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"httpd-foreground\"]"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2016-09-23T23:20:45.78976459Z","docker_version":"1.12.1","history":[{"created":"2016-09-23T18:08:50.537223822Z","created_by":"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "},{"created":"2016-09-23T18:08:51.133779867Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/bash\"]","empty_layer":true},{"created":"2016-09-23T19:16:40.725768956Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:41.037788416Z","created_by":"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","empty_layer":true},{"created":"2016-09-23T19:16:41.990121202Z","created_by":"/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""},{"created":"2016-09-23T19:16:42.339911155Z","created_by":"/bin/sh -c #(nop) WORKDIR /usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:54.948461741Z","created_by":"/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"},{"created":"2016-09-23T19:16:55.321573403Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23","empty_layer":true},{"created":"2016-09-23T19:16:55.629947307Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","empty_layer":true},{"created":"2016-09-23T23:19:03.705796801Z","created_by":"/bin/sh -c #(nop) ENV 
HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","empty_layer":true},{"created":"2016-09-23T23:19:04.009782822Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc","empty_layer":true},{"created":"2016-09-23T23:20:44.585743332Z","created_by":"/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"},{"created":"2016-09-23T23:20:45.127455562Z","created_by":"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "},{"created":"2016-09-23T23:20:45.453934921Z","created_by":"/bin/sh -c #(nop) EXPOSE 80/tcp","empty_layer":true},{"created":"2016-09-23T23:20:45.78976459Z","created_by":"/bin/sh -c #(nop) CMD [\"httpd-foreground\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab","sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c","sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56","sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9","sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"]}} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1-extra-config-fields.json b/internal/image/fixtures/oci1-extra-config-fields.json
new file mode 100644
index 0000000..b297f4a
--- /dev/null
+++ b/internal/image/fixtures/oci1-extra-config-fields.json
@@ -0,0 +1,43 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7693,
+ "digest": "sha256:7f2a783ee2f07826b1856e68a40c930cd0430d6e7d4a88c29c2c8b7718706e74",
+ "annotations": {
+ "test-annotation-1": "one"
+ }
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ "urls": ["https://layer.url"]
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ "annotations": {
+ "test-annotation-2": "two"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+}
diff --git a/internal/image/fixtures/oci1-invalid-media-type.json b/internal/image/fixtures/oci1-invalid-media-type.json
new file mode 100644
index 0000000..7b7d06e
--- /dev/null
+++ b/internal/image/fixtures/oci1-invalid-media-type.json
@@ -0,0 +1,15 @@
+{
+ "schemaVersion": 2,
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 5940,
+ "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+invalid-suffix",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1-to-schema1.json b/internal/image/fixtures/oci1-to-schema1.json
new file mode 100644
index 0000000..a85b3ff
--- /dev/null
+++ b/internal/image/fixtures/oci1-to-schema1.json
@@ -0,0 +1 @@
+{"name":"library/httpd-copy","tag":"latest","architecture":"amd64","fsLayers":[{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"},{"blobSum":"sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"},{"blobSum":"sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"}],"history":[{"v1Compatibility":"{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"httpd-foreground\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"container\":\"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69\",\"container_config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD 
[\\\"httpd-foreground\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"created\":\"2016-09-23T23:20:45.78976459Z\",\"docker_version\":\"1.12.1\",\"id\":\"dca7323f9c839837493199d63263083d94f5eb1796d7bd04ca8374c4e9d3749a\",\"os\":\"linux\",\"parent\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"parent\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"created\":\"2016-09-23T23:20:45.453934921Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) EXPOSE 80/tcp\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"parent\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"created\":\"2016-09-23T23:20:45.127455562Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ \"]}}"},{"v1Compatibility":"{\"id\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"parent\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"created\":\"2016-09-23T23:20:44.585743332Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -x \\t\\u0026\\u0026 buildDeps=' \\t\\tbzip2 \\t\\tca-certificates \\t\\tgcc \\t\\tlibpcre++-dev \\t\\tlibssl-dev \\t\\tmake \\t\\twget \\t' \\t\\u0026\\u0026 apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends $buildDeps \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/* \\t\\t\\u0026\\u0026 wget -O httpd.tar.bz2 \\\"$HTTPD_BZ2_URL\\\" \\t\\u0026\\u0026 echo \\\"$HTTPD_SHA1 *httpd.tar.bz2\\\" | sha1sum -c - \\t\\u0026\\u0026 wget -O httpd.tar.bz2.asc \\\"$HTTPD_ASC_URL\\\" \\t\\u0026\\u0026 export GNUPGHOME=\\\"$(mktemp -d)\\\" \\t\\u0026\\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \\t\\u0026\\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \\t\\u0026\\u0026 rm -r \\\"$GNUPGHOME\\\" httpd.tar.bz2.asc \\t\\t\\u0026\\u0026 mkdir -p src \\t\\u0026\\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \\t\\u0026\\u0026 rm httpd.tar.bz2 \\t\\u0026\\u0026 cd src \\t\\t\\u0026\\u0026 ./configure \\t\\t--prefix=\\\"$HTTPD_PREFIX\\\" \\t\\t--enable-mods-shared=reallyall \\t\\u0026\\u0026 make -j\\\"$(nproc)\\\" \\t\\u0026\\u0026 make install \\t\\t\\u0026\\u0026 cd .. 
\\t\\u0026\\u0026 rm -r src \\t\\t\\u0026\\u0026 sed -ri \\t\\t-e 's!^(\\\\s*CustomLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/1!g' \\t\\t-e 's!^(\\\\s*ErrorLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/2!g' \\t\\t\\\"$HTTPD_PREFIX/conf/httpd.conf\\\" \\t\\t\\u0026\\u0026 apt-get purge -y --auto-remove $buildDeps\"]}}"},{"v1Compatibility":"{\"id\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"parent\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"created\":\"2016-09-23T23:19:04.009782822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"parent\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"created\":\"2016-09-23T23:19:03.705796801Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"parent\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"created\":\"2016-09-23T19:16:55.629947307Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"parent\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"created\":\"2016-09-23T19:16:55.321573403Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"parent\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"created\":\"2016-09-23T19:16:54.948461741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends \\t\\tlibapr1 \\t\\tlibaprutil1 \\t\\tlibaprutil1-ldap \\t\\tlibapr1-dev \\t\\tlibaprutil1-dev \\t\\tlibpcre++0 \\t\\tlibssl1.0.0 \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/*\"]}}"},{"v1Compatibility":"{\"id\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"parent\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"created\":\"2016-09-23T19:16:42.339911155Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) WORKDIR /usr/local/apache2\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"parent\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"created\":\"2016-09-23T19:16:41.990121202Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir -p \\\"$HTTPD_PREFIX\\\" \\t\\u0026\\u0026 chown www-data:www-data \\\"$HTTPD_PREFIX\\\"\"]}}"},{"v1Compatibility":"{\"id\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"parent\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"created\":\"2016-09-23T19:16:41.037788416Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV 
PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"parent\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"created\":\"2016-09-23T19:16:40.725768956Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"parent\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:51.133779867Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"/bin/bash\\\"]\"]},\"throwaway\":true}"},{"v1Compatibility":"{\"id\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:50.537223822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / \"]}}"}],"schemaVersion":1,"signatures":[{"header":{"jwk":{"crv":"P-256","kid":"Q3ZE:RPLV:YRWL:CJGY:3YUV:CVRB:KOZN:DPRO:QZKD:B7KB:4FJ5:XUDM","kty":"EC","x":"iIJKPTtzobd73WmVmIoRSGkHWQB86bL5BBw9-YWcVjA","y":"3fiBd8u2fXRc5DZG20gQWQ8LUvTuPRqkU0e672ymn-8"},"alg":"ES256"},"signature":"I1uNEFT2P64rwc7dajzBOCD9o4DB4W7xbWQRxOgWm43Py1_N3omkvqUStMeUQsQVjNqje6NQyVzQzOACDHsPYg","protected":"eyJmb3JtYXRMZW5ndGgiOjEwMTEyLCJmb3JtYXRUYWlsIjoiZlEiLCJ0aW1lIjoiMjAyMC0wMy0yNVQyMzo0NzowOVoifQ"}]}
diff --git a/internal/image/fixtures/oci1-to-schema2-config.json b/internal/image/fixtures/oci1-to-schema2-config.json
new file mode 100644
index 0000000..f49230e
--- /dev/null
+++ b/internal/image/fixtures/oci1-to-schema2-config.json
@@ -0,0 +1 @@
+{"architecture":"amd64","config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["httpd-foreground"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"container":"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69","container_config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"httpd-foreground\"]"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2016-09-23T23:20:45.78976459Z","docker_version":"1.12.1","history":[{"created":"2016-09-23T18:08:50.537223822Z","created_by":"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "},{"created":"2016-09-23T18:08:51.133779867Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/bash\"]","empty_layer":true},{"created":"2016-09-23T19:16:40.725768956Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:41.037788416Z","created_by":"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","empty_layer":true},{"created":"2016-09-23T19:16:41.990121202Z","created_by":"/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""},{"created":"2016-09-23T19:16:42.339911155Z","created_by":"/bin/sh -c #(nop) WORKDIR /usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:54.948461741Z","created_by":"/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"},{"created":"2016-09-23T19:16:55.321573403Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23","empty_layer":true},{"created":"2016-09-23T19:16:55.629947307Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","empty_layer":true},{"created":"2016-09-23T23:19:03.705796801Z","created_by":"/bin/sh -c #(nop) ENV 
HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","empty_layer":true},{"created":"2016-09-23T23:19:04.009782822Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc","empty_layer":true},{"created":"2016-09-23T23:20:44.585743332Z","created_by":"/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"},{"created":"2016-09-23T23:20:45.127455562Z","created_by":"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "},{"created":"2016-09-23T23:20:45.453934921Z","created_by":"/bin/sh -c #(nop) EXPOSE 80/tcp","empty_layer":true},{"created":"2016-09-23T23:20:45.78976459Z","created_by":"/bin/sh -c #(nop) CMD [\"httpd-foreground\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab","sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c","sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56","sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9","sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"]}} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1-to-schema2.json b/internal/image/fixtures/oci1-to-schema2.json
new file mode 100644
index 0000000..50aa6dc
--- /dev/null
+++ b/internal/image/fixtures/oci1-to-schema2.json
@@ -0,0 +1,37 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 5940,
+ "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ "urls": ["https://layer.url"]
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/oci1.encrypted.json b/internal/image/fixtures/oci1.encrypted.json
new file mode 100644
index 0000000..c6c523e
--- /dev/null
+++ b/internal/image/fixtures/oci1.encrypted.json
@@ -0,0 +1,43 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 5940,
+ "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f",
+ "annotations": {
+ "test-annotation-1": "one"
+ }
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ "size": 51354364,
+ "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ "size": 150,
+ "digest": "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ "size": 11739507,
+ "digest": "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ "urls": ["https://layer.url"]
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ "size": 8841833,
+ "digest": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+ "annotations": {
+ "test-annotation-2": "two"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ "size": 291,
+ "digest": "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
+ }
+ ]
+}
diff --git a/internal/image/fixtures/oci1.json b/internal/image/fixtures/oci1.json
new file mode 100644
index 0000000..26efc23
--- /dev/null
+++ b/internal/image/fixtures/oci1.json
@@ -0,0 +1,43 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 5940,
+ "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f",
+ "annotations": {
+ "test-annotation-1": "one"
+ }
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ "urls": ["https://layer.url"]
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ "annotations": {
+ "test-annotation-2": "two"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+}
diff --git a/internal/image/fixtures/schema1-for-oci-config.json b/internal/image/fixtures/schema1-for-oci-config.json
new file mode 100644
index 0000000..ee58257
--- /dev/null
+++ b/internal/image/fixtures/schema1-for-oci-config.json
@@ -0,0 +1,29 @@
+{
+ "schemaVersion": 1,
+ "name": "google_containers/pause-amd64",
+ "tag": "3.0",
+ "architecture": "amd64",
+ "fsLayers": [
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:f112334343777b75be77ec1f835e3bbbe7d7bd46e27b6a2ae35c6b3cfea0987c"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ }
+ ],
+ "history": [
+ {
+ "v1Compatibility": "{\"id\":\"bb497e16a2d55195649174d1fadac52b00fa2c14124d73009712606909286bc5\",\"parent\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"created\":\"2016-05-04T06:26:41.522308365Z\",\"container\":\"a9873535145fe72b464d3055efbac36aab70d059914e221cbbd7fe3cac53ef6b\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT \\u0026{[\\\"/pause\\\"]}\"],\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\"}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"parent\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:41.091672218Z\",\"container\":\"e1b38778b023f25642273ed9e7f4846b4bf38b22a8b55755880b2e6ab6019811\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:b7eb6a5df9d5fbe509cac16ed89f8d6513a4362017184b14c6a5fae151eee5c5 in /pause\"],\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":746888}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:40.628395649Z\",\"container\":\"95722352e41d57660259fbede4413d06889a28eb07a7302d2a7b3f9c71ceaa46\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ARG ARCH\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\"}"
+ }
+ ],"signatures":[{"header":{"alg":"ES256","jwk":{"crv":"P-256","kid":"ORN4:M47W:3KP3:TZRZ:C3UF:5MFQ:INZV:TCMY:LHNV:EYQU:IRGJ:IJLJ","kty":"EC","x":"yJ0ZQ19NBZUQn8LV60sFEabhlgky9svozfK0VGVou7Y","y":"gOJScOkkLVY1f8aAx-6XXpVM5rJaDYLkCNJ1dvcQGMs"}},"protected":"eyJmb3JtYXRMZW5ndGgiOjQxMzMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNS0wNFQwNjoyODo1MVoifQ","signature":"77_7DVx1IZ3PiKNnO7QnvoF7Sgik4GI4bnlVJdtQW461dSyYzd-nSdBmky8Jew3InEW8Cuv_t5w4GmOSwXvL7g"}]
+
+}
diff --git a/internal/image/fixtures/schema1-to-oci1-config.json b/internal/image/fixtures/schema1-to-oci1-config.json
new file mode 100644
index 0000000..950e225
--- /dev/null
+++ b/internal/image/fixtures/schema1-to-oci1-config.json
@@ -0,0 +1,82 @@
+{
+ "architecture": "amd64",
+ "config": {
+ "User": "nova",
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "container=oci",
+ "KOLLA_BASE_DISTRO=rhel",
+ "KOLLA_INSTALL_TYPE=binary",
+ "KOLLA_INSTALL_METATYPE=rhos",
+ "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ "
+ ],
+ "Cmd": [
+ "kolla_start"
+ ],
+ "Labels": {
+ "Kolla-SHA": "5.0.0-39-g6f1b947b",
+ "architecture": "x86_64",
+ "authoritative-source-url": "registry.access.redhat.com",
+ "build-date": "2018-01-25T00:32:27.807261",
+ "com.redhat.build-host": "ip-10-29-120-186.ec2.internal",
+ "com.redhat.component": "openstack-nova-api-docker",
+ "description": "Red Hat OpenStack Platform 12.0 nova-api",
+ "distribution-scope": "public",
+ "io.k8s.description": "Red Hat OpenStack Platform 12.0 nova-api",
+ "io.k8s.display-name": "Red Hat OpenStack Platform 12.0 nova-api",
+ "io.openshift.tags": "rhosp osp openstack osp-12.0",
+ "kolla_version": "stable/pike",
+ "name": "rhosp12/openstack-nova-api",
+ "release": "20180124.1",
+ "summary": "Red Hat OpenStack Platform 12.0 nova-api",
+ "tripleo-common_version": "7.6.3-23-g4891cfe",
+ "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1",
+ "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98",
+ "vcs-type": "git",
+ "vendor": "Red Hat, Inc.",
+ "version": "12.0",
+ "version-release": "12.0-20180124.1"
+ },
+ "ArgsEscaped": true
+ },
+ "created": "2018-01-25T00:37:48.268558Z",
+ "os": "linux",
+ "history": [
+ {
+ "comment": "Imported from -",
+ "created": "2017-11-21T16:47:27.755341705Z"
+ },
+ {
+ "author": "Red Hat, Inc.",
+ "created": "2017-11-21T16:49:37.292899Z",
+ "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'"
+ },
+ {
+ "created": "2018-01-24T21:40:32.494686Z",
+ "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"
+ },
+ {
+ "created": "2018-01-24T22:00:57.807862Z",
+ "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"
+ },
+ {
+ "created": "2018-01-24T23:08:25.300741Z",
+ "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"
+ },
+ {
+ "created": "2018-01-25T00:37:48.268558Z",
+ "created_by": "/bin/sh -c #(nop) USER [nova]"
+ }
+ ],
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:e1d829eddb62dc49f1c56dbf8acd0c71299b3996115399de853a9d66d81b822f",
+ "sha256:02404b4d7e5d89b1383ca346b4462b199128aa4b238c5a2b2c186004ac148ba8",
+ "sha256:45fad80a4b1cec165c421eb570dec312d825bd8fac362e255028fa3f2169148d",
+ "sha256:7ddef8efd44586e54880ec4797458eac87b368544c438d7e7c63fbc0d9a7ae97",
+ "sha256:b56b16b6407ba1b86252e7e50f98f142cf6844fab42e4495d56ebb7ce559e2af",
+ "sha256:9bd63850e406167b4751f5050f6dc0ebd789bb5ef5e5c6c31ed062bda8c063e8"
+ ]
+ }
+} \ No newline at end of file
diff --git a/internal/image/fixtures/schema1-to-oci1.json b/internal/image/fixtures/schema1-to-oci1.json
new file mode 100644
index 0000000..0af1fbe
--- /dev/null
+++ b/internal/image/fixtures/schema1-to-oci1.json
@@ -0,0 +1,41 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": -1,
+ "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 74876245,
+ "digest": "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 1239,
+ "digest": "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 78339724,
+ "digest": "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 76857203,
+ "digest": "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 25923380,
+ "digest": "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 23511300,
+ "digest": "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/schema1-to-schema2-config.json b/internal/image/fixtures/schema1-to-schema2-config.json
new file mode 100644
index 0000000..c182ded
--- /dev/null
+++ b/internal/image/fixtures/schema1-to-schema2-config.json
@@ -0,0 +1,163 @@
+{
+ "architecture": "amd64",
+ "config": {
+ "Hostname": "9428cdea83ba",
+ "Domainname": "",
+ "User": "nova",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "container=oci",
+ "KOLLA_BASE_DISTRO=rhel",
+ "KOLLA_INSTALL_TYPE=binary",
+ "KOLLA_INSTALL_METATYPE=rhos",
+ "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ "
+ ],
+ "Cmd": [
+ "kolla_start"
+ ],
+ "Healthcheck": {
+ "Test": [
+ "CMD-SHELL",
+ "/openstack/healthcheck"
+ ]
+ },
+ "ArgsEscaped": true,
+ "Image": "3bf9afe371220b1eb1c57bec39b5a99ba976c36c92d964a1c014584f95f51e33",
+ "Volumes": null,
+ "WorkingDir": "",
+ "Entrypoint": null,
+ "OnBuild": [],
+ "Labels": {
+ "Kolla-SHA": "5.0.0-39-g6f1b947b",
+ "architecture": "x86_64",
+ "authoritative-source-url": "registry.access.redhat.com",
+ "build-date": "2018-01-25T00:32:27.807261",
+ "com.redhat.build-host": "ip-10-29-120-186.ec2.internal",
+ "com.redhat.component": "openstack-nova-api-docker",
+ "description": "Red Hat OpenStack Platform 12.0 nova-api",
+ "distribution-scope": "public",
+ "io.k8s.description": "Red Hat OpenStack Platform 12.0 nova-api",
+ "io.k8s.display-name": "Red Hat OpenStack Platform 12.0 nova-api",
+ "io.openshift.tags": "rhosp osp openstack osp-12.0",
+ "kolla_version": "stable/pike",
+ "name": "rhosp12/openstack-nova-api",
+ "release": "20180124.1",
+ "summary": "Red Hat OpenStack Platform 12.0 nova-api",
+ "tripleo-common_version": "7.6.3-23-g4891cfe",
+ "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1",
+ "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98",
+ "vcs-type": "git",
+ "vendor": "Red Hat, Inc.",
+ "version": "12.0",
+ "version-release": "12.0-20180124.1"
+ }
+ },
+ "container_config": {
+ "Hostname": "9428cdea83ba",
+ "Domainname": "",
+ "User": "nova",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "container=oci",
+ "KOLLA_BASE_DISTRO=rhel",
+ "KOLLA_INSTALL_TYPE=binary",
+ "KOLLA_INSTALL_METATYPE=rhos",
+ "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ "
+ ],
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ "#(nop) ",
+ "USER [nova]"
+ ],
+ "Healthcheck": {
+ "Test": [
+ "CMD-SHELL",
+ "/openstack/healthcheck"
+ ]
+ },
+ "ArgsEscaped": true,
+ "Image": "sha256:274ce4dcbeb09fa173a5d50203ae5cec28f456d1b8b59477b47a42bd74d068bf",
+ "Volumes": null,
+ "WorkingDir": "",
+ "Entrypoint": null,
+ "OnBuild": [],
+ "Labels": {
+ "Kolla-SHA": "5.0.0-39-g6f1b947b",
+ "architecture": "x86_64",
+ "authoritative-source-url": "registry.access.redhat.com",
+ "build-date": "2018-01-25T00:32:27.807261",
+ "com.redhat.build-host": "ip-10-29-120-186.ec2.internal",
+ "com.redhat.component": "openstack-nova-api-docker",
+ "description": "Red Hat OpenStack Platform 12.0 nova-api",
+ "distribution-scope": "public",
+ "io.k8s.description": "Red Hat OpenStack Platform 12.0 nova-api",
+ "io.k8s.display-name": "Red Hat OpenStack Platform 12.0 nova-api",
+ "io.openshift.tags": "rhosp osp openstack osp-12.0",
+ "kolla_version": "stable/pike",
+ "name": "rhosp12/openstack-nova-api",
+ "release": "20180124.1",
+ "summary": "Red Hat OpenStack Platform 12.0 nova-api",
+ "tripleo-common_version": "7.6.3-23-g4891cfe",
+ "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1",
+ "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98",
+ "vcs-type": "git",
+ "vendor": "Red Hat, Inc.",
+ "version": "12.0",
+ "version-release": "12.0-20180124.1"
+ }
+ },
+ "created": "2018-01-25T00:37:48.268558Z",
+ "docker_version": "1.12.6",
+ "os": "linux",
+ "history": [
+ {
+ "comment": "Imported from -",
+ "created": "2017-11-21T16:47:27.755341705Z"
+ },
+ {
+ "author": "Red Hat, Inc.",
+ "created": "2017-11-21T16:49:37.292899Z",
+ "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'"
+ },
+ {
+ "created": "2018-01-24T21:40:32.494686Z",
+ "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"
+ },
+ {
+ "created": "2018-01-24T22:00:57.807862Z",
+ "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"
+ },
+ {
+ "created": "2018-01-24T23:08:25.300741Z",
+ "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"
+ },
+ {
+ "created": "2018-01-25T00:37:48.268558Z",
+ "created_by": "/bin/sh -c #(nop) USER [nova]"
+ }
+ ],
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:e1d829eddb62dc49f1c56dbf8acd0c71299b3996115399de853a9d66d81b822f",
+ "sha256:02404b4d7e5d89b1383ca346b4462b199128aa4b238c5a2b2c186004ac148ba8",
+ "sha256:45fad80a4b1cec165c421eb570dec312d825bd8fac362e255028fa3f2169148d",
+ "sha256:7ddef8efd44586e54880ec4797458eac87b368544c438d7e7c63fbc0d9a7ae97",
+ "sha256:b56b16b6407ba1b86252e7e50f98f142cf6844fab42e4495d56ebb7ce559e2af",
+ "sha256:9bd63850e406167b4751f5050f6dc0ebd789bb5ef5e5c6c31ed062bda8c063e8"
+ ]
+ }
+} \ No newline at end of file
diff --git a/internal/image/fixtures/schema1-to-schema2.json b/internal/image/fixtures/schema1-to-schema2.json
new file mode 100644
index 0000000..9d6feee
--- /dev/null
+++ b/internal/image/fixtures/schema1-to-schema2.json
@@ -0,0 +1,41 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": -1,
+ "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 74876245,
+ "digest": "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 1239,
+ "digest": "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 78339724,
+ "digest": "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 76857203,
+ "digest": "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 25923380,
+ "digest": "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 23511300,
+ "digest": "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/schema1.json b/internal/image/fixtures/schema1.json
new file mode 100644
index 0000000..d741149
--- /dev/null
+++ b/internal/image/fixtures/schema1.json
@@ -0,0 +1,62 @@
+{
+ "schemaVersion": 1,
+ "name": "rhosp12/openstack-nova-api",
+ "tag": "latest",
+ "architecture": "amd64",
+ "fsLayers": [
+ {
+ "blobSum": "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"
+ },
+ {
+ "blobSum": "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788"
+ },
+ {
+ "blobSum": "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6"
+ },
+ {
+ "blobSum": "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e"
+ },
+ {
+ "blobSum": "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a"
+ },
+ {
+ "blobSum": "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4"
+ }
+ ],
+ "history": [
+ {
+ "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"kolla_start\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"3bf9afe371220b1eb1c57bec39b5a99ba976c36c92d964a1c014584f95f51e33\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"container_config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"USER [nova]\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"sha256:274ce4dcbeb09fa173a5d50203ae5cec28f456d1b8b59477b47a42bd74d068bf\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 
nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"created\":\"2018-01-25T00:37:48.268558Z\",\"docker_version\":\"1.12.6\",\"id\":\"486cbbaf6c6f7d890f9368c86eda3f4ebe3ae982b75098037eb3c3cc6f0e0cdf\",\"os\":\"linux\",\"parent\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\"}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\",\"parent\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"created\":\"2018-01-24T23:08:25.300741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"parent\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"created\":\"2018-01-24T22:00:57.807862Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"parent\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"created\":\"2018-01-24T21:40:32.494686Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"parent\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"created\":\"2017-11-21T16:49:37.292899Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'\"]},\"author\":\"Red Hat, Inc.\"}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"comment\":\"Imported from -\",\"created\":\"2017-11-21T16:47:27.755341705Z\",\"container_config\":{\"Cmd\":[\"\"]}}"
+ }
+ ],
+ "signatures": [
+ {
+ "header": {
+ "jwk": {
+ "crv": "P-256",
+ "kid": "DB2X:GSG2:72H3:AE3R:KCMI:Y77E:W7TF:ERHK:V5HR:JJ2Y:YMS6:HFGJ",
+ "kty": "EC",
+ "x": "jyr9-xZBorSC9fhqNsmfU_Ud31wbaZ-bVGz0HmySvbQ",
+ "y": "vkE6qZCCvYRWjSUwgAOvibQx_s8FipYkAiHS0VnAFNs"
+ },
+ "alg": "ES256"
+ },
+ "signature": "jBBsnocfxw77LzmM_VeN6Nb031BtqPgx-DbppYOEnhZfGLRcyYwGUPW--3JrkeEX6AlEGzPI57R0tlu5bZvrnQ",
+ "protected": "eyJmb3JtYXRMZW5ndGgiOjY4MTMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0wMS0zMFQxOToyNToxMloifQ"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/schema2-all-media-types-to-oci1.json b/internal/image/fixtures/schema2-all-media-types-to-oci1.json
new file mode 100644
index 0000000..65fff4a
--- /dev/null
+++ b/internal/image/fixtures/schema2-all-media-types-to-oci1.json
@@ -0,0 +1,36 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 4670,
+ "digest": "sha256:f15ba60ec257ee2cf4fddfb9451bb86ba2668450e88d402f5ecc7ea6ce1b661a"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 152,
+ "digest": "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/schema2-all-media-types.json b/internal/image/fixtures/schema2-all-media-types.json
new file mode 100644
index 0000000..2c3d8c7
--- /dev/null
+++ b/internal/image/fixtures/schema2-all-media-types.json
@@ -0,0 +1,36 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 4651,
+ "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 152,
+ "digest": "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/image/fixtures/schema2-config.json b/internal/image/fixtures/schema2-config.json
new file mode 100644
index 0000000..f49230e
--- /dev/null
+++ b/internal/image/fixtures/schema2-config.json
@@ -0,0 +1 @@
+{"architecture":"amd64","config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["httpd-foreground"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"container":"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69","container_config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"httpd-foreground\"]"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2016-09-23T23:20:45.78976459Z","docker_version":"1.12.1","history":[{"created":"2016-09-23T18:08:50.537223822Z","created_by":"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "},{"created":"2016-09-23T18:08:51.133779867Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/bash\"]","empty_layer":true},{"created":"2016-09-23T19:16:40.725768956Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:41.037788416Z","created_by":"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","empty_layer":true},{"created":"2016-09-23T19:16:41.990121202Z","created_by":"/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""},{"created":"2016-09-23T19:16:42.339911155Z","created_by":"/bin/sh -c #(nop) WORKDIR /usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:54.948461741Z","created_by":"/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"},{"created":"2016-09-23T19:16:55.321573403Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23","empty_layer":true},{"created":"2016-09-23T19:16:55.629947307Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","empty_layer":true},{"created":"2016-09-23T23:19:03.705796801Z","created_by":"/bin/sh -c #(nop) ENV 
HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","empty_layer":true},{"created":"2016-09-23T23:19:04.009782822Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc","empty_layer":true},{"created":"2016-09-23T23:20:44.585743332Z","created_by":"/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"},{"created":"2016-09-23T23:20:45.127455562Z","created_by":"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "},{"created":"2016-09-23T23:20:45.453934921Z","created_by":"/bin/sh -c #(nop) EXPOSE 80/tcp","empty_layer":true},{"created":"2016-09-23T23:20:45.78976459Z","created_by":"/bin/sh -c #(nop) CMD [\"httpd-foreground\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab","sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c","sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56","sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9","sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"]}} \ No newline at end of file
diff --git a/internal/image/fixtures/schema2-invalid-media-type.json b/internal/image/fixtures/schema2-invalid-media-type.json
new file mode 100644
index 0000000..d6b0691
--- /dev/null
+++ b/internal/image/fixtures/schema2-invalid-media-type.json
@@ -0,0 +1,36 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/octet-stream",
+ "size": 5940,
+ "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.zstd",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+ } \ No newline at end of file
diff --git a/internal/image/fixtures/schema2-to-oci1-config.json b/internal/image/fixtures/schema2-to-oci1-config.json
new file mode 100644
index 0000000..eb43d87
--- /dev/null
+++ b/internal/image/fixtures/schema2-to-oci1-config.json
@@ -0,0 +1,105 @@
+{
+ "architecture": "amd64",
+ "config": {
+ "ExposedPorts": {
+ "80/tcp": {}
+ },
+ "Env": [
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"
+ ],
+ "Cmd": [
+ "httpd-foreground"
+ ],
+ "ArgsEscaped": true,
+ "WorkingDir": "/usr/local/apache2"
+ },
+ "created": "2016-09-23T23:20:45.78976459Z",
+ "history": [
+ {
+ "created": "2016-09-23T18:08:50.537223822Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "
+ },
+ {
+ "created": "2016-09-23T18:08:51.133779867Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:40.725768956Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:41.037788416Z",
+ "created_by": "/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:41.990121202Z",
+ "created_by": "/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""
+ },
+ {
+ "created": "2016-09-23T19:16:42.339911155Z",
+ "created_by": "/bin/sh -c #(nop) WORKDIR /usr/local/apache2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:54.948461741Z",
+ "created_by": "/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"
+ },
+ {
+ "created": "2016-09-23T19:16:55.321573403Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T19:16:55.629947307Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:19:03.705796801Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:19:04.009782822Z",
+ "created_by": "/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:20:44.585743332Z",
+ "created_by": "/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"
+ },
+ {
+ "created": "2016-09-23T23:20:45.127455562Z",
+ "created_by": "/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "
+ },
+ {
+ "created": "2016-09-23T23:20:45.453934921Z",
+ "created_by": "/bin/sh -c #(nop) EXPOSE 80/tcp",
+ "empty_layer": true
+ },
+ {
+ "created": "2016-09-23T23:20:45.78976459Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"httpd-foreground\"]",
+ "empty_layer": true
+ }
+ ],
+ "os": "linux",
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab",
+ "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c",
+ "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56",
+ "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9",
+ "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"
+ ]
+ }
+} \ No newline at end of file
diff --git a/internal/image/fixtures/schema2-to-oci1.json b/internal/image/fixtures/schema2-to-oci1.json
new file mode 100644
index 0000000..251e4e5
--- /dev/null
+++ b/internal/image/fixtures/schema2-to-oci1.json
@@ -0,0 +1,30 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 4670,
+ "digest": "sha256:f15ba60ec257ee2cf4fddfb9451bb86ba2668450e88d402f5ecc7ea6ce1b661a"
+ },
+ "layers": [{
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ }, {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ }, {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
+ }, {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ }, {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }]
+}
diff --git a/internal/image/fixtures/schema2-to-schema1-by-docker.json b/internal/image/fixtures/schema2-to-schema1-by-docker.json
new file mode 100644
index 0000000..494450d
--- /dev/null
+++ b/internal/image/fixtures/schema2-to-schema1-by-docker.json
@@ -0,0 +1,116 @@
+{
+ "schemaVersion": 1,
+ "name": "library/httpd-copy",
+ "tag": "latest",
+ "architecture": "amd64",
+ "fsLayers": [
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ },
+ {
+ "blobSum": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+ },
+ {
+ "blobSum": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ }
+ ],
+ "history": [
+ {
+ "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"httpd-foreground\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"container\":\"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69\",\"container_config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"httpd-foreground\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"created\":\"2016-09-23T23:20:45.78976459Z\",\"docker_version\":\"1.12.1\",\"id\":\"dca7323f9c839837493199d63263083d94f5eb1796d7bd04ca8374c4e9d3749a\",\"os\":\"linux\",\"parent\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"parent\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"created\":\"2016-09-23T23:20:45.453934921Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) EXPOSE 80/tcp\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"parent\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"created\":\"2016-09-23T23:20:45.127455562Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ \"]}}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"parent\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"created\":\"2016-09-23T23:20:44.585743332Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -x \\t\\u0026\\u0026 buildDeps=' \\t\\tbzip2 \\t\\tca-certificates \\t\\tgcc \\t\\tlibpcre++-dev \\t\\tlibssl-dev \\t\\tmake \\t\\twget \\t' \\t\\u0026\\u0026 apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends $buildDeps \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/* \\t\\t\\u0026\\u0026 wget -O httpd.tar.bz2 \\\"$HTTPD_BZ2_URL\\\" \\t\\u0026\\u0026 echo \\\"$HTTPD_SHA1 *httpd.tar.bz2\\\" | sha1sum -c - \\t\\u0026\\u0026 wget -O httpd.tar.bz2.asc \\\"$HTTPD_ASC_URL\\\" \\t\\u0026\\u0026 export GNUPGHOME=\\\"$(mktemp -d)\\\" \\t\\u0026\\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \\t\\u0026\\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \\t\\u0026\\u0026 rm -r \\\"$GNUPGHOME\\\" httpd.tar.bz2.asc \\t\\t\\u0026\\u0026 mkdir -p src \\t\\u0026\\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \\t\\u0026\\u0026 rm httpd.tar.bz2 \\t\\u0026\\u0026 cd src \\t\\t\\u0026\\u0026 ./configure \\t\\t--prefix=\\\"$HTTPD_PREFIX\\\" \\t\\t--enable-mods-shared=reallyall \\t\\u0026\\u0026 make -j\\\"$(nproc)\\\" \\t\\u0026\\u0026 make install \\t\\t\\u0026\\u0026 cd .. \\t\\u0026\\u0026 rm -r src \\t\\t\\u0026\\u0026 sed -ri \\t\\t-e 's!^(\\\\s*CustomLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/1!g' \\t\\t-e 's!^(\\\\s*ErrorLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/2!g' \\t\\t\\\"$HTTPD_PREFIX/conf/httpd.conf\\\" \\t\\t\\u0026\\u0026 apt-get purge -y --auto-remove $buildDeps\"]}}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"parent\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"created\":\"2016-09-23T23:19:04.009782822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"parent\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"created\":\"2016-09-23T23:19:03.705796801Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"parent\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"created\":\"2016-09-23T19:16:55.629947307Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"parent\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"created\":\"2016-09-23T19:16:55.321573403Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"parent\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"created\":\"2016-09-23T19:16:54.948461741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends \\t\\tlibapr1 \\t\\tlibaprutil1 \\t\\tlibaprutil1-ldap \\t\\tlibapr1-dev \\t\\tlibaprutil1-dev \\t\\tlibpcre++0 \\t\\tlibssl1.0.0 \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/*\"]}}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"parent\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"created\":\"2016-09-23T19:16:42.339911155Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) WORKDIR /usr/local/apache2\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"parent\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"created\":\"2016-09-23T19:16:41.990121202Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir -p \\\"$HTTPD_PREFIX\\\" \\t\\u0026\\u0026 chown www-data:www-data \\\"$HTTPD_PREFIX\\\"\"]}}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"parent\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"created\":\"2016-09-23T19:16:41.037788416Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"parent\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"created\":\"2016-09-23T19:16:40.725768956Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"parent\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:51.133779867Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"/bin/bash\\\"]\"]},\"throwaway\":true}"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:50.537223822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / \"]}}"
+ }
+ ],
+ "signatures": [
+ {
+ "header": {
+ "jwk": {
+ "crv": "P-256",
+ "kid": "6QVR:5NTY:VIHC:W6IU:XYIN:CTKT:OG5R:XEEG:Z6XJ:2623:YCBP:36MA",
+ "kty": "EC",
+ "x": "NAGHj6-IdNonuFoxlqJnNMjcrCCE1CBoq2r_1NDci68",
+ "y": "Kocqgj_Ey5J-wLXTjkuqLC-HjciAnWxsBEziAOTvSPc"
+ },
+ "alg": "ES256"
+ },
+ "signature": "2MN5k06i8xkJhD5ay4yxAFK7tsZk58UznAZONxDplvQ5lZwbRS162OeBDjCb0Hk0IDyrLXtAfBDlY2Gzf6jrpw",
+ "protected": "eyJmb3JtYXRMZW5ndGgiOjEwODk1LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTYtMTAtMTRUMTY6MTI6MDlaIn0"
+ }
+ ]
+}
diff --git a/internal/image/fixtures/schema2.json b/internal/image/fixtures/schema2.json
new file mode 100644
index 0000000..8df4c0d
--- /dev/null
+++ b/internal/image/fixtures/schema2.json
@@ -0,0 +1,36 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/octet-stream",
+ "size": 5940,
+ "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 51354364,
+ "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 150,
+ "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 11739507,
+ "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 8841833,
+ "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 291,
+ "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
+ }
+ ]
+} \ No newline at end of file
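Editorial note (not part of the patch): the fixture files above are raw manifest blobs consumed by the internal/image tests added below. As a hedged sketch only, this is one way such a blob can be inspected with the library's public manifest package; the file path and the main() wrapper are illustrative, not taken from this patch.

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// Illustrative path; any of the fixture blobs above works the same way.
	blob, err := os.ReadFile("internal/image/fixtures/schema2.json")
	if err != nil {
		panic(err)
	}
	// GuessMIMEType inspects the blob contents; Digest returns the canonical digest of the raw bytes.
	fmt.Println("MIME type:", manifest.GuessMIMEType(blob))
	d, err := manifest.Digest(blob)
	if err != nil {
		panic(err)
	}
	fmt.Println("digest:", d)
}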
diff --git a/internal/image/manifest.go b/internal/image/manifest.go
new file mode 100644
index 0000000..75e472a
--- /dev/null
+++ b/internal/image/manifest.go
@@ -0,0 +1,121 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// genericManifest is an interface for parsing and modifying image manifests and related data.
+// The public methods are related to types.Image so that embedding a genericManifest implements most of it,
+// but there are also public methods that are only visible to packages that can import c/image/internal/image.
+type genericManifest interface {
+ serialize() ([]byte, error)
+ manifestMIMEType() string
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+ ConfigInfo() types.BlobInfo
+ // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+ // The result is cached; it is OK to call this however often you need.
+ ConfigBlob(context.Context) ([]byte, error)
+ // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+ // old image manifests work (docker v2s1 especially).
+ OCIConfig(context.Context) (*imgspecv1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layers).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []types.BlobInfo
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+ // It returns false if the manifest does not embed a Docker reference.
+ // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+ Inspect(context.Context) (*types.ImageInspectInfo, error)
+ // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+ // (most importantly it forces us to download the full layers even if they are already present at the destination).
+ UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
+ // UpdatedImage returns a types.Image modified according to options.
+ // This does not change the state of the original Image object.
+ UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
+	// SupportsEncryption returns whether encryption is supported for the manifest type
+ //
+ // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
+ // the process of updating a manifest between different manifest types was to update then convert.
+ // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
+ SupportsEncryption(ctx context.Context) bool
+
+ // The following methods are not a part of types.Image:
+ // ===
+
+ // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+ // (and the code can handle that).
+ // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+ // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+ // to a different manifest format).
+ CanChangeLayerCompression(mimeType string) bool
+}
+
+// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
+// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
+func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
+ switch manifest.NormalizedMIMEType(mt) {
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+ return manifestSchema1FromManifest(manblob)
+ case imgspecv1.MediaTypeImageManifest:
+ return manifestOCI1FromManifest(src, manblob)
+ case manifest.DockerV2Schema2MediaType:
+ return manifestSchema2FromManifest(src, manblob)
+ case manifest.DockerV2ListMediaType:
+ return manifestSchema2FromManifestList(ctx, sys, src, manblob)
+ case imgspecv1.MediaTypeImageIndex:
+ return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
+ default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+ }
+}
+
+// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
+func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
+ blobs := make([]types.BlobInfo, len(layers))
+ for i, layer := range layers {
+ blobs[i] = layer.BlobInfo
+ }
+ return blobs
+}
+
+// manifestConvertFn (a method of a genericManifest object) returns a genericManifest implementation
+// converted to a specific manifest MIME type.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original genericManifest object.
+type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error)
+
+// convertManifestIfRequiredWithUpdate runs the requested manifest conversion, if any, and
+// re-applies the options to the converted type.
+// It returns (nil, nil) if no conversion was requested.
+func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) {
+ if options.ManifestMIMEType == "" {
+ return nil, nil
+ }
+
+ converter, ok := converters[options.ManifestMIMEType]
+ if !ok {
+ return nil, fmt.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
+ }
+
+ optionsCopy := options
+ convertedManifest, err := converter(ctx, &optionsCopy)
+ if err != nil {
+ return nil, err
+ }
+ convertedImage := memoryImageFromManifest(convertedManifest)
+
+ optionsCopy.ManifestMIMEType = ""
+ return convertedImage.UpdatedImage(ctx, optionsCopy)
+}
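Editorial note (not part of the patch): convertManifestIfRequiredWithUpdate above is reached through types.Image.UpdatedImage. The following is a hedged sketch of a call site that requests a schema2 conversion; obtaining img is assumed to happen elsewhere (for example via the public github.com/containers/image/v5/image package), and only UpdatedImage and ManifestUpdateOptions are taken from the library.

package example

import (
	"context"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

// convertToSchema2 asks an existing types.Image for a copy whose manifest uses the
// Docker schema2 MIME type; UpdatedImage picks the matching conversion function and
// re-applies any remaining options to the converted manifest.
func convertToSchema2(ctx context.Context, img types.Image) (types.Image, error) {
	return img.UpdatedImage(ctx, types.ManifestUpdateOptions{
		ManifestMIMEType: manifest.DockerV2Schema2MediaType,
	})
}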
diff --git a/internal/image/manifest_test.go b/internal/image/manifest_test.go
new file mode 100644
index 0000000..64ad130
--- /dev/null
+++ b/internal/image/manifest_test.go
@@ -0,0 +1,71 @@
+package image
+
+import (
+ "testing"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestManifestLayerInfosToBlobInfos(t *testing.T) {
+ blobs := manifestLayerInfosToBlobInfos([]manifest.LayerInfo{})
+ assert.Equal(t, []types.BlobInfo{}, blobs)
+
+ blobs = manifestLayerInfosToBlobInfos([]manifest.LayerInfo{
+ {
+ BlobInfo: types.BlobInfo{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+ Size: 32,
+ },
+ EmptyLayer: true,
+ },
+ {
+ BlobInfo: types.BlobInfo{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ },
+ EmptyLayer: false,
+ },
+ {
+ BlobInfo: types.BlobInfo{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ },
+ EmptyLayer: false,
+ },
+ {
+ BlobInfo: types.BlobInfo{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+ Size: 32,
+ },
+ EmptyLayer: true,
+ },
+ })
+ assert.Equal(t, []types.BlobInfo{
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+ Size: 32,
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ },
+ {
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+ Size: 32,
+ },
+ }, blobs)
+}
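Editorial note (not part of the patch): memory.go below serializes the manifest lazily and caches the bytes inside the memoryImage value. The same cache-on-first-use shape, reduced to a standalone sketch with purely illustrative names:

package example

import "encoding/json"

// lazyDoc caches its serialized form after the first Render call, mirroring how
// memoryImage.Manifest caches serializedManifest.
type lazyDoc struct {
	payload    map[string]any
	serialized []byte // nil until Render has been called once
}

func (d *lazyDoc) Render() ([]byte, error) {
	if d.serialized == nil {
		b, err := json.Marshal(d.payload)
		if err != nil {
			return nil, err
		}
		d.serialized = b
	}
	return d.serialized, nil
}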
diff --git a/internal/image/memory.go b/internal/image/memory.go
new file mode 100644
index 0000000..e22c7aa
--- /dev/null
+++ b/internal/image/memory.go
@@ -0,0 +1,64 @@
+package image
+
+import (
+ "context"
+ "errors"
+
+ "github.com/containers/image/v5/types"
+)
+
+// memoryImage is a mostly-complete implementation of types.Image assembled from data
+// created in memory, used primarily as a return value of types.Image.UpdatedImage
+// as a way to carry various structured information in a type-safe and easy-to-use way.
+// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone
+// collection of all related information, e.g. there is no way to get layer blobs
+// from a memoryImage.
+type memoryImage struct {
+ genericManifest
+ serializedManifest []byte // A private cache for Manifest()
+}
+
+func memoryImageFromManifest(m genericManifest) types.Image {
+ return &memoryImage{
+ genericManifest: m,
+ serializedManifest: nil,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *memoryImage) Reference() types.ImageReference {
+ // It would really be inappropriate to return the ImageReference of the image this was based on.
+ return nil
+}
+
+// Size returns the size of the image as stored, if known, or -1 if not.
+func (i *memoryImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ if i.serializedManifest == nil {
+ m, err := i.genericManifest.serialize()
+ if err != nil {
+ return nil, "", err
+ }
+ i.serializedManifest = m
+ }
+ return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
+ // Modifying an image invalidates signatures; a caller asking the updated image for signatures
+ // is probably confused.
+ return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
+}
+
+// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ return nil, nil
+}
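Editorial note (not part of the patch): manifestOCI1.ConfigBlob in oci.go below downloads the config blob and rejects it when the computed digest differs from the descriptor. The core check in isolation, as a hedged sketch using only the go-digest module; verifyBlob is an illustrative name, not part of this patch.

package example

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// verifyBlob returns an error unless blob hashes to the expected digest,
// mirroring the check in manifestOCI1.ConfigBlob.
func verifyBlob(blob []byte, expected digest.Digest) error {
	if computed := digest.FromBytes(blob); computed != expected {
		return fmt.Errorf("digest %s does not match expected %s", computed, expected)
	}
	return nil
}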
diff --git a/internal/image/oci.go b/internal/image/oci.go
new file mode 100644
index 0000000..df0e8e4
--- /dev/null
+++ b/internal/image/oci.go
@@ -0,0 +1,336 @@
+package image
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ ociencspec "github.com/containers/ocicrypt/spec"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/slices"
+)
+
+type manifestOCI1 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of m.Config.
+ m *manifest.OCI1
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.OCI1FromManifest(manifestBlob)
+ if err != nil {
+ return nil, err
+ }
+ return &manifestOCI1{
+ src: src,
+ m: m,
+ }, nil
+}
+
+// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data.
+func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+ return &manifestOCI1{
+ src: src,
+ configBlob: configBlob,
+ m: manifest.OCI1FromComponents(config, layers),
+ }
+}
+
+func (m *manifestOCI1) serialize() ([]byte, error) {
+ return m.m.Serialize()
+}
+
+func (m *manifestOCI1) manifestMIMEType() string {
+ return imgspecv1.MediaTypeImageManifest
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+ return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, errors.New("Internal error: neither src nor configBlob set in manifestOCI1")
+ }
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.m.Config.Digest {
+ return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
+ }
+
+ cb, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(cb, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+ return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
+ }
+ return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the combination of CompressionOperation and CompressionAlgorithm specified
+// in one or more options.LayerInfos items indicates that a layer is compressed using
+// an algorithm that is not allowed in OCI.
+func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.OCI1Clone(m.m),
+ }
+
+ converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+ manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic,
+ manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
+ manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if converted != nil {
+ return converted, nil
+ }
+
+ // No conversion required, update manifest
+ if options.LayerInfos != nil {
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+ return manifest.Schema2Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
+// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema1 object.
+//
+// We need this function just because a function returning an implementation of the genericManifest
+// interface is not automatically assignable to a function type returning the genericManifest interface
+func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ return m.convertToManifestSchema2(ctx, options)
+}
+
+// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format.
+// If not, it returns (nil, nil).
+// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos,
+// and edits *options to not try decryption again.
+func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
+ if options == nil || options.LayerInfos == nil {
+ return nil, nil
+ }
+
+ originalInfos := m.LayerInfos()
+ if len(originalInfos) != len(options.LayerInfos) {
+ return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos))
+ }
+
+ ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate.
+ laterEdits := slices.Clone(options.LayerInfos)
+ needsOCIOnlyEdits := false
+ for i, edit := range options.LayerInfos {
+ // Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
+ ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal
+ ociOnlyEdits[i].CompressionAlgorithm = nil
+
+ if edit.CryptoOperation == types.Decrypt {
+ needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas
+ ociOnlyEdits[i].CryptoOperation = types.Decrypt
+ laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
+ }
+
+ if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd ||
+ originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas.
+ ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation
+ ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm
+ laterEdits[i].CompressionOperation = types.PreserveOriginal
+ laterEdits[i].CompressionAlgorithm = nil
+ }
+ }
+ if !needsOCIOnlyEdits {
+ return nil, nil
+ }
+
+ options.LayerInfos = laterEdits
+ return ociOnlyEdits, nil
+}
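+
+// Editor's note (illustrative sketch, not part of the upstream code): a caller that wants to decrypt
+// while converting to a Docker schema requests both in a single UpdatedImage call; the decryption edits
+// are then routed through layerEditsOfOCIOnlyFeatures before the format conversion ("img", "ctx" and
+// "edits" are hypothetical names):
+//
+//	edits := slices.Clone(img.LayerInfos())
+//	for i := range edits {
+//		edits[i].CryptoOperation = types.Decrypt
+//	}
+//	converted, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
+//		LayerInfos:       edits,
+//		ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+//	})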
+
+// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
+ }
+
+ // Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits
+ // which remove OCI-specific features, because trying to convert those layers would fail.
+ // So, do the layer updates for decryption, and for conversions from Zstd.
+ ociManifest := m.m
+ ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options)
+ if err != nil {
+ return nil, err
+ }
+ if ociOnlyEdits != nil {
+ ociManifest = manifest.OCI1Clone(ociManifest)
+ if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil {
+ return nil, err
+ }
+ }
+
+ // Create a copy of the descriptor.
+ config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config)
+
+ // Above, we have already checked that this manifest refers to an image, not an OCI artifact,
+ // so the only difference between OCI and DockerSchema2 is the mediatypes. The
+ // media type of the manifest is handled by manifestSchema2FromComponents.
+ config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+ layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers))
+ for idx := range layers {
+ layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx])
+ switch layers[idx].MediaType {
+ case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+ case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+ case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+ case imgspecv1.MediaTypeImageLayer:
+ layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+ case imgspecv1.MediaTypeImageLayerGzip:
+ layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+ case imgspecv1.MediaTypeImageLayerZstd:
+ return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+ case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
+ ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc:
+ return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
+ default:
+ return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
+ }
+ }
+
+	// Rather than copying the ConfigBlob now, we just pass m.src to the
+	// translated manifest; since the only difference is the mediatype of
+	// descriptors, there is no change to any blob stored in m.src.
+ return manifestSchema2FromComponents(config, m.src, nil, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+ if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
+ }
+
+ // We can't directly convert images to V1, but we can transitively convert via a V2 image
+ m2, err := m.convertToManifestSchema2(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return m2.convertToManifestSchema1(ctx, options)
+}
+
+// SupportsEncryption returns whether encryption is supported for the manifest type.
+func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
+ return true
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
+ return m.m.CanChangeLayerCompression(mimeType)
+}
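+
+// Editor's note (illustrative sketch, not part of the upstream code): the conversions above are normally
+// reached through types.Image.UpdatedImage by asking for a Docker media type; the converted manifest can
+// then be read back via Manifest ("img" and "ctx" are hypothetical names):
+//
+//	updated, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
+//		ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	blob, mimeType, err := updated.Manifest(ctx)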
diff --git a/internal/image/oci_index.go b/internal/image/oci_index.go
new file mode 100644
index 0000000..0e945c8
--- /dev/null
+++ b/internal/image/oci_index.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/types"
+)
+
+func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+ index, err := manifest.OCI1IndexFromManifest(manblob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing OCI1 index: %w", err)
+ }
+ targetManifestDigest, err := index.ChooseInstance(sys)
+ if err != nil {
+ return nil, fmt.Errorf("choosing image instance: %w", err)
+ }
+ manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+ if err != nil {
+ return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err)
+ }
+
+ matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+ if err != nil {
+ return nil, fmt.Errorf("computing manifest digest: %w", err)
+ }
+ if !matches {
+ return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+ }
+
+ return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
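+
+// Editor's note (illustrative sketch, not part of the upstream code): ChooseInstance selects the instance
+// matching the platform requested in *types.SystemContext, or the current platform when those fields are
+// unset ("indexBlob" is a hypothetical name):
+//
+//	index, err := manifest.OCI1IndexFromManifest(indexBlob)
+//	if err != nil {
+//		return err
+//	}
+//	instanceDigest, err := index.ChooseInstance(&types.SystemContext{OSChoice: "linux", ArchitectureChoice: "arm64"})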
diff --git a/internal/image/oci_test.go b/internal/image/oci_test.go
new file mode 100644
index 0000000..0ac22f3
--- /dev/null
+++ b/internal/image/oci_test.go
@@ -0,0 +1,891 @@
+package image
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/testing/mocks"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+)
+
+func manifestOCI1FromFixture(t *testing.T, src types.ImageSource, fixture string) genericManifest {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", fixture))
+ require.NoError(t, err)
+
+ m, err := manifestOCI1FromManifest(src, manifest)
+ require.NoError(t, err)
+ return m
+}
+
+var layerDescriptorsLikeFixture = []imgspecv1.Descriptor{
+ {
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ },
+ {
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ },
+ {
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ URLs: []string{
+ "https://layer.url",
+ },
+ },
+ {
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ Annotations: map[string]string{
+ "test-annotation-2": "two",
+ },
+ },
+ {
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ },
+}
+
+func manifestOCI1FromComponentsLikeFixture(configBlob []byte) genericManifest {
+ return manifestOCI1FromComponents(imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: 5940,
+ Digest: commonFixtureConfigDigest,
+ Annotations: map[string]string{
+ "test-annotation-1": "one",
+ },
+ }, nil, configBlob, layerDescriptorsLikeFixture)
+}
+
+func manifestOCI1FromComponentsWithExtraConfigFields(t *testing.T, src types.ImageSource) genericManifest {
+ configJSON, err := os.ReadFile("fixtures/oci1-config-extra-fields.json")
+ require.NoError(t, err)
+ return manifestOCI1FromComponents(imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: 7693,
+ Digest: "sha256:7f2a783ee2f07826b1856e68a40c930cd0430d6e7d4a88c29c2c8b7718706e74",
+ Annotations: map[string]string{
+ "test-annotation-1": "one",
+ },
+ }, src, configJSON, layerDescriptorsLikeFixture)
+}
+
+func TestManifestOCI1FromManifest(t *testing.T) {
+ // This just tests that the JSON can be loaded; we test that the parsed
+ // values are correctly returned in tests for the individual getter methods.
+ _ = manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1.json")
+
+ _, err := manifestOCI1FromManifest(nil, []byte{})
+ assert.Error(t, err)
+}
+
+func TestManifestOCI1FromComponents(t *testing.T) {
+ // This just smoke-tests that the manifest can be created; we test that the parsed
+ // values are correctly returned in tests for the individual getter methods.
+ _ = manifestOCI1FromComponentsLikeFixture(nil)
+}
+
+func TestManifestOCI1Serialize(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1.json"),
+ manifestOCI1FromComponentsLikeFixture(nil),
+ } {
+ serialized, err := m.serialize()
+ require.NoError(t, err)
+ // We would ideally like to compare “serialized” with some transformation of
+ // the original fixture, but the ordering of fields in JSON maps is undefined, so this is
+ // easier.
+ assertJSONEqualsFixture(t, serialized, "oci1.json")
+ }
+}
+
+func TestManifestOCI1ManifestMIMEType(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1.json"),
+ manifestOCI1FromComponentsLikeFixture(nil),
+ } {
+ assert.Equal(t, imgspecv1.MediaTypeImageManifest, m.manifestMIMEType())
+ }
+}
+
+func TestManifestOCI1ConfigInfo(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1.json"),
+ manifestOCI1FromComponentsLikeFixture(nil),
+ } {
+ assert.Equal(t, types.BlobInfo{
+ Size: 5940,
+ Digest: commonFixtureConfigDigest,
+ Annotations: map[string]string{
+ "test-annotation-1": "one",
+ },
+ MediaType: "application/vnd.oci.image.config.v1+json",
+ }, m.ConfigInfo())
+ }
+}
+
+func TestManifestOCI1ConfigBlob(t *testing.T) {
+ realConfigJSON, err := os.ReadFile("fixtures/oci1-config.json")
+ require.NoError(t, err)
+
+ for _, c := range []struct {
+ cbISfn func() (io.ReadCloser, int64, error)
+ blob []byte
+ }{
+ // Success
+ {func() (io.ReadCloser, int64, error) {
+ return io.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil
+ }, realConfigJSON},
+ // Various kinds of failures
+ {nil, nil},
+ {func() (io.ReadCloser, int64, error) {
+ return nil, -1, errors.New("Error returned from GetBlob")
+ }, nil},
+ {func() (io.ReadCloser, int64, error) {
+ reader, writer := io.Pipe()
+ err = writer.CloseWithError(errors.New("Expected error reading input in ConfigBlob"))
+ require.NoError(t, err)
+ return reader, 1, nil
+ }, nil},
+ {func() (io.ReadCloser, int64, error) {
+ nonmatchingJSON := []byte("This does not match ConfigDescriptor.Digest")
+ return io.NopCloser(bytes.NewReader(nonmatchingJSON)), int64(len(nonmatchingJSON)), nil
+ }, nil},
+ } {
+ var src types.ImageSource
+ if c.cbISfn != nil {
+ src = configBlobImageSource{
+ expectedDigest: commonFixtureConfigDigest,
+ f: c.cbISfn,
+ }
+ } else {
+ src = nil
+ }
+ m := manifestOCI1FromFixture(t, src, "oci1.json")
+ blob, err := m.ConfigBlob(context.Background())
+ if c.blob != nil {
+ assert.NoError(t, err)
+ assert.Equal(t, c.blob, blob)
+ } else {
+ assert.Error(t, err)
+ }
+ }
+
+ // Generally configBlob should match ConfigInfo; we don’t quite need it to, and this will
+ // guarantee that the returned object is returning the original contents instead
+ // of reading an object from elsewhere.
+ configBlob := []byte("config blob which does not match ConfigInfo")
+ // This just tests that the manifest can be created; we test that the parsed
+ // values are correctly returned in tests for the individual getter methods.
+ m := manifestOCI1FromComponentsLikeFixture(configBlob)
+ cb, err := m.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, configBlob, cb)
+}
+
+func TestManifestOCI1OCIConfig(t *testing.T) {
+ // Just a smoke-test that the code can read the data…
+ configJSON, err := os.ReadFile("fixtures/oci1-config.json")
+ require.NoError(t, err)
+ expectedConfig := imgspecv1.Image{}
+ err = json.Unmarshal(configJSON, &expectedConfig)
+ require.NoError(t, err)
+
+ originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd:latest")
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, originalSrc, "oci1.json"),
+ manifestOCI1FromComponentsLikeFixture(configJSON),
+ } {
+ config, err := m.OCIConfig(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, &expectedConfig, config)
+ }
+
+ // “Any extra fields in the Image JSON struct are considered implementation specific
+ // and MUST NOT generate an error by any implementations which are unable to interpret them.”
+ // oci1-config-extra-fields.json is the same as oci1-config.json, apart from a few added fields.
+ srcWithExtraFields := newOCI1ImageSource(t, "oci1-config-extra-fields.json", "httpd:latest")
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, srcWithExtraFields, "oci1-extra-config-fields.json"),
+ manifestOCI1FromComponentsWithExtraConfigFields(t, srcWithExtraFields),
+ } {
+ config, err := m.OCIConfig(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, &expectedConfig, config)
+ }
+
+ // This can share originalSrc because the config digest is the same between oci1-artifact.json and oci1.json
+ artifact := manifestOCI1FromFixture(t, originalSrc, "oci1-artifact.json")
+ _, err = artifact.OCIConfig(context.Background())
+ var expected manifest.NonImageArtifactError
+ assert.ErrorAs(t, err, &expected)
+}
+
+func TestManifestOCI1LayerInfo(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1.json"),
+ manifestOCI1FromComponentsLikeFixture(nil),
+ } {
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ URLs: []string{
+ "https://layer.url",
+ },
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ Annotations: map[string]string{
+ "test-annotation-2": "two",
+ },
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ },
+ }, m.LayerInfos())
+ }
+}
+
+func TestManifestOCI1EmbeddedDockerReferenceConflicts(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1.json"),
+ manifestOCI1FromComponentsLikeFixture(nil),
+ } {
+ for _, name := range []string{"busybox", "example.com:5555/ns/repo:tag"} {
+ ref, err := reference.ParseNormalizedNamed(name)
+ require.NoError(t, err)
+ conflicts := m.EmbeddedDockerReferenceConflicts(ref)
+ assert.False(t, conflicts)
+ }
+ }
+}
+
+func TestManifestOCI1Inspect(t *testing.T) {
+ var emptyAnnotations map[string]string
+ created := time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC)
+
+ configJSON, err := os.ReadFile("fixtures/oci1-config.json")
+ require.NoError(t, err)
+ for _, m := range []genericManifest{
+ manifestOCI1FromComponentsLikeFixture(configJSON),
+ // “Any extra fields in the Image JSON struct are considered implementation specific
+ // and MUST NOT generate an error by any implementations which are unable to interpret them.”
+ // oci1-config-extra-fields.json is the same as oci1-config.json, apart from a few added fields.
+ manifestOCI1FromComponentsWithExtraConfigFields(t, nil),
+ } {
+ ii, err := m.Inspect(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, types.ImageInspectInfo{
+ Tag: "",
+ Created: &created,
+ DockerVersion: "1.12.1",
+ Labels: map[string]string{},
+ Architecture: "amd64",
+ Os: "linux",
+ Layers: []string{
+ "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ },
+ LayersData: []types.ImageInspectLayer{{
+ MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ Annotations: emptyAnnotations,
+ }, {
+ MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ Annotations: map[string]string{"test-annotation-2": "two"},
+ }, {
+ MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip",
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ Annotations: emptyAnnotations,
+ },
+ },
+ Author: "",
+ Env: []string{
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HTTPD_PREFIX=/usr/local/apache2",
+ "HTTPD_VERSION=2.4.23",
+ "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f",
+ "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download&filename=httpd/httpd-2.4.23.tar.bz2",
+ "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc",
+ },
+ }, *ii)
+ }
+
+ // nil configBlob will trigger an error in m.ConfigBlob()
+ m := manifestOCI1FromComponentsLikeFixture(nil)
+ _, err = m.Inspect(context.Background())
+ assert.Error(t, err)
+
+ m = manifestOCI1FromComponentsLikeFixture([]byte("invalid JSON"))
+ _, err = m.Inspect(context.Background())
+ assert.Error(t, err)
+}
+
+func TestManifestOCI1UpdatedImageNeedsLayerDiffIDs(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1.json"),
+ manifestOCI1FromComponentsLikeFixture(nil),
+ } {
+ assert.False(t, m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ }))
+ }
+}
+
+// oci1ImageSource is plausible enough for schema conversions in manifestOCI1.UpdatedImage() to work.
+type oci1ImageSource struct {
+ configBlobImageSource
+ ref reference.Named
+}
+
+func (OCIis *oci1ImageSource) Reference() types.ImageReference {
+ return refImageReferenceMock{ref: OCIis.ref}
+}
+
+func newOCI1ImageSource(t *testing.T, configFixture string, dockerRef string) *oci1ImageSource {
+ realConfigJSON, err := os.ReadFile(filepath.Join("fixtures", configFixture))
+ require.NoError(t, err)
+
+ ref, err := reference.ParseNormalizedNamed(dockerRef)
+ require.NoError(t, err)
+
+ return &oci1ImageSource{
+ configBlobImageSource: configBlobImageSource{
+ expectedDigest: digest.FromBytes(realConfigJSON),
+ f: func() (io.ReadCloser, int64, error) {
+ return io.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil
+ },
+ },
+ ref: ref,
+ }
+}
+
+func TestManifestOCI1UpdatedImage(t *testing.T) {
+ originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd:latest")
+ original := manifestOCI1FromFixture(t, originalSrc, "oci1.json")
+
+ // LayerInfos:
+ layerInfos := append(slices.Clone(original.LayerInfos()[1:]), original.LayerInfos()[0])
+ res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: layerInfos,
+ })
+ require.NoError(t, err)
+ assert.Equal(t, layerInfos, res.LayerInfos())
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: append(layerInfos, layerInfos[0]),
+ })
+ assert.Error(t, err)
+
+ // EmbeddedDockerReference:
+ // … is ignored
+ embeddedRef, err := reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ EmbeddedDockerReference: embeddedRef,
+ })
+ require.NoError(t, err)
+ nonEmbeddedRef, err := reference.ParseNormalizedNamed("notbusybox:notlatest")
+ require.NoError(t, err)
+ conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef)
+ assert.False(t, conflicts)
+
+ // ManifestMIMEType:
+	// Only smoke-test the valid conversions; detailed tests are below. (This also verifies that “original” is not affected.)
+ for _, mime := range []string{
+ manifest.DockerV2Schema2MediaType,
+ } {
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: mime,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: &memoryImageDest{ref: originalSrc.ref},
+ },
+ })
+ assert.NoError(t, err, mime)
+ }
+ for _, mime := range []string{
+ imgspecv1.MediaTypeImageManifest, // This indicates a confused caller, not a no-op.
+ "this is invalid",
+ } {
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: mime,
+ })
+ assert.Error(t, err, mime)
+ }
+
+ // original hasn’t been changed:
+ m2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json")
+ typedOriginal, ok := original.(*manifestOCI1)
+ require.True(t, ok)
+ typedM2, ok := m2.(*manifestOCI1)
+ require.True(t, ok)
+ assert.Equal(t, *typedM2, *typedOriginal)
+}
+
+// successfulOCI1Conversion verifies that an edit of original with edits succeeds, and that original continues to match originalClone.
+// It returns the resulting image, for further checks.
+func successfulOCI1Conversion(t *testing.T, original genericManifest, originalClone genericManifest,
+ edits types.ManifestUpdateOptions) types.Image {
+ res, err := original.UpdatedImage(context.Background(), edits)
+ require.NoError(t, err)
+
+ // original = the source Image implementation hasn’t been changed by the edits
+ typedOriginal, ok := original.(*manifestOCI1)
+ require.True(t, ok)
+ typedOriginalClone, ok := originalClone.(*manifestOCI1)
+ require.True(t, ok)
+ assert.Equal(t, *typedOriginalClone, *typedOriginal)
+
+ return res
+}
+
+func TestManifestOCI1ConvertToManifestSchema1(t *testing.T) {
+ originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd-copy:latest")
+ original := manifestOCI1FromFixture(t, originalSrc, "oci1.json")
+ original2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json")
+ memoryDest := &memoryImageDest{ref: originalSrc.ref}
+ res := successfulOCI1Conversion(t, original, original2, types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+
+ convertedJSON, mt, err := res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt)
+ assertJSONEqualsFixture(t, convertedJSON, "oci1-to-schema1.json", "signatures")
+
+ assert.Equal(t, GzippedEmptyLayer, memoryDest.storedBlobs[GzippedEmptyLayerDigest])
+
+ // Conversion to schema1 together with changing LayerInfos works as expected (which requires
+ // handling schema1 empty layers):
+ updatedLayers, updatedLayersCopy := modifiedLayerInfos(t, original.LayerInfos())
+ res = successfulOCI1Conversion(t, original, original2, types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt)
+ // Layers have been updated as expected
+ s1Manifest, err := manifestSchema1FromManifest(convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5ba", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680d", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a8", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25908", Size: -1},
+ {Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fb", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ }, s1Manifest.LayerInfos())
+
+ // This can share originalSrc because the config digest is the same between oci1-artifact.json and oci1.json
+ artifact := manifestOCI1FromFixture(t, originalSrc, "oci1-artifact.json")
+ _, err = artifact.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ var expected manifest.NonImageArtifactError
+ assert.ErrorAs(t, err, &expected)
+
+ // Conversion of an encrypted image fails
+ encrypted := manifestOCI1FromFixture(t, originalSrc, "oci1.encrypted.json")
+ encrypted2 := manifestOCI1FromFixture(t, originalSrc, "oci1.encrypted.json")
+ _, err = encrypted.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ assert.Error(t, err)
+
+ // Conversion to schema1 with encryption fails
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt),
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ assert.Error(t, err)
+
+ // Conversion to schema1 with simultaneous decryption is possible
+ updatedLayers = layerInfosWithCryptoOperation(encrypted.LayerInfos(), types.Decrypt)
+ updatedLayersCopy = slices.Clone(updatedLayers)
+ res = successfulOCI1Conversion(t, encrypted, encrypted2, types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt)
+ // Layers have been updated as expected
+ s1Manifest, err = manifestSchema1FromManifest(convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", Size: -1},
+ {Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ }, s1Manifest.LayerInfos())
+
+ // Conversion to schema1 of an image with Zstd layers fails
+ mixedSrc := newOCI1ImageSource(t, "oci1-all-media-types-config.json", "httpd-copy:latest")
+ mixedImage := manifestOCI1FromFixture(t, mixedSrc, "oci1-all-media-types.json")
+ mixedImage2 := manifestOCI1FromFixture(t, mixedSrc, "oci1-all-media-types.json")
+ _, err = mixedImage.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ assert.Error(t, err) // zstd compression is not supported for docker images
+
+ // Conversion to schema1 of an image with Zstd layers, while editing layers to be uncompressed, or gzip-compressed, is possible.
+ for _, c := range []struct {
+ op types.LayerCompression
+ algo *compressiontypes.Algorithm
+ }{
+ {types.Decompress, nil},
+ {types.PreserveOriginal, &compression.Gzip},
+ } {
+ updatedLayers = layerInfosWithCompressionEdits(mixedImage.LayerInfos(), c.op, c.algo)
+ updatedLayersCopy = slices.Clone(updatedLayers)
+ res = successfulOCI1Conversion(t, mixedImage, mixedImage2, types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
+ InformationOnly: types.ManifestUpdateInformation{
+ Destination: memoryDest,
+ },
+ })
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt)
+ s1Manifest, err = manifestSchema1FromManifest(convertedJSON)
+ require.NoError(t, err)
+ // The schema1 data does not contain a MIME type (and we don’t update the digests), so both loop iterations look the same here
+ assert.Equal(t, []types.BlobInfo{
+ {Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: -1},
+ {Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: GzippedEmptyLayerDigest, Size: -1},
+ {Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: -1},
+ }, s1Manifest.LayerInfos())
+ }
+
+ // FIXME? Test also the other failure cases, if only to see that we don't crash?
+}
+
+func TestConvertToManifestSchema2(t *testing.T) {
+ originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd-copy:latest")
+ original := manifestOCI1FromFixture(t, originalSrc, "oci1.json")
+ original2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json")
+ res := successfulOCI1Conversion(t, original, original2, types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ })
+
+ convertedJSON, mt, err := res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema2MediaType, mt)
+ assertJSONEqualsFixture(t, convertedJSON, "oci1-to-schema2.json")
+
+ convertedConfig, err := res.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assertJSONEqualsFixture(t, convertedConfig, "oci1-to-schema2-config.json")
+
+ // This can share originalSrc because the config digest is the same between oci1-artifact.json and oci1.json
+ artifact := manifestOCI1FromFixture(t, originalSrc, "oci1-artifact.json")
+ _, err = artifact.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ })
+ var expected manifest.NonImageArtifactError
+ assert.ErrorAs(t, err, &expected)
+
+ // Conversion of an encrypted image fails
+ encrypted := manifestOCI1FromFixture(t, originalSrc, "oci1.encrypted.json")
+ encrypted2 := manifestOCI1FromFixture(t, originalSrc, "oci1.encrypted.json")
+ _, err = encrypted.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ })
+ assert.Error(t, err)
+
+ // Conversion to schema2 with encryption fails
+ _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ LayerInfos: layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt),
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ })
+ assert.Error(t, err)
+
+ // Conversion to schema2 with simultaneous decryption is possible
+ updatedLayers := layerInfosWithCryptoOperation(encrypted.LayerInfos(), types.Decrypt)
+ updatedLayersCopy := slices.Clone(updatedLayers)
+ res = successfulOCI1Conversion(t, encrypted, encrypted2, types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ })
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema2MediaType, mt)
+ s2Manifest, err := manifestSchema2FromManifest(originalSrc, convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 51354364,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ Size: 150,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ Size: 11739507,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ URLs: []string{"https://layer.url"},
+ },
+ {
+ Digest: "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+ Size: 8841833,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
+ Size: 291,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ }, s2Manifest.LayerInfos())
+ convertedConfig, err = res.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assertJSONEqualsFixture(t, convertedConfig, "oci1-to-schema2-config.json")
+
+ // Conversion to schema2 of an image with Zstd layers fails
+ mixedSrc := newOCI1ImageSource(t, "oci1-all-media-types-config.json", "httpd-copy:latest")
+ mixedImage := manifestOCI1FromFixture(t, mixedSrc, "oci1-all-media-types.json")
+ mixedImage2 := manifestOCI1FromFixture(t, mixedSrc, "oci1-all-media-types.json")
+ _, err = mixedImage.UpdatedImage(context.Background(), types.ManifestUpdateOptions{
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ })
+ assert.Error(t, err) // zstd compression is not supported for docker images
+
+ // Conversion to schema2 of an image with Zstd layers, while editing layers to be uncompressed, is possible.
+ updatedLayers = layerInfosWithCompressionEdits(mixedImage.LayerInfos(), types.Decompress, nil)
+ updatedLayersCopy = slices.Clone(updatedLayers)
+ res = successfulOCI1Conversion(t, mixedImage, mixedImage2, types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ })
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema2MediaType, mt)
+ s2Manifest, err = manifestSchema2FromManifest(mixedSrc, convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar",
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar",
+ },
+ {
+ Digest: "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 152,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar",
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar",
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar",
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar",
+ },
+ }, s2Manifest.LayerInfos())
+ convertedConfig, err = res.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assertJSONEqualsFixture(t, convertedConfig, "oci1-all-media-types-to-schema2-config.json")
+
+ // Conversion to schema2 of an image with Zstd layers, while editing layers to be gzip-compressed, is possible.
+ updatedLayers = layerInfosWithCompressionEdits(mixedImage.LayerInfos(), types.PreserveOriginal, &compression.Gzip)
+ updatedLayersCopy = slices.Clone(updatedLayers)
+ res = successfulOCI1Conversion(t, mixedImage, mixedImage2, types.ManifestUpdateOptions{
+ LayerInfos: updatedLayers,
+ ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+ })
+ assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place
+ convertedJSON, mt, err = res.Manifest(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, manifest.DockerV2Schema2MediaType, mt)
+ s2Manifest, err = manifestSchema2FromManifest(mixedSrc, convertedJSON)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {
+ Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
+ Size: 51354364,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 150,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
+ Size: 152,
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
+ Size: 11739507,
+ MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+ },
+ {
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+ },
+ }, s2Manifest.LayerInfos())
+ convertedConfig, err = res.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ assertJSONEqualsFixture(t, convertedConfig, "oci1-all-media-types-to-schema2-config.json")
+
+ // FIXME? Test also the other failure cases, if only to see that we don't crash?
+}
+
+func TestConvertToV2S2WithInvalidMIMEType(t *testing.T) {
+ originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd-copy:latest")
+ manifest, err := os.ReadFile(filepath.Join("fixtures", "oci1-invalid-media-type.json"))
+ require.NoError(t, err)
+
+ _, err = manifestOCI1FromManifest(originalSrc, manifest)
+ require.NoError(t, err)
+}
+
+func TestManifestOCI1CanChangeLayerCompression(t *testing.T) {
+ for _, m := range []genericManifest{
+ manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1.json"),
+ manifestOCI1FromComponentsLikeFixture(nil),
+ } {
+ assert.True(t, m.CanChangeLayerCompression(imgspecv1.MediaTypeImageLayerGzip))
+ // Some projects like to use squashfs and other unspecified formats for layers; don’t touch those.
+ assert.False(t, m.CanChangeLayerCompression("a completely unknown and quite possibly invalid MIME type"))
+ }
+
+ artifact := manifestOCI1FromFixture(t, mocks.ForbiddenImageSource{}, "oci1-artifact.json")
+ assert.False(t, artifact.CanChangeLayerCompression(imgspecv1.MediaTypeImageLayerGzip))
+}
diff --git a/internal/image/sourced.go b/internal/image/sourced.go
new file mode 100644
index 0000000..661891a
--- /dev/null
+++ b/internal/image/sourced.go
@@ -0,0 +1,134 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+)
+
+// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
+// If reference points to a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
+ src, err := ref.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, err
+ }
+ img, err := FromSource(ctx, sys, src)
+ if err != nil {
+ src.Close()
+ return nil, err
+ }
+ return img, nil
+}
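+
+// Editor's note (illustrative sketch, not part of the upstream code): typical use, including the
+// required Close ("ctx", "sys" and "ref" are hypothetical names):
+//
+//	img, err := FromReference(ctx, sys, ref)
+//	if err != nil {
+//		return err
+//	}
+//	defer img.Close() // also closes the ImageSource created internally
+//	info, err := img.Inspect(ctx)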
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+ types.Image
+ src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to close
+// the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+//
+// Most callers can use either FromUnparsedImage or FromReference instead.
+//
+// This is publicly visible as c/image/image.FromSource.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+ img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
+ if err != nil {
+ return nil, err
+ }
+ return &imageCloser{
+ Image: img,
+ src: src,
+ }, nil
+}
+
+func (ic *imageCloser) Close() error {
+ return ic.src.Close()
+}
+
+// SourcedImage is a general set of utilities for working with container images,
+// whatever their underlying transport is (i.e. ImageSource-independent).
+// Note the existence of docker.Image and image.memoryImage: various instances
+// of a types.Image may not be a SourcedImage directly.
+//
+// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
+//
+// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
+type SourcedImage struct {
+ *UnparsedImage
+ ManifestBlob []byte // The manifest of the relevant instance
+ ManifestMIMEType string // MIME type of ManifestBlob
+	// genericManifest contains data corresponding to ManifestBlob.
+	// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+	// if you want to preserve the original manifest; use ManifestBlob directly.
+ genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.FromUnparsedImage.
+func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) {
+ // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
+ // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
+ // this is the only UnparsedImage implementation around, anyway.
+
+ // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
+ manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SourcedImage{
+ UnparsedImage: unparsed,
+ ManifestBlob: manifestBlob,
+ ManifestMIMEType: manifestMIMEType,
+ genericManifest: parsedManifest,
+ }, nil
+}
+
+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *SourcedImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
+func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ return i.ManifestBlob, i.ManifestMIMEType, nil
+}
+
+// LayerInfosForCopy returns updated layer info from the underlying ImageSource, if any;
+// nil means that the values in the manifest can be used as-is.
+func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+}
diff --git a/internal/image/unparsed.go b/internal/image/unparsed.go
new file mode 100644
index 0000000..0f02650
--- /dev/null
+++ b/internal/image/unparsed.go
@@ -0,0 +1,119 @@
+package image
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// This is publicly visible as c/image/image.UnparsedImage.
+type UnparsedImage struct {
+ src private.ImageSource
+ instanceDigest *digest.Digest
+ cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+ // A private cache for Manifest(), may be the empty string if guessing failed.
+ // Valid iff cachedManifest is not nil.
+ cachedManifestMIMEType string
+ cachedSignatures []signature.Signature // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.UnparsedInstance.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+ return &UnparsedImage{
+ src: imagesource.FromPublic(src),
+ instanceDigest: instanceDigest,
+ }
+}
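+
+// Editor's note (illustrative sketch, not part of the upstream code): the signature-verifying flow
+// mentioned in the NOTEs on FromReference/FromSource builds on UnparsedInstance: verify first, then
+// convert to a full image ("src", "ctx" and "sys" are hypothetical names):
+//
+//	unparsed := UnparsedInstance(src, nil)
+//	// ... verify unparsed.Manifest(ctx) and unparsed.UntrustedSignatures(ctx) against policy ...
+//	img, err := FromUnparsedImage(ctx, sys, unparsed)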
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+ // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+ return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ if i.cachedManifest == nil {
+ m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // ImageSource.GetManifest does not do digest verification, but we do;
+ // this immediately protects also any user of types.Image.
+ if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+ matches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return nil, "", fmt.Errorf("computing manifest digest: %w", err)
+ }
+ if !matches {
+ return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
+ }
+ }
+
+ i.cachedManifest = m
+ i.cachedManifestMIMEType = mt
+ }
+ return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator of whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
+ if i.instanceDigest != nil {
+ return *i.instanceDigest, true
+ }
+ ref := i.Reference().DockerReference()
+ if ref != nil {
+ if canonical, ok := ref.(reference.Canonical); ok {
+ return canonical.Digest(), true
+ }
+ }
+ return "", false
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+ // It would be consistent to make this an internal/unparsedimage/impl.Compat wrapper,
+ // but this is very likely to be the only implementation ever.
+ sigs, err := i.UntrustedSignatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ simpleSigs := [][]byte{}
+ for _, sig := range sigs {
+ if sig, ok := sig.(signature.SimpleSigning); ok {
+ simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+ }
+ }
+ return simpleSigs, nil
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+ if i.cachedSignatures == nil {
+ sigs, err := i.src.GetSignaturesWithFormat(ctx, i.instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ i.cachedSignatures = sigs
+ }
+ return i.cachedSignatures, nil
+}
diff --git a/internal/imagedestination/impl/compat.go b/internal/imagedestination/impl/compat.go
new file mode 100644
index 0000000..47c169a
--- /dev/null
+++ b/internal/imagedestination/impl/compat.go
@@ -0,0 +1,101 @@
+package impl
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+// See AddCompat below.
+type Compat struct {
+ dest private.ImageDestinationInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+//
+// Use it like this:
+//
+// type yourDestination struct {
+// impl.Compat
+// …
+// }
+//
+// dest := &yourDestination{…}
+// dest.Compat = impl.AddCompat(dest)
+func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
+ return Compat{dest}
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+ Cache: blobinfocache.FromBlobInfoCache(cache),
+ IsConfig: isConfig,
+ })
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ return types.BlobInfo{
+ Digest: res.Digest,
+ Size: res.Size,
+ }, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
+ Cache: blobinfocache.FromBlobInfoCache(cache),
+ CanSubstitute: canSubstitute,
+ })
+ if !reused || err != nil {
+ return reused, types.BlobInfo{}, err
+ }
+ res := types.BlobInfo{
+ Digest: blob.Digest,
+ Size: blob.Size,
+ CompressionOperation: blob.CompressionOperation,
+ CompressionAlgorithm: blob.CompressionAlgorithm,
+ }
+ // This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers.
+ // Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution;
+ // provide the value in cases where it is likely to be correct.
+ if blob.Digest == info.Digest {
+ res.MediaType = info.MediaType
+ }
+ return true, res, nil
+}
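+
+// As an illustrative sketch (not part of the upstream API; dest, layerInfo and cache are assumed
+// to be provided by the caller), this compatibility path is what an external caller still using
+// types.ImageDestination exercises:
+//
+//	reused, info, err := dest.TryReusingBlob(ctx, layerInfo, cache, true)
+//	if err != nil {
+//		return err
+//	}
+//	if reused {
+//		// info describes the blob actually present at the destination,
+//		// possibly with a different digest if substitution was allowed.
+//	}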
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+ withFormat := []signature.Signature{}
+ for _, sig := range signatures {
+ withFormat = append(withFormat, signature.SimpleSigningFromBlob(sig))
+ }
+ return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest)
+}
diff --git a/internal/imagedestination/impl/helpers.go b/internal/imagedestination/impl/helpers.go
new file mode 100644
index 0000000..5d28b3e
--- /dev/null
+++ b/internal/imagedestination/impl/helpers.go
@@ -0,0 +1,25 @@
+package impl
+
+import (
+ "github.com/containers/image/v5/internal/private"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+)
+
+// BlobMatchesRequiredCompression validates if compression is required by the caller while selecting a blob, if it is required
+// then function performs a match against the compression requested by the caller and compression of existing blob
+// (which can be nil to represent uncompressed or unknown)
+func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
+ if options.RequiredCompression == nil {
+ return true // no requirement imposed
+ }
+ if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
+ // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
+ // The caller must re-compress to build those annotations.
+ return false
+ }
+ return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
+}
+
+func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool {
+ return BlobMatchesRequiredCompression(opts, opts.OriginalCompression)
+}
diff --git a/internal/imagedestination/impl/helpers_test.go b/internal/imagedestination/impl/helpers_test.go
new file mode 100644
index 0000000..8a80d1d
--- /dev/null
+++ b/internal/imagedestination/impl/helpers_test.go
@@ -0,0 +1,29 @@
+package impl
+
+import (
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/pkg/compression"
+ compressionTypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBlobMatchesRequiredCompression(t *testing.T) {
+ var opts private.TryReusingBlobOptions
+ cases := []struct {
+ requiredCompression *compressionTypes.Algorithm
+ candidateCompression *compressionTypes.Algorithm
+ result bool
+ }{
+ {&compression.Zstd, &compression.Zstd, true},
+ {&compression.Gzip, &compression.Zstd, false},
+ {&compression.Zstd, nil, false},
+ {nil, &compression.Zstd, true},
+ }
+
+ for _, c := range cases {
+ opts = private.TryReusingBlobOptions{RequiredCompression: c.requiredCompression}
+ assert.Equal(t, c.result, BlobMatchesRequiredCompression(opts, c.candidateCompression))
+ }
+}
diff --git a/internal/imagedestination/impl/properties.go b/internal/imagedestination/impl/properties.go
new file mode 100644
index 0000000..704812e
--- /dev/null
+++ b/internal/imagedestination/impl/properties.go
@@ -0,0 +1,72 @@
+package impl
+
+import "github.com/containers/image/v5/types"
+
+// Properties collects properties of an ImageDestination that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+ // SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+	// An empty slice or nil means any MIME type may be tried for upload.
+ SupportedManifestMIMETypes []string
+ // DesiredLayerCompression indicates the kind of compression to apply on layers
+ DesiredLayerCompression types.LayerCompression
+	// AcceptsForeignLayerURLs is false if foreign layers in the manifest should actually be
+	// uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs bool
+ // MustMatchRuntimeOS is set to true if the destination can store only images targeted for the current runtime architecture and OS.
+ MustMatchRuntimeOS bool
+ // IgnoresEmbeddedDockerReference is set to true if the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+ // and would prefer to receive an unmodified manifest instead of one modified for the destination.
+ // Does not make a difference if Reference().DockerReference() is nil.
+ IgnoresEmbeddedDockerReference bool
+ // HasThreadSafePutBlob indicates that PutBlob can be executed concurrently.
+ HasThreadSafePutBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageDestination corresponding to Properties.
+type PropertyMethodsInitialize struct {
+ // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+ vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+ return PropertyMethodsInitialize{
+ vals: vals,
+ }
+}
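+
+// For illustration only (yourDestination and the field values are hypothetical), a transport
+// typically embeds PropertyMethodsInitialize and fills it in its constructor:
+//
+//	type yourDestination struct {
+//		impl.PropertyMethodsInitialize
+//		…
+//	}
+//
+//	dest := &yourDestination{
+//		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+//			SupportedManifestMIMETypes: nil, // any MIME type may be tried
+//			DesiredLayerCompression:    types.PreserveOriginal,
+//			HasThreadSafePutBlob:       false,
+//		}),
+//		…
+//	}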
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, then any MIME type may be tried for upload.
+func (o PropertyMethodsInitialize) SupportedManifestMIMETypes() []string {
+ return o.vals.SupportedManifestMIMETypes
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply on layers
+func (o PropertyMethodsInitialize) DesiredLayerCompression() types.LayerCompression {
+ return o.vals.DesiredLayerCompression
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (o PropertyMethodsInitialize) AcceptsForeignLayerURLs() bool {
+ return o.vals.AcceptsForeignLayerURLs
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (o PropertyMethodsInitialize) MustMatchRuntimeOS() bool {
+ return o.vals.MustMatchRuntimeOS
+}
+
+// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (o PropertyMethodsInitialize) IgnoresEmbeddedDockerReference() bool {
+ return o.vals.IgnoresEmbeddedDockerReference
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool {
+ return o.vals.HasThreadSafePutBlob
+}
diff --git a/internal/imagedestination/stubs/put_blob_partial.go b/internal/imagedestination/stubs/put_blob_partial.go
new file mode 100644
index 0000000..0dc6bd5
--- /dev/null
+++ b/internal/imagedestination/stubs/put_blob_partial.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// NoPutBlobPartialInitialize implements parts of private.ImageDestination
+// for transports that don’t support PutBlobPartial().
+// See NoPutBlobPartial() below.
+type NoPutBlobPartialInitialize struct {
+ transportName string
+}
+
+// NoPutBlobPartial creates a NoPutBlobPartialInitialize for ref.
+func NoPutBlobPartial(ref types.ImageReference) NoPutBlobPartialInitialize {
+ return NoPutBlobPartialRaw(ref.Transport().Name())
+}
+
+// NoPutBlobPartialRaw is the same thing as NoPutBlobPartial, but it can be used
+// in situations where no ImageReference is available.
+func NoPutBlobPartialRaw(transportName string) NoPutBlobPartialInitialize {
+ return NoPutBlobPartialInitialize{
+ transportName: transportName,
+ }
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
+ return false
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
+ return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.
+type ImplementsPutBlobPartial struct{}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub ImplementsPutBlobPartial) SupportsPutBlobPartial() bool {
+ return true
+}
diff --git a/internal/imagedestination/stubs/signatures.go b/internal/imagedestination/stubs/signatures.go
new file mode 100644
index 0000000..7015fd0
--- /dev/null
+++ b/internal/imagedestination/stubs/signatures.go
@@ -0,0 +1,50 @@
+package stubs
+
+import (
+ "context"
+ "errors"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// NoSignaturesInitialize implements parts of private.ImageDestination
+// for transports that don’t support storing signatures.
+// See NoSignatures() below.
+type NoSignaturesInitialize struct {
+ message string
+}
+
+// NoSignatures creates a NoSignaturesInitialize, failing with message.
+func NoSignatures(message string) NoSignaturesInitialize {
+ return NoSignaturesInitialize{
+ message: message,
+ }
+}
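+
+// For example (the message text is illustrative), a destination that cannot store signatures
+// might embed this stub as:
+//
+//	NoSignaturesInitialize: stubs.NoSignatures("pushing signatures to this transport is not supported"),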
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub NoSignaturesInitialize) SupportsSignatures(ctx context.Context) error {
+ return errors.New(stub.message)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (stub NoSignaturesInitialize) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ if len(signatures) != 0 {
+ return errors.New(stub.message)
+ }
+ return nil
+}
+
+// AlwaysSupportsSignatures implements SupportsSignatures() that returns nil.
+// Note that it might be even more useful to return a value dynamically detected based on the destination's actual capabilities.
+type AlwaysSupportsSignatures struct{}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub AlwaysSupportsSignatures) SupportsSignatures(ctx context.Context) error {
+ return nil
+}
diff --git a/internal/imagedestination/stubs/stubs.go b/internal/imagedestination/stubs/stubs.go
new file mode 100644
index 0000000..ab23340
--- /dev/null
+++ b/internal/imagedestination/stubs/stubs.go
@@ -0,0 +1,27 @@
+// Package stubs contains trivial stubs for parts of private.ImageDestination.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagedestination/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+//
+// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
+// implementation:
+//
+// type yourDestination struct {
+// stubs.ImplementsPutBlobPartial
+// …
+// }
+//
+// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+// type yourDestination struct {
+// stubs.NoPutBlobPartialInitialize
+// …
+// }
+//
+// dest := &yourDestination{
+// …
+// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+// }
+package stubs
diff --git a/internal/imagedestination/wrapper.go b/internal/imagedestination/wrapper.go
new file mode 100644
index 0000000..17e1870
--- /dev/null
+++ b/internal/imagedestination/wrapper.go
@@ -0,0 +1,96 @@
+package imagedestination
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// wrapped provides the private.ImageDestination operations
+// for a destination that only implements types.ImageDestination
+type wrapped struct {
+ stubs.NoPutBlobPartialInitialize
+
+ types.ImageDestination
+}
+
+// FromPublic(dest) returns an object that provides the private.ImageDestination API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageDestination, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageDestination.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(dest types.ImageDestination) private.ImageDestination {
+ if dest2, ok := dest.(private.ImageDestination); ok {
+ return dest2
+ }
+ return &wrapped{
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()),
+
+ ImageDestination: dest,
+ }
+}
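+
+// A hedged usage sketch (publicDest, stream, inputInfo and cache are assumed to be provided by the
+// caller; cache is assumed to be an internal/blobinfocache.BlobInfoCache2):
+//
+//	dest := imagedestination.FromPublic(publicDest)
+//	uploaded, err := dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+//		Cache:    cache,
+//		IsConfig: false,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// uploaded.Digest and uploaded.Size describe the blob as stored at the destination.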
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ return private.UploadedBlob{
+ Digest: res.Digest,
+ Size: res.Size,
+ }, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if options.RequiredCompression != nil {
+ return false, private.ReusedBlob{}, nil
+ }
+ reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
+ if !reused || err != nil {
+ return reused, private.ReusedBlob{}, err
+ }
+ return true, private.ReusedBlob{
+ Digest: blob.Digest,
+ Size: blob.Size,
+ CompressionOperation: blob.CompressionOperation,
+ CompressionAlgorithm: blob.CompressionAlgorithm,
+ }, nil
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ simpleSigs := [][]byte{}
+ for _, sig := range signatures {
+ simpleSig, ok := sig.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(sig)
+ }
+ simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature())
+ }
+ return w.PutSignatures(ctx, simpleSigs, instanceDigest)
+}
diff --git a/internal/imagesource/impl/compat.go b/internal/imagesource/impl/compat.go
new file mode 100644
index 0000000..7d859c3
--- /dev/null
+++ b/internal/imagesource/impl/compat.go
@@ -0,0 +1,55 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageSource
+// for implementations of private.ImageSource.
+// See AddCompat below.
+type Compat struct {
+ src private.ImageSourceInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageSource
+// for implementations of private.ImageSource.
+//
+// Use it like this:
+//
+// type yourSource struct {
+// impl.Compat
+// …
+// }
+//
+// src := &yourSource{…}
+// src.Compat = impl.AddCompat(src)
+func AddCompat(src private.ImageSourceInternalOnly) Compat {
+ return Compat{src}
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (c *Compat) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ // Silently ignore signatures with other formats; the caller can’t handle them.
+ // Admittedly callers that want to sync all of the image might want to fail instead; this
+ // way an upgrade of c/image neither breaks them nor adds new functionality.
+ // Alternatively, we could possibly define the old GetSignatures to use the multi-format
+ // signature.Blob representation now, in general, but that could silently break them as well.
+ sigs, err := c.src.GetSignaturesWithFormat(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ simpleSigs := [][]byte{}
+ for _, sig := range sigs {
+ if sig, ok := sig.(signature.SimpleSigning); ok {
+ simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+ }
+ }
+ return simpleSigs, nil
+}
diff --git a/internal/imagesource/impl/layer_infos.go b/internal/imagesource/impl/layer_infos.go
new file mode 100644
index 0000000..d5eae63
--- /dev/null
+++ b/internal/imagesource/impl/layer_infos.go
@@ -0,0 +1,23 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing.
+type DoesNotAffectLayerInfosForCopy struct{}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (stub DoesNotAffectLayerInfosForCopy) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
diff --git a/internal/imagesource/impl/properties.go b/internal/imagesource/impl/properties.go
new file mode 100644
index 0000000..73e8c78
--- /dev/null
+++ b/internal/imagesource/impl/properties.go
@@ -0,0 +1,27 @@
+package impl
+
+// Properties collects properties of an ImageSource that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+ // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+ HasThreadSafeGetBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageSource corresponding to Properties.
+type PropertyMethodsInitialize struct {
+ // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+ vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+ return PropertyMethodsInitialize{
+ vals: vals,
+ }
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafeGetBlob() bool {
+ return o.vals.HasThreadSafeGetBlob
+}
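+
+// For illustration only (yourSource is hypothetical), an ImageSource embeds PropertyMethodsInitialize
+// and fills it in its constructor, mirroring the destination-side helper:
+//
+//	type yourSource struct {
+//		impl.PropertyMethodsInitialize
+//		…
+//	}
+//
+//	src := &yourSource{
+//		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+//			HasThreadSafeGetBlob: true,
+//		}),
+//		…
+//	}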
diff --git a/internal/imagesource/impl/signatures.go b/internal/imagesource/impl/signatures.go
new file mode 100644
index 0000000..b3a8c7e
--- /dev/null
+++ b/internal/imagesource/impl/signatures.go
@@ -0,0 +1,19 @@
+package impl
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/opencontainers/go-digest"
+)
+
+// NoSignatures implements GetSignaturesWithFormat() that returns nothing.
+type NoSignatures struct{}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (stub NoSignatures) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return nil, nil
+}
diff --git a/internal/imagesource/stubs/get_blob_at.go b/internal/imagesource/stubs/get_blob_at.go
new file mode 100644
index 0000000..15aee6d
--- /dev/null
+++ b/internal/imagesource/stubs/get_blob_at.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// NoGetBlobAtInitialize implements parts of private.ImageSource
+// for transports that don’t support GetBlobAt().
+// See NoGetBlobAt() below.
+type NoGetBlobAtInitialize struct {
+ transportName string
+}
+
+// NoGetBlobAt() creates a NoGetBlobAtInitialize for ref.
+func NoGetBlobAt(ref types.ImageReference) NoGetBlobAtInitialize {
+ return NoGetBlobAtRaw(ref.Transport().Name())
+}
+
+// NoGetBlobAtRaw is the same thing as NoGetBlobAt, but it can be used
+// in situations where no ImageReference is available.
+func NoGetBlobAtRaw(transportName string) NoGetBlobAtInitialize {
+ return NoGetBlobAtInitialize{
+ transportName: transportName,
+ }
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
+ return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsGetBlobAt implements SupportsGetBlobAt() that returns true.
+type ImplementsGetBlobAt struct{}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub ImplementsGetBlobAt) SupportsGetBlobAt() bool {
+ return true
+}
diff --git a/internal/imagesource/stubs/stubs.go b/internal/imagesource/stubs/stubs.go
new file mode 100644
index 0000000..cb34539
--- /dev/null
+++ b/internal/imagesource/stubs/stubs.go
@@ -0,0 +1,28 @@
+// Package stubs contains trivial stubs for parts of private.ImageSource.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagesource/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+//
+// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
+// implementation:
+//
+// type yourSource struct {
+// stubs.ImplementsGetBlobAt
+// …
+// }
+//
+// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+// type yourSource struct {
+// stubs.NoGetBlobAtInitialize
+// …
+// }
+//
+//	src := &yourSource{
+// …
+// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+// }
+package stubs
diff --git a/internal/imagesource/wrapper.go b/internal/imagesource/wrapper.go
new file mode 100644
index 0000000..886b4e8
--- /dev/null
+++ b/internal/imagesource/wrapper.go
@@ -0,0 +1,56 @@
+package imagesource
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+ stubs.NoGetBlobAtInitialize
+
+ types.ImageSource
+}
+
+// FromPublic(src) returns an object that provides the private.ImageSource API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageSource, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageSource.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(src types.ImageSource) private.ImageSource {
+ if src2, ok := src.(private.ImageSource); ok {
+ return src2
+ }
+ return &wrapped{
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(src.Reference()),
+
+ ImageSource: src,
+ }
+}
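+
+// A hedged usage sketch (publicSrc is assumed to be a types.ImageSource created by a transport):
+//
+//	src := imagesource.FromPublic(publicSrc)
+//	sigs, err := src.GetSignaturesWithFormat(ctx, nil)
+//	if err != nil {
+//		return err
+//	}
+//	// Each entry in sigs is an internal/signature.Signature, preserving the signature format.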
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (w *wrapped) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ sigs, err := w.GetSignatures(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+ res := []signature.Signature{}
+ for _, sig := range sigs {
+ res = append(res, signature.SimpleSigningFromBlob(sig))
+ }
+ return res, nil
+}
diff --git a/internal/iolimits/iolimits.go b/internal/iolimits/iolimits.go
new file mode 100644
index 0000000..f17d002
--- /dev/null
+++ b/internal/iolimits/iolimits.go
@@ -0,0 +1,58 @@
+package iolimits
+
+import (
+ "fmt"
+ "io"
+)
+
+// All constants below are intended to be used as limits for `ReadAtMost`. The
+// immediate use-case for limiting the size of in-memory copied data is to
+// protect against OOM DoS attacks as described in CVE-2020-1702. Instead of
+// copying data until running out of memory, we error out after hitting the
+// specified limit.
+const (
+ // megaByte denotes one megabyte and is intended to be used as a limit in
+ // `ReadAtMost`.
+ megaByte = 1 << 20
+ // MaxManifestBodySize is the maximum allowed size of a manifest. The limit
+ // of 4 MB aligns with the one of a Docker registry:
+ // https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30
+ MaxManifestBodySize = 4 * megaByte
+ // MaxAuthTokenBodySize is the maximum allowed size of an auth token.
+ // The limit of 1 MB is considered to be greatly sufficient.
+ MaxAuthTokenBodySize = megaByte
+ // MaxSignatureListBodySize is the maximum allowed size of a signature list.
+ // The limit of 4 MB is considered to be greatly sufficient.
+ MaxSignatureListBodySize = 4 * megaByte
+ // MaxSignatureBodySize is the maximum allowed size of a signature.
+ // The limit of 4 MB is considered to be greatly sufficient.
+ MaxSignatureBodySize = 4 * megaByte
+ // MaxErrorBodySize is the maximum allowed size of an error-response body.
+ // The limit of 1 MB is considered to be greatly sufficient.
+ MaxErrorBodySize = megaByte
+ // MaxConfigBodySize is the maximum allowed size of a config blob.
+ // The limit of 4 MB is considered to be greatly sufficient.
+ MaxConfigBodySize = 4 * megaByte
+ // MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body.
+ // The limit of 4 MB is considered to be greatly sufficient.
+ MaxOpenShiftStatusBody = 4 * megaByte
+ // MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images)
+ // The limit of 1 MB is considered to be greatly sufficient.
+ MaxTarFileManifestSize = megaByte
+)
+
+// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded.
+func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
+ limitedReader := io.LimitReader(reader, int64(limit+1))
+
+ res, err := io.ReadAll(limitedReader)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(res) > limit {
+ return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit)
+ }
+
+ return res, nil
+}
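+
+// A minimal usage sketch (res is assumed to be an *http.Response obtained from a registry request):
+//
+//	body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
+//	if err != nil {
+//		return nil, err
+//	}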
diff --git a/internal/iolimits/iolimits_test.go b/internal/iolimits/iolimits_test.go
new file mode 100644
index 0000000..630b38c
--- /dev/null
+++ b/internal/iolimits/iolimits_test.go
@@ -0,0 +1,37 @@
+package iolimits
+
+import (
+ "bytes"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReadAtMost(t *testing.T) {
+ rng := rand.New(rand.NewSource(0))
+ for _, c := range []struct {
+ input, limit int
+ shouldSucceed bool
+ }{
+ {0, 0, true},
+ {0, 1, true},
+ {1, 0, false},
+ {1, 1, true},
+ {bytes.MinRead*5 - 1, bytes.MinRead * 5, true},
+ {bytes.MinRead * 5, bytes.MinRead * 5, true},
+ {bytes.MinRead*5 + 1, bytes.MinRead * 5, false},
+ } {
+ input := make([]byte, c.input)
+ _, err := rng.Read(input)
+ require.NoError(t, err)
+ result, err := ReadAtMost(bytes.NewReader(input), c.limit)
+ if c.shouldSucceed {
+ assert.NoError(t, err)
+ assert.Equal(t, result, input)
+ } else {
+ assert.Error(t, err)
+ }
+ }
+}
diff --git a/internal/manifest/common.go b/internal/manifest/common.go
new file mode 100644
index 0000000..1f2ccb5
--- /dev/null
+++ b/internal/manifest/common.go
@@ -0,0 +1,72 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat
+// can expect to be present.
+type AllowedManifestFields int
+
+const (
+ AllowedFieldConfig AllowedManifestFields = 1 << iota
+ AllowedFieldFSLayers
+ AllowedFieldHistory
+ AllowedFieldLayers
+ AllowedFieldManifests
+ AllowedFieldFirstUnusedBit // Keep this at the end!
+)
+
+// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
+// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
+// other than the ones the caller specifically allows.
+// expectedMIMEType is used only for diagnostics.
+// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
+// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
+// data that just isn’t what was expected, as opposed to actually ambiguous data.
+func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
+ allowed AllowedManifestFields) error {
+ if allowed >= AllowedFieldFirstUnusedBit {
+ return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
+ }
+ // Use a private type to decode, not just a map[string]any, because we want
+ // to also reject case-insensitive matches (which would be used by Go when really decoding
+ // the manifest).
+ // (It is expected that as manifest formats are added or extended over time, more fields will be added
+ // here.)
+ detectedFields := struct {
+ Config any `json:"config"`
+ FSLayers any `json:"fsLayers"`
+ History any `json:"history"`
+ Layers any `json:"layers"`
+ Manifests any `json:"manifests"`
+ }{}
+ if err := json.Unmarshal(manifest, &detectedFields); err != nil {
+ // The caller was supposed to already validate version numbers, so this should not happen;
+ // let’s not bother with making this error “nice”.
+ return err
+ }
+ unexpected := []string{}
+ // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste.
+ if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 {
+ unexpected = append(unexpected, "config")
+ }
+ if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 {
+ unexpected = append(unexpected, "fsLayers")
+ }
+ if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 {
+ unexpected = append(unexpected, "history")
+ }
+ if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 {
+ unexpected = append(unexpected, "layers")
+ }
+ if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 {
+ unexpected = append(unexpected, "manifests")
+ }
+ if len(unexpected) != 0 {
+ return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
+ unexpected, expectedMIMEType)
+ }
+ return nil
+}
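+
+// An illustrative call for a supposedly-schema2 Docker image manifest (the blob variable is
+// hypothetical); a document that additionally carries e.g. a "manifests" field would then be
+// rejected as ambiguous:
+//
+//	if err := ValidateUnambiguousManifestFormat(blob, DockerV2Schema2MediaType,
+//		AllowedFieldConfig|AllowedFieldLayers); err != nil {
+//		return nil, err
+//	}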
diff --git a/internal/manifest/common_test.go b/internal/manifest/common_test.go
new file mode 100644
index 0000000..553cc36
--- /dev/null
+++ b/internal/manifest/common_test.go
@@ -0,0 +1,91 @@
+package manifest
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestValidateUnambiguousManifestFormat(t *testing.T) {
+ const allAllowedFields = AllowedFieldFirstUnusedBit - 1
+ const mt = "text/plain" // Just some MIME type that shows up in error messages
+
+ type test struct {
+ manifest string
+ allowed AllowedManifestFields
+ }
+
+ // Smoke tests: Success
+ for _, c := range []test{
+ {"{}", allAllowedFields},
+ {"{}", 0},
+ } {
+ err := ValidateUnambiguousManifestFormat([]byte(c.manifest), mt, c.allowed)
+ assert.NoError(t, err, c)
+ }
+ // Smoke tests: Failure
+ for _, c := range []test{
+ {"{}", AllowedFieldFirstUnusedBit}, // Invalid "allowed"
+ {"@", allAllowedFields}, // Invalid JSON
+ } {
+ err := ValidateUnambiguousManifestFormat([]byte(c.manifest), mt, c.allowed)
+ assert.Error(t, err, c)
+ }
+
+ fields := map[AllowedManifestFields]string{
+ AllowedFieldConfig: "config",
+ AllowedFieldFSLayers: "fsLayers",
+ AllowedFieldHistory: "history",
+ AllowedFieldLayers: "layers",
+ AllowedFieldManifests: "manifests",
+ }
+ // Ensure this test covers all defined AllowedManifestFields values
+ allFields := AllowedManifestFields(0)
+ for k := range fields {
+ allFields |= k
+ }
+ assert.Equal(t, allAllowedFields, allFields)
+
+ // Every single field is allowed by its bit, and rejected by any other bit
+ for bit, fieldName := range fields {
+ json := []byte(fmt.Sprintf(`{"%s":[]}`, fieldName))
+ err := ValidateUnambiguousManifestFormat(json, mt, bit)
+ assert.NoError(t, err, fieldName)
+ err = ValidateUnambiguousManifestFormat(json, mt, allAllowedFields^bit)
+ assert.Error(t, err, fieldName)
+ }
+}
+
+// Test that parser() rejects all of the provided manifest fixtures.
+// Intended to help test manifest parsers' detection of schema mismatches.
+func testManifestFixturesAreRejected(t *testing.T, parser func([]byte) error, fixtures []string) {
+ for _, fixture := range fixtures {
+ manifest, err := os.ReadFile(filepath.Join("testdata", fixture))
+ require.NoError(t, err, fixture)
+ err = parser(manifest)
+ assert.Error(t, err, fixture)
+ }
+}
+
+// Test that parser() rejects validManifest with an added top-level field with any of the provided field names.
+// Intended to help test callers of validateUnambiguousManifestFormat.
+func testValidManifestWithExtraFieldsIsRejected(t *testing.T, parser func([]byte) error,
+ validManifest []byte, fields []string) {
+ for _, field := range fields {
+ // end (the final '}') is not always at len(validManifest)-1 because the manifest can end with
+ // white space.
+ end := bytes.LastIndexByte(validManifest, '}')
+ require.NotEqual(t, end, -1)
+ updatedManifest := []byte(string(validManifest[:end]) +
+ fmt.Sprintf(`,"%s":[]}`, field))
+ err := parser(updatedManifest)
+ // Make sure it is the error from validateUnambiguousManifestFormat, not something that
+ // went wrong with creating updatedManifest.
+ assert.ErrorContains(t, err, "rejecting ambiguous manifest", field)
+ }
+}
diff --git a/internal/manifest/docker_schema2.go b/internal/manifest/docker_schema2.go
new file mode 100644
index 0000000..68d0796
--- /dev/null
+++ b/internal/manifest/docker_schema2.go
@@ -0,0 +1,15 @@
+package manifest
+
+import (
+ "github.com/opencontainers/go-digest"
+)
+
+// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
+//
+// This is publicly visible as c/image/manifest.Schema2Descriptor.
+type Schema2Descriptor struct {
+ MediaType string `json:"mediaType"`
+ Size int64 `json:"size"`
+ Digest digest.Digest `json:"digest"`
+ URLs []string `json:"urls,omitempty"`
+}
diff --git a/internal/manifest/docker_schema2_list.go b/internal/manifest/docker_schema2_list.go
new file mode 100644
index 0000000..7ce5bb0
--- /dev/null
+++ b/internal/manifest/docker_schema2_list.go
@@ -0,0 +1,314 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+
+ platform "github.com/containers/image/v5/internal/pkg/platform"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/slices"
+)
+
+// Schema2PlatformSpec describes the platform which a particular manifest is
+// specialized for.
+// This is publicly visible as c/image/manifest.Schema2PlatformSpec.
+type Schema2PlatformSpec struct {
+ Architecture string `json:"architecture"`
+ OS string `json:"os"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+ Variant string `json:"variant,omitempty"`
+ Features []string `json:"features,omitempty"` // removed in OCI
+}
+
+// Schema2ManifestDescriptor references a platform-specific manifest.
+// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor.
+type Schema2ManifestDescriptor struct {
+ Schema2Descriptor
+ Platform Schema2PlatformSpec `json:"platform"`
+}
+
+// Schema2ListPublic is a list of platform-specific manifests.
+// This is publicly visible as c/image/manifest.Schema2List.
+// Internal users should usually use Schema2List instead.
+type Schema2ListPublic struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ Manifests []Schema2ManifestDescriptor `json:"manifests"`
+}
+
+// MIMEType returns the MIME type of this particular manifest list.
+func (list *Schema2ListPublic) MIMEType() string {
+ return list.MediaType
+}
+
+// Instances returns a slice of digests of the manifests that this list knows of.
+func (list *Schema2ListPublic) Instances() []digest.Digest {
+ results := make([]digest.Digest, len(list.Manifests))
+ for i, m := range list.Manifests {
+ results[i] = m.Digest
+ }
+ return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the list.
+func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+ for _, manifest := range list.Manifests {
+ if manifest.Digest == instanceDigest {
+ ret := ListUpdate{
+ Digest: manifest.Digest,
+ Size: manifest.Size,
+ MediaType: manifest.MediaType,
+ }
+ ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName}
+ platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
+ ret.ReadOnly.Platform = &platform
+ return ret, nil
+ }
+ }
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
+ editInstances := []ListEdit{}
+ for i, instance := range updates {
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: index.Manifests[i].Digest,
+ UpdateDigest: instance.Digest,
+ UpdateSize: instance.Size,
+ UpdateMediaType: instance.MediaType,
+ ListOperation: ListOpUpdate})
+ }
+ return index.editInstances(editInstances)
+}
+
+func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
+ addedEntries := []Schema2ManifestDescriptor{}
+ for i, editInstance := range editInstances {
+ switch editInstance.ListOperation {
+ case ListOpUpdate:
+ if err := editInstance.UpdateOldDigest.Validate(); err != nil {
+ return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
+ }
+ if err := editInstance.UpdateDigest.Validate(); err != nil {
+ return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
+ }
+ targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
+ return m.Digest == editInstance.UpdateOldDigest
+ })
+ if targetIndex == -1 {
+ return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
+ }
+ index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+ if editInstance.UpdateSize < 0 {
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
+ }
+ index.Manifests[targetIndex].Size = editInstance.UpdateSize
+ if editInstance.UpdateMediaType == "" {
+ return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+ }
+ index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+ case ListOpAdd:
+ if editInstance.AddPlatform == nil {
+ // Should we create a struct with empty fields instead?
+ // Right now ListOpAdd is only called when an instance with the same platform value
+ // already exists in the manifest, so this should not be reached in practice.
+ return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported")
+ }
+ addedEntries = append(addedEntries, Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ Digest: editInstance.AddDigest,
+ Size: editInstance.AddSize,
+ MediaType: editInstance.AddMediaType,
+ },
+ schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform),
+ })
+ default:
+ return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
+ }
+ }
+ if len(addedEntries) != 0 {
+ // slices.Clone() here to ensure a private backing array;
+ // an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
+ index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
+ }
+ return nil
+}
+
+func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
+ return index.editInstances(editInstances)
+}
+
+func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
+	// ChooseInstanceByCompression is the same as ChooseInstance for a schema2 manifest list.
+ return list.ChooseInstance(ctx)
+}
+
+// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
+// of the image which is appropriate for the current environment.
+func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+ wantedPlatforms, err := platform.WantedPlatforms(ctx)
+ if err != nil {
+ return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
+ }
+ for _, wantedPlatform := range wantedPlatforms {
+ for _, d := range list.Manifests {
+ imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
+ if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
+ return d.Digest, nil
+ }
+ }
+ }
+ return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+}
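+
+// A hedged usage sketch (blob and sys are assumed to be provided by the caller; sys describes the
+// platform to select for):
+//
+//	list, err := Schema2ListPublicFromManifest(blob)
+//	if err != nil {
+//		return "", err
+//	}
+//	instanceDigest, err := list.ChooseInstance(sys)
+//	if err != nil {
+//		return "", err
+//	}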
+
+// Serialize returns the list in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (list *Schema2ListPublic) Serialize() ([]byte, error) {
+ buf, err := json.Marshal(list)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
+ }
+ return buf, nil
+}
+
+// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the
+// supplied data.
+// This is publicly visible as c/image/manifest.Schema2ListFromComponents.
+func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic {
+ list := Schema2ListPublic{
+ SchemaVersion: 2,
+ MediaType: DockerV2ListMediaType,
+ Manifests: make([]Schema2ManifestDescriptor, len(components)),
+ }
+ for i, component := range components {
+ m := Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ MediaType: component.MediaType,
+ Size: component.Size,
+ Digest: component.Digest,
+ URLs: slices.Clone(component.URLs),
+ },
+ Schema2PlatformSpec{
+ Architecture: component.Platform.Architecture,
+ OS: component.Platform.OS,
+ OSVersion: component.Platform.OSVersion,
+ OSFeatures: slices.Clone(component.Platform.OSFeatures),
+ Variant: component.Platform.Variant,
+ Features: slices.Clone(component.Platform.Features),
+ },
+ }
+ list.Manifests[i] = m
+ }
+ return &list
+}
+
+// Schema2ListPublicClone creates a deep copy of the passed-in list.
+// This is publicly visible as c/image/manifest.Schema2ListClone.
+func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
+ return Schema2ListPublicFromComponents(list.Manifests)
+}
+
+// ToOCI1Index returns the list encoded as an OCI1 index.
+func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
+ components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
+ for _, manifest := range list.Manifests {
+ platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
+ components = append(components, imgspecv1.Descriptor{
+ MediaType: manifest.MediaType,
+ Size: manifest.Size,
+ Digest: manifest.Digest,
+ URLs: slices.Clone(manifest.URLs),
+ Platform: &platform,
+ })
+ }
+ oci := OCI1IndexPublicFromComponents(components, nil)
+ return oci, nil
+}
+
+// ToSchema2List returns the list encoded as a Schema2 list.
+func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) {
+ return Schema2ListPublicClone(list), nil
+}
+
+// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+// This is publicly visible as c/image/manifest.Schema2ListFromManifest.
+func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
+ list := Schema2ListPublic{
+ Manifests: []Schema2ManifestDescriptor{},
+ }
+ if err := json.Unmarshal(manifest, &list); err != nil {
+ return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
+ }
+ if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
+ AllowedFieldManifests); err != nil {
+ return nil, err
+ }
+ return &list, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (list *Schema2ListPublic) Clone() ListPublic {
+ return Schema2ListPublicClone(list)
+}
+
+// ConvertToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
+func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
+ switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
+ case DockerV2ListMediaType:
+ return list.Clone(), nil
+ case imgspecv1.MediaTypeImageIndex:
+ return list.ToOCI1Index()
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
+ default:
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
+ }
+}
+
+// Schema2List is a list of platform-specific manifests.
+type Schema2List struct {
+ Schema2ListPublic
+}
+
+func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
+ return &Schema2List{*public}
+}
+
+func (index *Schema2List) CloneInternal() List {
+ return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
+}
+
+func (index *Schema2List) Clone() ListPublic {
+ return index.CloneInternal()
+}
+
+// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
+ public, err := Schema2ListPublicFromManifest(manifest)
+ if err != nil {
+ return nil, err
+ }
+ return schema2ListFromPublic(public), nil
+}
+
+// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure.
+func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform {
+ return imgspecv1.Platform{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ OSVersion: p.OSVersion,
+ OSFeatures: slices.Clone(p.OSFeatures),
+ Variant: p.Variant,
+ // Features is not supported in OCI, and discarded.
+ }
+}
diff --git a/internal/manifest/docker_schema2_list_test.go b/internal/manifest/docker_schema2_list_test.go
new file mode 100644
index 0000000..2824cf0
--- /dev/null
+++ b/internal/manifest/docker_schema2_list_test.go
@@ -0,0 +1,109 @@
+package manifest
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ compressionTypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+)
+
+func TestSchema2ListPublicFromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("testdata", "v2list.manifest.json"))
+ require.NoError(t, err)
+
+ parser := func(m []byte) error {
+ _, err := Schema2ListPublicFromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "schema2-to-schema1-by-docker.json",
+ "v2s2.manifest.json",
+ "ociv1.manifest.json",
+ // Not "ociv1.image.index.json" yet, without validating mediaType the two are too similar to tell the difference.
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"config", "fsLayers", "history", "layers"})
+}
+
+func TestSchema2ListEditInstances(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("testdata", "v2list.manifest.json"))
+ require.NoError(t, err)
+ list, err := ListFromBlob(validManifest, GuessMIMEType(validManifest))
+ require.NoError(t, err)
+
+ expectedDigests := list.Instances()
+ editInstances := []ListEdit{}
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: list.Instances()[0],
+ UpdateDigest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ UpdateSize: 32,
+ UpdateMediaType: "something",
+ ListOperation: ListOpUpdate})
+ err = list.EditInstances(editInstances)
+ require.NoError(t, err)
+
+ expectedDigests[0] = editInstances[0].UpdateDigest
+ // order of old elements must remain the same.
+ assert.Equal(t, list.Instances(), expectedDigests)
+
+ instance, err := list.Instance(list.Instances()[0])
+ require.NoError(t, err)
+ assert.Equal(t, "something", instance.MediaType)
+ assert.Equal(t, int64(32), instance.Size)
+ // platform must match with instance platform set in `v2list.manifest.json` for the first instance
+ assert.Equal(t, &imgspecv1.Platform{Architecture: "ppc64le", OS: "linux", OSVersion: "", OSFeatures: []string(nil), Variant: ""}, instance.ReadOnly.Platform)
+ assert.Equal(t, []string{compressionTypes.GzipAlgorithmName}, instance.ReadOnly.CompressionAlgorithmNames)
+
+ // Create a fresh list
+ list, err = ListFromBlob(validManifest, GuessMIMEType(validManifest))
+ require.NoError(t, err)
+ originalListOrder := list.Instances()
+
+ editInstances = []ListEdit{}
+ editInstances = append(editInstances, ListEdit{
+ AddDigest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ AddSize: 32,
+ AddMediaType: "application/vnd.oci.image.manifest.v1+json",
+ AddPlatform: &imgspecv1.Platform{Architecture: "amd64", OS: "linux", OSFeatures: []string{"sse4"}},
+ ListOperation: ListOpAdd})
+ editInstances = append(editInstances, ListEdit{
+ AddDigest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ AddSize: 32,
+ AddMediaType: "application/vnd.oci.image.manifest.v1+json",
+ AddPlatform: &imgspecv1.Platform{Architecture: "amd64", OS: "linux", OSFeatures: []string{"sse4"}},
+ ListOperation: ListOpAdd})
+ err = list.EditInstances(editInstances)
+ require.NoError(t, err)
+
+ // Verify new elements are added to the end of old list
+ assert.Equal(t, append(slices.Clone(originalListOrder),
+ digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
+ ), list.Instances())
+}
+
+func TestSchema2ListFromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("testdata", "v2list.manifest.json"))
+ require.NoError(t, err)
+
+ parser := func(m []byte) error {
+ _, err := Schema2ListFromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "schema2-to-schema1-by-docker.json",
+ "v2s2.manifest.json",
+ "ociv1.manifest.json",
+ // Not "ociv1.image.index.json" yet, without validating mediaType the two are too similar to tell the difference.
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"config", "fsLayers", "history", "layers"})
+}
diff --git a/internal/manifest/errors.go b/internal/manifest/errors.go
new file mode 100644
index 0000000..6c8e233
--- /dev/null
+++ b/internal/manifest/errors.go
@@ -0,0 +1,56 @@
+package manifest
+
+import (
+ "fmt"
+
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: This is a duplicate of c/image/manifestDockerV2Schema2ConfigMediaType.
+// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
+const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
+//
+// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor)
+type NonImageArtifactError struct {
+ // Callers should not be blindly calling image-specific operations and only checking MIME types
+ // on failure; if they care about the artifact type, they should check before using it.
+ // If they blindly assume an image, they don’t really need this value; just a type check
+ // is sufficient for basic "we can only pull images" UI.
+ //
+ // Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
+ // e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
+ // to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
+ // type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
+ // based on that kind of data.
+ //
+ // So, let’s not expose this until a specific need is identified.
+ mimeType string
+}
+
+// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest.
+//
+// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig .
+func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error {
+ // Callers decide based on manifest.Config.MediaType that this is not an image;
+ // in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically
+ // more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON.
+ //
+ // If ArtifactType and Config.MediaType are both defined and non-trivial, presumably
+ // ArtifactType is the “top-level” one, although that’s not defined by the spec.
+ mimeType := manifest.ArtifactType
+ if mimeType == "" {
+ mimeType = manifest.Config.MediaType
+ }
+ return NonImageArtifactError{mimeType: mimeType}
+}
+
+func (e NonImageArtifactError) Error() string {
+ // Special-case these invalid mixed images, which show up from time to time:
+ if e.mimeType == dockerV2Schema2ConfigMediaType {
+ return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType)
+ }
+ return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
+}
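Editor's note — a minimal sketch (not part of the patch) of how a caller can detect this condition with errors.As, using the public alias c/image/manifest.NonImageArtifactError mentioned above; the helper name is hypothetical.

package main

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/manifest"
)

// describeConfigError distinguishes the "this is an artifact, not an image" case
// from other failures returned by image-specific operations.
func describeConfigError(err error) string {
	var artifactErr manifest.NonImageArtifactError
	if errors.As(err, &artifactErr) {
		return fmt.Sprintf("not a container image: %v", artifactErr)
	}
	return fmt.Sprintf("other failure: %v", err)
}

func main() {
	fmt.Println(describeConfigError(errors.New("placeholder error")))
}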
diff --git a/internal/manifest/list.go b/internal/manifest/list.go
new file mode 100644
index 0000000..189f1a7
--- /dev/null
+++ b/internal/manifest/list.go
@@ -0,0 +1,131 @@
+package manifest
+
+import (
+ "fmt"
+
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ListPublic is a subset of List which is a part of the public API;
+// so no methods can be added, removed or changed.
+//
+// Internal users should usually use List instead.
+type ListPublic interface {
+ // MIMEType returns the MIME type of this particular manifest list.
+ MIMEType() string
+
+ // Instances returns a list of the manifests that this list knows of, other than its own.
+ Instances() []digest.Digest
+
+ // Update information about the list's instances. The length of the passed-in slice must
+ // match the length of the list of instances which the list already contains, and every field
+ // must be specified.
+ UpdateInstances([]ListUpdate) error
+
+ // Instance returns the size and MIME type of a particular instance in the list.
+ Instance(digest.Digest) (ListUpdate, error)
+
+ // ChooseInstance selects which manifest is most appropriate for the platform described by the
+ // SystemContext, or for the current platform if the SystemContext doesn't specify any details.
+ ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
+
+ // Serialize returns the list in a blob format.
+ // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
+ // from one, even if no modifications were made!
+ Serialize() ([]byte, error)
+
+ // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
+ ConvertToMIMEType(mimeType string) (ListPublic, error)
+
+ // Clone returns a deep copy of this list and its contents.
+ Clone() ListPublic
+}
+
+// List is an interface for parsing and modifying lists of image manifests.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
+// directly.
+type List interface {
+ ListPublic
+ // CloneInternal returns a deep copy of this list and its contents.
+ CloneInternal() List
+ // ChooseInstanceByCompression selects which manifest is most appropriate for the platform and compression described by the
+ // SystemContext (or for the current platform if the SystemContext doesn't specify any details), preferring gzip when preferGzip
+ // is OptionalBoolTrue and choosing the best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined.
+ ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
+ // EditInstances edits information about the list's instances. It takes a slice of ListEdit where each element
+ // either modifies an existing instance or adds a new one to the manifest; the operation is
+ // selected on the basis of the configured ListOperation field.
+ EditInstances([]ListEdit) error
+}
+
+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
+// This is publicly visible as c/image/manifest.ListUpdate.
+type ListUpdate struct {
+ Digest digest.Digest
+ Size int64
+ MediaType string
+ // ReadOnly fields: may be set by Instance(), ignored by UpdateInstances()
+ ReadOnly struct {
+ Platform *imgspecv1.Platform
+ Annotations map[string]string
+ CompressionAlgorithmNames []string
+ }
+}
+
+type ListOp int
+
+const (
+ listOpInvalid ListOp = iota
+ ListOpAdd
+ ListOpUpdate
+)
+
+// ListEdit includes the fields which a List's EditInstances() method will modify.
+type ListEdit struct {
+ ListOperation ListOp
+
+ // If Op == ListOpUpdate (basically the previous UpdateInstances). All fields must be set.
+ UpdateOldDigest digest.Digest
+ UpdateDigest digest.Digest
+ UpdateSize int64
+ UpdateMediaType string
+ UpdateAffectAnnotations bool
+ UpdateAnnotations map[string]string
+ UpdateCompressionAlgorithms []compression.Algorithm
+
+ // If Op == ListOpAdd. All fields must be set.
+ AddDigest digest.Digest
+ AddSize int64
+ AddMediaType string
+ AddPlatform *imgspecv1.Platform
+ AddAnnotations map[string]string
+ AddCompressionAlgorithms []compression.Algorithm
+}
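Editor's note — a minimal sketch (not part of the patch) of driving EditInstances with a ListOpAdd entry; the fixture path, digest, and size are made up for illustration, and in-module access to internal/manifest is assumed.

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/internal/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	raw, err := os.ReadFile("index.json") // hypothetical path
	if err != nil {
		panic(err)
	}
	list, err := manifest.ListFromBlob(raw, manifest.GuessMIMEType(raw))
	if err != nil {
		panic(err)
	}
	// Append one new platform-specific instance; existing entries keep their relative order.
	err = list.EditInstances([]manifest.ListEdit{{
		ListOperation: manifest.ListOpAdd,
		AddDigest:     "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		AddSize:       1234,
		AddMediaType:  imgspecv1.MediaTypeImageManifest,
		AddPlatform:   &imgspecv1.Platform{Architecture: "arm64", OS: "linux"},
	}})
	if err != nil {
		panic(err)
	}
	fmt.Println(list.Instances())
}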
+
+// ListPublicFromBlob parses a list of manifests.
+// This is publicly visible as c/image/manifest.ListFromBlob.
+func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
+ list, err := ListFromBlob(manifest, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+ return list, nil
+}
+
+// ListFromBlob parses a list of manifests.
+func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
+ normalized := NormalizedMIMEType(manifestMIMEType)
+ switch normalized {
+ case DockerV2ListMediaType:
+ return Schema2ListFromManifest(manifest)
+ case imgspecv1.MediaTypeImageIndex:
+ return OCI1IndexFromManifest(manifest)
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
+ }
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
+}
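Editor's note — a minimal sketch (not part of the patch) of the public parsing path: ListPublicFromBlob followed by ChooseInstance for an explicitly requested platform. The file path and platform values are illustrative only, and in-module access to internal/manifest is assumed.

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/internal/manifest"
	"github.com/containers/image/v5/types"
)

func main() {
	raw, err := os.ReadFile("list.json") // hypothetical path
	if err != nil {
		panic(err)
	}
	list, err := manifest.ListPublicFromBlob(raw, manifest.GuessMIMEType(raw))
	if err != nil {
		panic(err)
	}
	// Select the instance for an explicit platform instead of the current one.
	instanceDigest, err := list.ChooseInstance(&types.SystemContext{
		OSChoice:           "linux",
		ArchitectureChoice: "arm64",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("selected instance:", instanceDigest)
}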
diff --git a/internal/manifest/list_test.go b/internal/manifest/list_test.go
new file mode 100644
index 0000000..2f1479b
--- /dev/null
+++ b/internal/manifest/list_test.go
@@ -0,0 +1,161 @@
+package manifest
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func pare(m List) {
+ if impl, ok := m.(*OCI1Index); ok {
+ impl.Annotations = nil
+ }
+ if impl, ok := m.(*Schema2List); ok {
+ for i := range impl.Manifests {
+ impl.Manifests[i].Platform.Features = nil
+ }
+ }
+}
+
+func TestParseLists(t *testing.T) {
+ cases := []struct {
+ path string
+ mimeType string
+ }{
+ {"ociv1.image.index.json", imgspecv1.MediaTypeImageIndex},
+ {"v2list.manifest.json", DockerV2ListMediaType},
+ }
+ for _, c := range cases {
+ manifest, err := os.ReadFile(filepath.Join("testdata", c.path))
+ require.NoError(t, err, "error reading file %q", filepath.Join("testdata", c.path))
+ assert.Equal(t, GuessMIMEType(manifest), c.mimeType)
+
+ // c/image/manifest.TestParseLists verifies that FromBlob refuses to parse the manifest list
+
+ m, err := ListFromBlob(manifest, c.mimeType)
+ require.NoError(t, err, "manifest list %q should parse as list types", c.path)
+ assert.Equal(t, m.MIMEType(), c.mimeType, "manifest %q is not of the expected MIME type", c.path)
+
+ clone := m.Clone()
+ assert.Equal(t, clone, m, "manifest %q is missing some fields after being cloned", c.path)
+
+ pare(m)
+
+ index, err := m.ConvertToMIMEType(imgspecv1.MediaTypeImageIndex)
+ require.NoError(t, err, "error converting %q to an OCI1Index", c.path)
+
+ list, err := m.ConvertToMIMEType(DockerV2ListMediaType)
+ require.NoError(t, err, "error converting %q to an Schema2List", c.path)
+
+ index2, err := list.ConvertToMIMEType(imgspecv1.MediaTypeImageIndex)
+ require.NoError(t, err)
+ assert.Equal(t, index, index2, "index %q lost data in conversion", c.path)
+
+ list2, err := index.ConvertToMIMEType(DockerV2ListMediaType)
+ require.NoError(t, err)
+ assert.Equal(t, list, list2, "list %q lost data in conversion", c.path)
+ }
+}
+
+func TestChooseInstance(t *testing.T) {
+ type expectedMatch struct {
+ arch, variant string
+ instanceDigest digest.Digest
+ }
+ chooseInstanceCalls := []func(sys *types.SystemContext, rawManifest []byte) (digest.Digest, error){
+ func(sys *types.SystemContext, rawManifest []byte) (digest.Digest, error) {
+ list, err := ListPublicFromBlob(rawManifest, GuessMIMEType(rawManifest))
+ require.NoError(t, err)
+ return list.ChooseInstance(sys)
+ },
+ // Gzip preference true.
+ func(sys *types.SystemContext, rawManifest []byte) (digest.Digest, error) {
+ list, err := ListFromBlob(rawManifest, GuessMIMEType(rawManifest))
+ require.NoError(t, err)
+ return list.ChooseInstanceByCompression(sys, types.OptionalBoolTrue)
+ },
+ // Gzip preference false.
+ func(sys *types.SystemContext, rawManifest []byte) (digest.Digest, error) {
+ list, err := ListFromBlob(rawManifest, GuessMIMEType(rawManifest))
+ require.NoError(t, err)
+ return list.ChooseInstanceByCompression(sys, types.OptionalBoolFalse)
+ },
+ func(sys *types.SystemContext, rawManifest []byte) (digest.Digest, error) {
+ list, err := ListFromBlob(rawManifest, GuessMIMEType(rawManifest))
+ require.NoError(t, err)
+ return list.ChooseInstanceByCompression(sys, types.OptionalBoolUndefined)
+ },
+ }
+ for _, manifestList := range []struct {
+ listFile string
+ matchedInstances []expectedMatch
+ unmatchedInstances []string
+ }{
+ {
+ listFile: "schema2list.json",
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:030fcb92e1487b18c974784dcc110a93147c9fc402188370fbfd17efabffc6af"},
+ {"s390x", "", "sha256:e5aa1b0a24620228b75382997a0977f609b3ca3a95533dafdef84c74cc8df642"},
+ {"arm", "v7", "sha256:b5dbad4bdb4444d919294afe49a095c23e86782f98cdf0aa286198ddb814b50b"},
+ {"arm64", "", "sha256:dc472a59fb006797aa2a6bfb54cc9c57959bb0a6d11fadaa608df8c16dea39cf"},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ { // Focus on ARM variant field testing
+ listFile: "schema2list-variants.json",
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610"},
+ {"arm", "v7", "sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39"},
+ {"arm", "v6", "sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39"},
+ {"arm", "v5", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53"},
+ {"arm", "", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53"},
+ {"arm", "unrecognized-present", "sha256:bcf9771c0b505e68c65440474179592ffdfa98790eb54ffbf129969c5e429990"},
+ {"arm", "unrecognized-not-present", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53"},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ {
+ listFile: "oci1index.json",
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"},
+ {"ppc64le", "", "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ } {
+ rawManifest, err := os.ReadFile(filepath.Join("testdata", manifestList.listFile))
+ require.NoError(t, err)
+ for _, chooseInstance := range chooseInstanceCalls {
+ for _, match := range manifestList.matchedInstances {
+ testName := fmt.Sprintf("%s %q+%q", manifestList.listFile, match.arch, match.variant)
+ digest, err := chooseInstance(&types.SystemContext{
+ ArchitectureChoice: match.arch,
+ VariantChoice: match.variant,
+ OSChoice: "linux",
+ }, rawManifest)
+ require.NoError(t, err, testName)
+ assert.Equal(t, match.instanceDigest, digest, testName)
+ }
+ for _, arch := range manifestList.unmatchedInstances {
+ _, err := chooseInstance(&types.SystemContext{
+ ArchitectureChoice: arch,
+ OSChoice: "linux",
+ }, rawManifest)
+ assert.Error(t, err)
+ }
+ }
+ }
+}
diff --git a/internal/manifest/manifest.go b/internal/manifest/manifest.go
new file mode 100644
index 0000000..1dbcc14
--- /dev/null
+++ b/internal/manifest/manifest.go
@@ -0,0 +1,167 @@
+package manifest
+
+import (
+ "encoding/json"
+
+ "github.com/containers/libtrust"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+ // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+ DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+ // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+ DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+ // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+ DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+ // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+ DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+ // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+ DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+ DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+ // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+ DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+ // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+ // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+// This is publicly visible as c/image/manifest.GuessMIMEType.
+func GuessMIMEType(manifest []byte) string {
+ // A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
+ // Also docker/distribution/manifest.Versioned.
+ meta := struct {
+ MediaType string `json:"mediaType"`
+ SchemaVersion int `json:"schemaVersion"`
+ Signatures any `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(manifest, &meta); err != nil {
+ return ""
+ }
+
+ switch meta.MediaType {
+ case DockerV2Schema2MediaType, DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
+ return meta.MediaType
+ }
+ // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures (i.e. computing the correct manifest digest).
+ switch meta.SchemaVersion {
+ case 1:
+ if meta.Signatures != nil {
+ return DockerV2Schema1SignedMediaType
+ }
+ return DockerV2Schema1MediaType
+ case 2:
+ // Best effort to understand if this is an OCI image since mediaType
+ // wasn't in the manifest for OCI image-spec < 1.0.2.
+ // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+ ociMan := struct {
+ Config struct {
+ MediaType string `json:"mediaType"`
+ } `json:"config"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociMan); err != nil {
+ return ""
+ }
+ switch ociMan.Config.MediaType {
+ case imgspecv1.MediaTypeImageConfig:
+ return imgspecv1.MediaTypeImageManifest
+ case DockerV2Schema2ConfigMediaType:
+ // This case should not happen since a Docker image
+ // must declare a top-level media type and
+ // `meta.MediaType` has already been checked.
+ return DockerV2Schema2MediaType
+ }
+ // Maybe an image index or an OCI artifact.
+ ociIndex := struct {
+ Manifests []imgspecv1.Descriptor `json:"manifests"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+ return ""
+ }
+ if len(ociIndex.Manifests) != 0 {
+ if ociMan.Config.MediaType == "" {
+ return imgspecv1.MediaTypeImageIndex
+ }
+ // FIXME: this is mixing media types of manifests and configs.
+ return ociMan.Config.MediaType
+ }
+ // It's most likely an OCI artifact with a custom config media
+ // type which is not (and cannot be) covered by the media-type
+ // checks above.
+ return imgspecv1.MediaTypeImageManifest
+ }
+ return ""
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+// This is publicly visible as c/image/manifest.Digest.
+func Digest(manifest []byte) (digest.Digest, error) {
+ if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+ sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+ if err != nil {
+ return "", err
+ }
+ manifest, err = sig.Payload()
+ if err != nil {
+ // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+ // that libtrust itself has josebase64UrlEncode()d
+ return "", err
+ }
+ }
+
+ return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+// This is publicly visible as c/image/manifest.MatchesDigest.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+ // This should eventually support various digest types.
+ actualDigest, err := Digest(manifest)
+ if err != nil {
+ return false, err
+ }
+ return expectedDigest == actualDigest, nil
+}
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+// This is publicly visible as c/image/manifest.NormalizedMIMEType.
+func NormalizedMIMEType(input string) string {
+ switch input {
+ // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+ // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+ // need to happen within the ImageSource.
+ case "application/json":
+ return DockerV2Schema1SignedMediaType
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
+ DockerV2Schema2MediaType,
+ DockerV2ListMediaType:
+ return input
+ default:
+ // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+ // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+ // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+ //
+ // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+ // This makes no real sense, but it happens because requests for manifests are redirected to a content distribution
+ // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+ return DockerV2Schema1SignedMediaType
+ }
+}
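Editor's note — a minimal sketch (not part of the patch) tying GuessMIMEType, Digest, MatchesDigest, and NormalizedMIMEType together on one blob; the file path is hypothetical and in-module access to internal/manifest is assumed.

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/internal/manifest"
)

func main() {
	raw, err := os.ReadFile("manifest.json") // hypothetical path
	if err != nil {
		panic(err)
	}
	// Guess the MIME type from the blob itself; out-of-band metadata is preferred when available.
	mimeType := manifest.GuessMIMEType(raw)
	// Digest strips v2s1 JWS signatures before hashing, so it matches the registry-side digest.
	dgst, err := manifest.Digest(raw)
	if err != nil {
		panic(err)
	}
	ok, err := manifest.MatchesDigest(raw, dgst)
	if err != nil {
		panic(err)
	}
	fmt.Printf("type=%s digest=%s matches=%v\n", manifest.NormalizedMIMEType(mimeType), dgst, ok)
}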
diff --git a/internal/manifest/manifest_test.go b/internal/manifest/manifest_test.go
new file mode 100644
index 0000000..8dc9879
--- /dev/null
+++ b/internal/manifest/manifest_test.go
@@ -0,0 +1,134 @@
+package manifest
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+func TestGuessMIMEType(t *testing.T) {
+ cases := []struct {
+ path string
+ mimeType string
+ }{
+ {"v2s2.manifest.json", DockerV2Schema2MediaType},
+ {"v2list.manifest.json", DockerV2ListMediaType},
+ {"v2s1.manifest.json", DockerV2Schema1SignedMediaType},
+ {"v2s1-unsigned.manifest.json", DockerV2Schema1MediaType},
+ {"v2s1-invalid-signatures.manifest.json", DockerV2Schema1SignedMediaType},
+ {"v2s2nomime.manifest.json", DockerV2Schema2MediaType}, // It is unclear whether this one is legal, but we should guess v2s2 if anything at all.
+ {"unknown-version.manifest.json", ""},
+ {"non-json.manifest.json", ""}, // Not a manifest (nor JSON) at all
+ {"ociv1.manifest.json", imgspecv1.MediaTypeImageManifest},
+ {"ociv1.artifact.json", imgspecv1.MediaTypeImageManifest},
+ {"ociv1.image.index.json", imgspecv1.MediaTypeImageIndex},
+ {"ociv1nomime.manifest.json", imgspecv1.MediaTypeImageManifest},
+ {"ociv1nomime.artifact.json", imgspecv1.MediaTypeImageManifest},
+ {"ociv1nomime.image.index.json", imgspecv1.MediaTypeImageIndex},
+ }
+
+ for _, c := range cases {
+ manifest, err := os.ReadFile(filepath.Join("testdata", c.path))
+ require.NoError(t, err)
+ mimeType := GuessMIMEType(manifest)
+ assert.Equal(t, c.mimeType, mimeType, c.path)
+ }
+}
+
+func TestDigest(t *testing.T) {
+ cases := []struct {
+ path string
+ expectedDigest digest.Digest
+ }{
+ {"v2s2.manifest.json", TestDockerV2S2ManifestDigest},
+ {"v2s1.manifest.json", TestDockerV2S1ManifestDigest},
+ {"v2s1-unsigned.manifest.json", TestDockerV2S1UnsignedManifestDigest},
+ }
+ for _, c := range cases {
+ manifest, err := os.ReadFile(filepath.Join("testdata", c.path))
+ require.NoError(t, err)
+ actualDigest, err := Digest(manifest)
+ require.NoError(t, err)
+ assert.Equal(t, c.expectedDigest, actualDigest)
+ }
+
+ manifest, err := os.ReadFile("testdata/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ _, err = Digest(manifest)
+ assert.Error(t, err)
+
+ actualDigest, err := Digest([]byte{})
+ require.NoError(t, err)
+ assert.Equal(t, digest.Digest(digestSha256EmptyTar), actualDigest)
+}
+
+func TestMatchesDigest(t *testing.T) {
+ cases := []struct {
+ path string
+ expectedDigest digest.Digest
+ result bool
+ }{
+ // Success
+ {"v2s2.manifest.json", TestDockerV2S2ManifestDigest, true},
+ {"v2s1.manifest.json", TestDockerV2S1ManifestDigest, true},
+ // No match (switched s1/s2)
+ {"v2s2.manifest.json", TestDockerV2S1ManifestDigest, false},
+ {"v2s1.manifest.json", TestDockerV2S2ManifestDigest, false},
+ // Unrecognized algorithm
+ {"v2s2.manifest.json", digest.Digest("md5:2872f31c5c1f62a694fbd20c1e85257c"), false},
+ // Mangled format
+ {"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String() + "abc"), false},
+ {"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String()[:20]), false},
+ {"v2s2.manifest.json", digest.Digest(""), false},
+ }
+ for _, c := range cases {
+ manifest, err := os.ReadFile(filepath.Join("testdata", c.path))
+ require.NoError(t, err)
+ res, err := MatchesDigest(manifest, c.expectedDigest)
+ require.NoError(t, err)
+ assert.Equal(t, c.result, res)
+ }
+
+ manifest, err := os.ReadFile("testdata/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ // Even a correct SHA256 hash is rejected if we can't strip the JSON signature.
+ res, err := MatchesDigest(manifest, digest.FromBytes(manifest))
+ assert.False(t, res)
+ assert.Error(t, err)
+
+ res, err = MatchesDigest([]byte{}, digest.Digest(digestSha256EmptyTar))
+ assert.True(t, res)
+ assert.NoError(t, err)
+}
+
+func TestNormalizedMIMEType(t *testing.T) {
+ for _, c := range []string{ // Valid MIME types, normalized to themselves
+ DockerV2Schema1MediaType,
+ DockerV2Schema1SignedMediaType,
+ DockerV2Schema2MediaType,
+ DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
+ } {
+ res := NormalizedMIMEType(c)
+ assert.Equal(t, c, res, c)
+ }
+ for _, c := range []string{
+ "application/json",
+ "text/plain",
+ "not at all a valid MIME type",
+ "",
+ } {
+ res := NormalizedMIMEType(c)
+ assert.Equal(t, DockerV2Schema1SignedMediaType, res, c)
+ }
+}
diff --git a/internal/manifest/oci_index.go b/internal/manifest/oci_index.go
new file mode 100644
index 0000000..d8d0651
--- /dev/null
+++ b/internal/manifest/oci_index.go
@@ -0,0 +1,446 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "runtime"
+
+ platform "github.com/containers/image/v5/internal/pkg/platform"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspec "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+)
+
+const (
+ // OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index.
+ // The value of the annotation must be the string "true".
+ // If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression.
+ // That also suggests that this instance benefits from
+ // Zstd compression, so it can be preferred by compatible consumers over instances that
+ // use gzip, depending on their local policy.
+ OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd"
+ OCI1InstanceAnnotationCompressionZSTDValue = "true"
+)
+
+// OCI1IndexPublic is just an alias for the OCI index type, but one which we can
+// provide methods for.
+// This is publicly visible as c/image/manifest.OCI1Index.
+// Internal users should usually use OCI1Index instead.
+type OCI1IndexPublic struct {
+ imgspecv1.Index
+}
+
+// MIMEType returns the MIME type of this particular manifest index.
+func (index *OCI1IndexPublic) MIMEType() string {
+ return imgspecv1.MediaTypeImageIndex
+}
+
+// Instances returns a slice of digests of the manifests that this index knows of.
+func (index *OCI1IndexPublic) Instances() []digest.Digest {
+ results := make([]digest.Digest, len(index.Manifests))
+ for i, m := range index.Manifests {
+ results[i] = m.Digest
+ }
+ return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the index.
+func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+ for _, manifest := range index.Manifests {
+ if manifest.Digest == instanceDigest {
+ ret := ListUpdate{
+ Digest: manifest.Digest,
+ Size: manifest.Size,
+ MediaType: manifest.MediaType,
+ }
+ ret.ReadOnly.Platform = manifest.Platform
+ ret.ReadOnly.Annotations = manifest.Annotations
+ ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
+ return ret, nil
+ }
+ }
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
+ editInstances := []ListEdit{}
+ for i, instance := range updates {
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: index.Manifests[i].Digest,
+ UpdateDigest: instance.Digest,
+ UpdateSize: instance.Size,
+ UpdateMediaType: instance.MediaType,
+ ListOperation: ListOpUpdate})
+ }
+ return index.editInstances(editInstances)
+}
+
+func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string {
+ result := make([]string, 0, 1)
+ if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue {
+ result = append(result, compression.ZstdAlgorithmName)
+ }
+ // No compression was detected, hence assume instance has default compression `Gzip`
+ if len(result) == 0 {
+ result = append(result, compression.GzipAlgorithmName)
+ }
+ return result
+}
+
+func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) {
+ // TODO: This should also delete the algorithm if map already contains an algorithm and compressionAlgorithm
+ // list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable
+ // and full compressionAlgorithms list.
+ if *annotationsMap == nil && len(compressionAlgorithms) > 0 {
+ *annotationsMap = map[string]string{}
+ }
+ for _, algo := range compressionAlgorithms {
+ switch algo.Name() {
+ case compression.ZstdAlgorithmName:
+ (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
+ default:
+ continue
+ }
+ }
+}
+
+func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
+ addedEntries := []imgspecv1.Descriptor{}
+ updatedAnnotations := false
+ for i, editInstance := range editInstances {
+ switch editInstance.ListOperation {
+ case ListOpUpdate:
+ if err := editInstance.UpdateOldDigest.Validate(); err != nil {
+ return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
+ }
+ if err := editInstance.UpdateDigest.Validate(); err != nil {
+ return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
+ }
+ targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool {
+ return m.Digest == editInstance.UpdateOldDigest
+ })
+ if targetIndex == -1 {
+ return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
+ }
+ index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+ if editInstance.UpdateSize < 0 {
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
+ }
+ index.Manifests[targetIndex].Size = editInstance.UpdateSize
+ if editInstance.UpdateMediaType == "" {
+ return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+ }
+ index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+ if editInstance.UpdateAnnotations != nil {
+ updatedAnnotations = true
+ if editInstance.UpdateAffectAnnotations {
+ index.Manifests[targetIndex].Annotations = maps.Clone(editInstance.UpdateAnnotations)
+ } else {
+ if index.Manifests[targetIndex].Annotations == nil {
+ index.Manifests[targetIndex].Annotations = map[string]string{}
+ }
+ maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations)
+ }
+ }
+ addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations)
+ case ListOpAdd:
+ annotations := map[string]string{}
+ if editInstance.AddAnnotations != nil {
+ annotations = maps.Clone(editInstance.AddAnnotations)
+ }
+ addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations)
+ addedEntries = append(addedEntries, imgspecv1.Descriptor{
+ MediaType: editInstance.AddMediaType,
+ Size: editInstance.AddSize,
+ Digest: editInstance.AddDigest,
+ Platform: editInstance.AddPlatform,
+ Annotations: annotations})
+ default:
+ return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
+ }
+ }
+ if len(addedEntries) != 0 {
+ // slices.Clone() here to ensure the slice uses a private backing array;
+ // an external caller could have manually created OCI1IndexPublic with a slice with extra capacity.
+ index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
+ }
+ if len(addedEntries) != 0 || updatedAnnotations {
+ slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
+ // FIXME? With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and generalize
+ // it to more algorithms?
+ aZstd := instanceIsZstd(a)
+ bZstd := instanceIsZstd(b)
+ switch {
+ case aZstd == bZstd:
+ return 0
+ case !aZstd: // Implies bZstd
+ return -1
+ default: // aZstd && !bZstd
+ return 1
+ }
+ })
+ }
+ return nil
+}
+
+func (index *OCI1Index) EditInstances(editInstances []ListEdit) error {
+ return index.editInstances(editInstances)
+}
+
+// instanceIsZstd returns true if the instance is a zstd instance, otherwise false.
+func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
+ if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
+ return true
+ }
+ return false
+}
+
+type instanceCandidate struct {
+ platformIndex int // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.MaxInt if the candidate doesn’t have a platform
+ isZstd bool // tells whether this particular instance is a zstd instance
+ manifestPosition int // A zero-based index of the instance in the manifest list
+ digest digest.Digest // Instance digest
+}
+
+func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool {
+ switch {
+ case ic.platformIndex != other.platformIndex:
+ return ic.platformIndex < other.platformIndex
+ case ic.isZstd != other.isZstd:
+ if !preferGzip {
+ return ic.isZstd
+ } else {
+ return !ic.isZstd
+ }
+ case ic.manifestPosition != other.manifestPosition:
+ return ic.manifestPosition < other.manifestPosition
+ }
+ panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
+}
+
+// chooseInstance is a private equivalent to ChooseInstanceByCompression,
+// shared by ChooseInstance and ChooseInstanceByCompression.
+func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
+ didPreferGzip := false
+ if preferGzip == types.OptionalBoolTrue {
+ didPreferGzip = true
+ }
+ wantedPlatforms, err := platform.WantedPlatforms(ctx)
+ if err != nil {
+ return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
+ }
+ var bestMatch *instanceCandidate
+ for manifestIndex, d := range index.Manifests {
+ candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
+ if d.Platform != nil {
+ imagePlatform := ociPlatformClone(*d.Platform)
+ platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
+ return platform.MatchesPlatform(imagePlatform, wantedPlatform)
+ })
+ if platformIndex == -1 {
+ continue
+ }
+ candidate.platformIndex = platformIndex
+ }
+ if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
+ bestMatch = &candidate
+ }
+ }
+ if bestMatch != nil {
+ return bestMatch.digest, nil
+ }
+ return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+}
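Editor's note — a minimal sketch (not part of the patch) of the zstd/gzip preference: the same index queried with ChooseInstanceByCompression under both preferGzip settings. The fixture path is hypothetical; in-module access to internal/manifest is assumed.

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/internal/manifest"
	"github.com/containers/image/v5/types"
)

func main() {
	raw, err := os.ReadFile("oci-index.json") // hypothetical path
	if err != nil {
		panic(err)
	}
	index, err := manifest.OCI1IndexFromManifest(raw)
	if err != nil {
		panic(err)
	}
	sys := &types.SystemContext{OSChoice: "linux", ArchitectureChoice: "amd64"}
	// Default behaviour: zstd-annotated instances win over gzip ones for the same platform.
	zstdFirst, err := index.ChooseInstanceByCompression(sys, types.OptionalBoolUndefined)
	if err != nil {
		panic(err)
	}
	// preferGzip=true flips the tie-break back towards gzip instances.
	gzipFirst, err := index.ChooseInstanceByCompression(sys, types.OptionalBoolTrue)
	if err != nil {
		panic(err)
	}
	fmt.Println(zstdFirst, gzipFirst)
}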
+
+func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
+ return index.chooseInstance(ctx, preferGzip)
+}
+
+// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
+// of the image which is appropriate for the current environment.
+func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
+ return index.chooseInstance(ctx, types.OptionalBoolFalse)
+}
+
+// Serialize returns the index in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (index *OCI1IndexPublic) Serialize() ([]byte, error) {
+ buf, err := json.Marshal(index)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
+ }
+ return buf, nil
+}
+
+// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the
+// supplied data.
+// This is publicly visible as c/image/manifest.OCI1IndexFromComponents.
+func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic {
+ index := OCI1IndexPublic{
+ imgspecv1.Index{
+ Versioned: imgspec.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageIndex,
+ Manifests: make([]imgspecv1.Descriptor, len(components)),
+ Annotations: maps.Clone(annotations),
+ },
+ }
+ for i, component := range components {
+ var platform *imgspecv1.Platform
+ if component.Platform != nil {
+ platformCopy := ociPlatformClone(*component.Platform)
+ platform = &platformCopy
+ }
+ m := imgspecv1.Descriptor{
+ MediaType: component.MediaType,
+ Size: component.Size,
+ Digest: component.Digest,
+ URLs: slices.Clone(component.URLs),
+ Annotations: maps.Clone(component.Annotations),
+ Platform: platform,
+ }
+ index.Manifests[i] = m
+ }
+ return &index
+}
+
+// OCI1IndexPublicClone creates a deep copy of the passed-in index.
+// This is publicly visible as c/image/manifest.OCI1IndexClone.
+func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
+ return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations)
+}
+
+// ToOCI1Index returns the index encoded as an OCI1 index.
+func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
+ return OCI1IndexPublicClone(index), nil
+}
+
+// ToSchema2List returns the index encoded as a Schema2 list.
+func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
+ components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
+ for _, manifest := range index.Manifests {
+ platform := manifest.Platform
+ if platform == nil {
+ platform = &imgspecv1.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ }
+ }
+ components = append(components, Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ MediaType: manifest.MediaType,
+ Size: manifest.Size,
+ Digest: manifest.Digest,
+ URLs: slices.Clone(manifest.URLs),
+ },
+ schema2PlatformSpecFromOCIPlatform(*platform),
+ })
+ }
+ s2 := Schema2ListPublicFromComponents(components)
+ return s2, nil
+}
+
+// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+// This is publicly visible as c/image/manifest.OCI1IndexFromManifest.
+func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) {
+ index := OCI1IndexPublic{
+ Index: imgspecv1.Index{
+ Versioned: imgspec.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageIndex,
+ Manifests: []imgspecv1.Descriptor{},
+ Annotations: make(map[string]string),
+ },
+ }
+ if err := json.Unmarshal(manifest, &index); err != nil {
+ return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
+ }
+ if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
+ AllowedFieldManifests); err != nil {
+ return nil, err
+ }
+ return &index, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (index *OCI1IndexPublic) Clone() ListPublic {
+ return OCI1IndexPublicClone(index)
+}
+
+// ConvertToMIMEType converts the passed-in image index to a manifest list of
+// the specified type.
+func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
+ switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
+ case DockerV2ListMediaType:
+ return index.ToSchema2List()
+ case imgspecv1.MediaTypeImageIndex:
+ return index.Clone(), nil
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
+ default:
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
+ }
+}
+
+type OCI1Index struct {
+ OCI1IndexPublic
+}
+
+func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index {
+ return &OCI1Index{*public}
+}
+
+func (index *OCI1Index) CloneInternal() List {
+ return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic))
+}
+
+func (index *OCI1Index) Clone() ListPublic {
+ return index.CloneInternal()
+}
+
+// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
+ public, err := OCI1IndexPublicFromManifest(manifest)
+ if err != nil {
+ return nil, err
+ }
+ return oci1IndexFromPublic(public), nil
+}
+
+// ociPlatformClone returns an independent copy of p.
+func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform {
+ // The only practical way in Go to give read-only access to an array is to copy it.
+ // The only practical way in Go to copy a deep structure is to either do it manually field by field,
+ // or to use reflection (incl. a round-trip through JSON, which uses reflection).
+ //
+ // The combination of the two is just sad, and leads to code like this, which will
+ // need to be updated with every new Platform field.
+ return imgspecv1.Platform{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ OSVersion: p.OSVersion,
+ OSFeatures: slices.Clone(p.OSFeatures),
+ Variant: p.Variant,
+ }
+}
+
+// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure.
+func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec {
+ return Schema2PlatformSpec{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ OSVersion: p.OSVersion,
+ OSFeatures: slices.Clone(p.OSFeatures),
+ Variant: p.Variant,
+ Features: nil,
+ }
+}
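Editor's note — a minimal sketch (not part of the patch) of building an index with OCI1IndexPublicFromComponents and serializing it; the descriptors, digests, and annotation key are made up for illustration, and in-module access to internal/manifest is assumed.

package main

import (
	"fmt"

	"github.com/containers/image/v5/internal/manifest"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	components := []imgspecv1.Descriptor{
		{
			MediaType: imgspecv1.MediaTypeImageManifest,
			Size:      1234,
			Digest:    digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
			Platform:  &imgspecv1.Platform{Architecture: "amd64", OS: "linux"},
		},
		{
			MediaType: imgspecv1.MediaTypeImageManifest,
			Size:      5678,
			Digest:    digest.Digest("sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
			Platform:  &imgspecv1.Platform{Architecture: "arm64", OS: "linux"},
		},
	}
	// Platforms and annotations are deep-copied, so later edits to `components` do not leak into the index.
	index := manifest.OCI1IndexPublicFromComponents(components, map[string]string{"org.example.note": "demo"})
	blob, err := index.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
}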
diff --git a/internal/manifest/oci_index_test.go b/internal/manifest/oci_index_test.go
new file mode 100644
index 0000000..3c9a3ac
--- /dev/null
+++ b/internal/manifest/oci_index_test.go
@@ -0,0 +1,265 @@
+package manifest
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/pkg/compression"
+ compressionTypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOCI1IndexPublicFromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("testdata", "ociv1.image.index.json"))
+ require.NoError(t, err)
+
+ parser := func(m []byte) error {
+ _, err := OCI1IndexPublicFromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "schema2-to-schema1-by-docker.json",
+ "v2s2.manifest.json",
+ // Not "v2list.manifest.json" yet, without mediaType the two are too similar to tell the difference.
+ "ociv1.manifest.json",
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"config", "fsLayers", "history", "layers"})
+}
+
+func TestOCI1IndexFromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("testdata", "ociv1.image.index.json"))
+ require.NoError(t, err)
+
+ parser := func(m []byte) error {
+ _, err := OCI1IndexFromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "schema2-to-schema1-by-docker.json",
+ "v2s2.manifest.json",
+ // Not "v2list.manifest.json" yet, without mediaType the two are too similar to tell the difference.
+ "ociv1.manifest.json",
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"config", "fsLayers", "history", "layers"})
+}
+
+func TestOCI1EditInstances(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("testdata", "ociv1.image.index.json"))
+ require.NoError(t, err)
+ list, err := ListFromBlob(validManifest, GuessMIMEType(validManifest))
+ require.NoError(t, err)
+
+ expectedDigests := list.Instances()
+ editInstances := []ListEdit{}
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: list.Instances()[0],
+ UpdateDigest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ UpdateSize: 32,
+ UpdateMediaType: "something",
+ ListOperation: ListOpUpdate})
+ err = list.EditInstances(editInstances)
+ require.NoError(t, err)
+
+ expectedDigests[0] = editInstances[0].UpdateDigest
+ // order of old elements must remain the same.
+ assert.Equal(t, list.Instances(), expectedDigests)
+
+ instance, err := list.Instance(list.Instances()[0])
+ require.NoError(t, err)
+ assert.Equal(t, "something", instance.MediaType)
+ assert.Equal(t, int64(32), instance.Size)
+ // platform must match with what was set in `ociv1.image.index.json` for the first instance
+ assert.Equal(t, &imgspecv1.Platform{Architecture: "ppc64le", OS: "linux", OSVersion: "", OSFeatures: []string(nil), Variant: ""}, instance.ReadOnly.Platform)
+ assert.Equal(t, []string{compressionTypes.GzipAlgorithmName}, instance.ReadOnly.CompressionAlgorithmNames)
+
+ // Create a fresh list
+ list, err = ListFromBlob(validManifest, GuessMIMEType(validManifest))
+ require.NoError(t, err)
+
+ // Verify correct zstd sorting
+ editInstances = []ListEdit{}
+ annotations := map[string]string{"io.github.containers.compression.zstd": "true"}
+ // without zstd
+ editInstances = append(editInstances, ListEdit{
+ AddDigest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ AddSize: 32,
+ AddMediaType: "application/vnd.oci.image.manifest.v1+json",
+ AddPlatform: &imgspecv1.Platform{Architecture: "amd64", OS: "linux", OSFeatures: []string{"sse4"}},
+ ListOperation: ListOpAdd})
+ // with zstd
+ editInstances = append(editInstances, ListEdit{
+ AddDigest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
+ AddSize: 32,
+ AddMediaType: "application/vnd.oci.image.manifest.v1+json",
+ AddPlatform: &imgspecv1.Platform{Architecture: "amd64", OS: "linux", OSFeatures: []string{"sse4"}},
+ AddAnnotations: annotations,
+ ListOperation: ListOpAdd})
+ // with zstd specified via compression algorithms; the annotation must be added automatically
+ editInstances = append(editInstances, ListEdit{
+ AddDigest: "sha256:hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh",
+ AddSize: 32,
+ AddMediaType: "application/vnd.oci.image.manifest.v1+json",
+ AddPlatform: &imgspecv1.Platform{Architecture: "amd64", OS: "linux", OSFeatures: []string{"sse4"}},
+ AddCompressionAlgorithms: []compression.Algorithm{compression.Zstd},
+ AddAnnotations: map[string]string{},
+ ListOperation: ListOpAdd})
+ // with zstd specified via compression algorithms; the annotation must be added automatically and AddAnnotations is unset
+ editInstances = append(editInstances, ListEdit{
+ AddDigest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ AddSize: 32,
+ AddMediaType: "application/vnd.oci.image.manifest.v1+json",
+ AddPlatform: &imgspecv1.Platform{Architecture: "amd64", OS: "linux", OSFeatures: []string{"sse4"}},
+ AddCompressionAlgorithms: []compression.Algorithm{compression.Zstd},
+ ListOperation: ListOpAdd})
+ // without zstd
+ editInstances = append(editInstances, ListEdit{
+ AddDigest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ AddSize: 32,
+ AddMediaType: "application/vnd.oci.image.manifest.v1+json",
+ AddPlatform: &imgspecv1.Platform{Architecture: "amd64", OS: "linux", OSFeatures: []string{"sse4"}},
+ ListOperation: ListOpAdd})
+ err = list.EditInstances(editInstances)
+ require.NoError(t, err)
+
+ // Zstd instances must be sorted after the default gzip ones, and the order of prior elements must be preserved.
+ assert.Equal(t, list.Instances(), []digest.Digest{digest.Digest("sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"), digest.Digest("sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"), digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"), digest.Digest("sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"), digest.Digest("sha256:hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh"), digest.Digest("sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")})
+
+ instance, err = list.Instance(digest.Digest("sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"))
+ require.NoError(t, err)
+ // Verify if annotations are preserved and correctly set in ReadOnly field.
+ assert.Equal(t, annotations, instance.ReadOnly.Annotations)
+ // Verify compression of an instance is added to the ReadOnly CompressionAlgorithmNames where compression name
+ // is internally derived from the appropriate annotations.
+ assert.Equal(t, []string{compressionTypes.ZstdAlgorithmName}, instance.ReadOnly.CompressionAlgorithmNames)
+
+ // Update list and remove zstd annotation from existing instance, and verify if resorting works
+ editInstances = []ListEdit{}
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: digest.Digest("sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ UpdateDigest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ UpdateSize: 32,
+ UpdateMediaType: "application/vnd.oci.image.manifest.v1+json",
+ UpdateAffectAnnotations: true,
+ UpdateAnnotations: map[string]string{},
+ ListOperation: ListOpUpdate})
+ err = list.EditInstances(editInstances)
+ require.NoError(t, err)
+ // Digest `ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff` should be re-ordered on update.
+ assert.Equal(t, list.Instances(), []digest.Digest{digest.Digest("sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"), digest.Digest("sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"), digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"), digest.Digest("sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), digest.Digest("sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"), digest.Digest("sha256:hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")})
+
+}
+
+func TestOCI1IndexChooseInstanceByCompression(t *testing.T) {
+ type expectedMatch struct {
+ arch, variant string
+ instanceDigest digest.Digest
+ preferGzip bool
+ }
+ for _, manifestList := range []struct {
+ listFile string
+ matchedInstances []expectedMatch
+ unmatchedInstances []string
+ }{
+ {
+ listFile: "oci1.index.zstd-selection.json",
+ matchedInstances: []expectedMatch{
+ // out of gzip and zstd in amd64 select the first zstd image
+ {"amd64", "", "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", false},
+				// out of multiple gzip images in arm64, select the first one to ensure the original selection logic is preserved
+ {"arm64", "", "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", false},
+				// select the single gzip s390x image
+ {"s390x", "", "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", false},
+ // out of gzip and zstd in amd64 select the first gzip image
+ {"amd64", "", "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", true},
+				// out of multiple gzip images in arm64, select the first one to ensure the original selection logic is preserved
+ {"arm64", "", "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", true},
+				// select the single gzip s390x image
+ {"s390x", "", "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", true},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ { // Focus on ARM variant field testing
+ listFile: "ocilist-variants.json",
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610", false},
+ {"arm", "v7", "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", false},
+ {"arm", "v6", "sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39", false},
+ {"arm", "v5", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53", false},
+ {"arm", "", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53", false},
+ {"arm", "unrecognized-present", "sha256:bcf9771c0b505e68c65440474179592ffdfa98790eb54ffbf129969c5e429990", false},
+ {"arm", "unrecognized-not-present", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53", false},
+ // preferGzip true
+ {"amd64", "", "sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610", true},
+ {"arm", "v7", "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", true},
+ {"arm", "v6", "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", true},
+ {"arm", "v5", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53", true},
+ {"arm", "", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53", true},
+ {"arm", "unrecognized-present", "sha256:bcf9771c0b505e68c65440474179592ffdfa98790eb54ffbf129969c5e429990", true},
+ {"arm", "unrecognized-not-present", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53", true},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ {
+ listFile: "oci1.index.zstd-selection2.json",
+			// out of a list where the first instance is gzip, select the first occurrence of zstd out of many
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", false},
+ {"amd64", "", "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", true},
+ // must return first gzip even if the first entry is zstd
+ {"arm64", "", "sha256:6dc14a60d2ba724646cfbf5fccbb9a618a5978a64a352e060b17caf5e005da9d", true},
+ // must return first zstd even if the first entry for same platform is gzip
+ {"arm64", "", "sha256:1c98002b30a71b08ab175915ce7c8fb8da9e9b502ae082d6f0c572bac9dee324", false},
+ // must return first zstd instance with no platform
+ {"matchesImageWithNoPlatform", "", "sha256:f2f5f52a2cf2c51d4cac6df0545f751c0adc3f3427eb47c59fcb32894503e18f", false},
+ // must return first gzip instance with no platform
+ {"matchesImageWithNoPlatform", "", "sha256:c76757bb6006babdd8464dbf2f1157fdfa6fead0bc6f84f15816a32d6f68f706", true},
+ },
+ },
+ {
+ listFile: "oci1index.json",
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", false},
+ {"ppc64le", "", "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", false},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ } {
+ rawManifest, err := os.ReadFile(filepath.Join("testdata", manifestList.listFile))
+ require.NoError(t, err)
+ list, err := ListFromBlob(rawManifest, GuessMIMEType(rawManifest))
+ require.NoError(t, err)
+ for _, match := range manifestList.matchedInstances {
+ testName := fmt.Sprintf("%s %q+%q", manifestList.listFile, match.arch, match.variant)
+ digest, err := list.ChooseInstanceByCompression(&types.SystemContext{
+ ArchitectureChoice: match.arch,
+ VariantChoice: match.variant,
+ OSChoice: "linux",
+ }, types.NewOptionalBool(match.preferGzip))
+ require.NoError(t, err, testName)
+ assert.Equal(t, match.instanceDigest, digest, testName)
+ }
+ for _, arch := range manifestList.unmatchedInstances {
+ _, err := list.ChooseInstanceByCompression(&types.SystemContext{
+ ArchitectureChoice: arch,
+ OSChoice: "linux",
+ }, types.NewOptionalBool(false))
+ assert.Error(t, err)
+ }
+ }
+}
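Editor's note: the table-driven test above exercises ChooseInstanceByCompression through ListFromBlob and GuessMIMEType. As a minimal, hypothetical sketch (the helper name and the amd64/linux choice are illustrative, not part of this patch), the call pattern it verifies reduces to:

// chooseForLinuxAmd64 is an illustrative helper, not part of this patch: it
// parses a raw index and picks the instance for linux/amd64, preferring
// zstd-annotated instances unless preferGzip is true.
func chooseForLinuxAmd64(rawIndex []byte, preferGzip bool) (digest.Digest, error) {
	list, err := ListFromBlob(rawIndex, GuessMIMEType(rawIndex))
	if err != nil {
		return "", err
	}
	return list.ChooseInstanceByCompression(&types.SystemContext{
		ArchitectureChoice: "amd64",
		OSChoice:           "linux",
	}, types.NewOptionalBool(preferGzip))
}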
diff --git a/internal/manifest/testdata/non-json.manifest.json b/internal/manifest/testdata/non-json.manifest.json
new file mode 100644
index 0000000..f892721
--- /dev/null
+++ b/internal/manifest/testdata/non-json.manifest.json
Binary files differ
diff --git a/internal/manifest/testdata/oci1.index.zstd-selection.json b/internal/manifest/testdata/oci1.index.zstd-selection.json
new file mode 100644
index 0000000..a55e6b4
--- /dev/null
+++ b/internal/manifest/testdata/oci1.index.zstd-selection.json
@@ -0,0 +1,66 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "size": 758,
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "size": 772,
+ "annotations": {
+ "io.github.containers.compression.zstd": "true"
+ },
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ "size": 758,
+ "platform": {
+ "architecture": "arm64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+ "size": 758,
+ "platform": {
+ "architecture": "arm64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
+ "size": 758,
+ "platform": {
+ "architecture": "s390x",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg",
+ "size": 772,
+ "annotations": {
+ "io.github.containers.compression.zstd": "true"
+ },
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ }
+ ]
+}
diff --git a/internal/manifest/testdata/oci1.index.zstd-selection2.json b/internal/manifest/testdata/oci1.index.zstd-selection2.json
new file mode 100644
index 0000000..7153ccf
--- /dev/null
+++ b/internal/manifest/testdata/oci1.index.zstd-selection2.json
@@ -0,0 +1,96 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "size": 758,
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "size": 759,
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ "size": 772,
+ "annotations": {
+ "io.github.containers.compression.zstd": "true"
+ },
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:1c98002b30a71b08ab175915ce7c8fb8da9e9b502ae082d6f0c572bac9dee324",
+ "size": 772,
+ "annotations": {
+ "io.github.containers.compression.zstd": "true"
+ },
+ "platform": {
+ "architecture": "arm64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:c76757bb6006babdd8464dbf2f1157fdfa6fead0bc6f84f15816a32d6f68f706",
+ "size": 772
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:f2f5f52a2cf2c51d4cac6df0545f751c0adc3f3427eb47c59fcb32894503e18f",
+ "size": 772,
+ "annotations": {
+ "io.github.containers.compression.zstd": "true"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:4612d27c6875f1dd3c28869dfd33c7be24f838261403cfb7940b76b6fd6ea4e2",
+ "size": 772
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:6dc14a60d2ba724646cfbf5fccbb9a618a5978a64a352e060b17caf5e005da9d",
+ "size": 772,
+ "platform": {
+ "architecture": "arm64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+ "size": 759,
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
+ "size": 772,
+ "annotations": {
+ "io.github.containers.compression.zstd": "true"
+ },
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ }
+ ]
+}
diff --git a/internal/manifest/testdata/oci1index.json b/internal/manifest/testdata/oci1index.json
new file mode 100644
index 0000000..a85b4d8
--- /dev/null
+++ b/internal/manifest/testdata/oci1index.json
@@ -0,0 +1,31 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7143,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7682,
+ "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux",
+ "os.features": [
+ "sse4"
+ ]
+ }
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+}
diff --git a/internal/manifest/testdata/ocilist-variants.json b/internal/manifest/testdata/ocilist-variants.json
new file mode 100644
index 0000000..396ae04
--- /dev/null
+++ b/internal/manifest/testdata/ocilist-variants.json
@@ -0,0 +1,67 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 527,
+ "digest": "sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610",
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 527,
+ "digest": "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
+ "platform": {
+ "architecture": "arm",
+ "variant": "v7",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 527,
+ "digest": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+ "platform": {
+ "architecture": "arm",
+ "variant": "v6",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 527,
+ "digest": "sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39",
+ "annotations": {
+ "io.github.containers.compression.zstd": "true"
+ },
+ "platform": {
+ "architecture": "arm",
+ "variant": "v6",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 527,
+ "digest": "sha256:bcf9771c0b505e68c65440474179592ffdfa98790eb54ffbf129969c5e429990",
+ "platform": {
+ "architecture": "arm",
+ "variant": "unrecognized-present",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 527,
+ "digest": "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53",
+ "platform": {
+ "architecture": "arm",
+ "os": "linux"
+ }
+ }
+ ]
+}
diff --git a/internal/manifest/testdata/ociv1.artifact.json b/internal/manifest/testdata/ociv1.artifact.json
new file mode 100644
index 0000000..a538079
--- /dev/null
+++ b/internal/manifest/testdata/ociv1.artifact.json
@@ -0,0 +1,10 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.custom.artifact.config.v1+json",
+ "digest": "",
+ "size": 0
+ },
+ "layers": null
+}
diff --git a/internal/manifest/testdata/ociv1.image.index.json b/internal/manifest/testdata/ociv1.image.index.json
new file mode 100644
index 0000000..a85b4d8
--- /dev/null
+++ b/internal/manifest/testdata/ociv1.image.index.json
@@ -0,0 +1,31 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7143,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7682,
+ "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux",
+ "os.features": [
+ "sse4"
+ ]
+ }
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+}
diff --git a/internal/manifest/testdata/ociv1.manifest.json b/internal/manifest/testdata/ociv1.manifest.json
new file mode 100644
index 0000000..7e2e2e8
--- /dev/null
+++ b/internal/manifest/testdata/ociv1.manifest.json
@@ -0,0 +1,30 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+}
diff --git a/internal/manifest/testdata/ociv1nomime.artifact.json b/internal/manifest/testdata/ociv1nomime.artifact.json
new file mode 100644
index 0000000..5091d1f
--- /dev/null
+++ b/internal/manifest/testdata/ociv1nomime.artifact.json
@@ -0,0 +1,9 @@
+{
+ "schemaVersion": 2,
+ "config": {
+ "mediaType": "application/vnd.oci.custom.artifact.config.v1+json",
+ "digest": "",
+ "size": 0
+ },
+ "layers": null
+}
diff --git a/internal/manifest/testdata/ociv1nomime.image.index.json b/internal/manifest/testdata/ociv1nomime.image.index.json
new file mode 100644
index 0000000..066f058
--- /dev/null
+++ b/internal/manifest/testdata/ociv1nomime.image.index.json
@@ -0,0 +1,30 @@
+{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7143,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7682,
+ "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux",
+ "os.features": [
+ "sse4"
+ ]
+ }
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+}
diff --git a/internal/manifest/testdata/ociv1nomime.manifest.json b/internal/manifest/testdata/ociv1nomime.manifest.json
new file mode 100644
index 0000000..1e1047c
--- /dev/null
+++ b/internal/manifest/testdata/ociv1nomime.manifest.json
@@ -0,0 +1,29 @@
+{
+ "schemaVersion": 2,
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+}
diff --git a/internal/manifest/testdata/schema2-to-schema1-by-docker.json b/internal/manifest/testdata/schema2-to-schema1-by-docker.json
new file mode 120000
index 0000000..437a6be
--- /dev/null
+++ b/internal/manifest/testdata/schema2-to-schema1-by-docker.json
@@ -0,0 +1 @@
+../../../internal/image/fixtures/schema2-to-schema1-by-docker.json \ No newline at end of file
diff --git a/internal/manifest/testdata/schema2list-variants.json b/internal/manifest/testdata/schema2list-variants.json
new file mode 100644
index 0000000..0f5214c
--- /dev/null
+++ b/internal/manifest/testdata/schema2list-variants.json
@@ -0,0 +1,44 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610",
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39",
+ "platform": {
+ "architecture": "arm",
+ "variant": "v6",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:bcf9771c0b505e68c65440474179592ffdfa98790eb54ffbf129969c5e429990",
+ "platform": {
+ "architecture": "arm",
+ "variant": "unrecognized-present",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53",
+ "platform": {
+ "architecture": "arm",
+ "os": "linux"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/manifest/testdata/schema2list.json b/internal/manifest/testdata/schema2list.json
new file mode 100644
index 0000000..398b746
--- /dev/null
+++ b/internal/manifest/testdata/schema2list.json
@@ -0,0 +1,72 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:030fcb92e1487b18c974784dcc110a93147c9fc402188370fbfd17efabffc6af",
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:9142d97ef280a7953cf1a85716de49a24cc1dd62776352afad67e635331ff77a",
+ "platform": {
+ "architecture": "arm",
+ "os": "linux",
+ "variant": "v5"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:b5dbad4bdb4444d919294afe49a095c23e86782f98cdf0aa286198ddb814b50b",
+ "platform": {
+ "architecture": "arm",
+ "os": "linux",
+ "variant": "v6"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:dc472a59fb006797aa2a6bfb54cc9c57959bb0a6d11fadaa608df8c16dea39cf",
+ "platform": {
+ "architecture": "arm64",
+ "os": "linux",
+ "variant": "v8"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 527,
+ "digest": "sha256:9a33b9909e56b0a2092a65fb1b79ef6717fa160b1f084476b860418780e8d53b",
+ "platform": {
+ "architecture": "386",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 528,
+ "digest": "sha256:59117d7c016fba6ede7f87991204bd672a1dca444102de66db632383507ed90b",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "size": 528,
+ "digest": "sha256:e5aa1b0a24620228b75382997a0977f609b3ca3a95533dafdef84c74cc8df642",
+ "platform": {
+ "architecture": "s390x",
+ "os": "linux"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/manifest/testdata/unknown-version.manifest.json b/internal/manifest/testdata/unknown-version.manifest.json
new file mode 100644
index 0000000..b0f34b6
--- /dev/null
+++ b/internal/manifest/testdata/unknown-version.manifest.json
@@ -0,0 +1,5 @@
+{
+ "schemaVersion": 99999,
+ "name": "mitr/noversion-nonsense",
+ "tag": "latest"
+}
diff --git a/internal/manifest/testdata/v2list.manifest.json b/internal/manifest/testdata/v2list.manifest.json
new file mode 100644
index 0000000..1bf9896
--- /dev/null
+++ b/internal/manifest/testdata/v2list.manifest.json
@@ -0,0 +1,56 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
+ "size": 2094,
+ "digest": "sha256:7820f9a86d4ad15a2c4f0c0e5479298df2aa7c2f6871288e2ef8546f3e7b6783",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
+ "size": 1922,
+ "digest": "sha256:ae1b0e06e8ade3a11267564a26e750585ba2259c0ecab59ab165ad1af41d1bdd",
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux",
+ "features": [
+ "sse"
+ ]
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
+ "size": 2084,
+ "digest": "sha256:e4c0df75810b953d6717b8f8f28298d73870e8aa2a0d5e77b8391f16fdfbbbe2",
+ "platform": {
+ "architecture": "s390x",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
+ "size": 2084,
+ "digest": "sha256:07ebe243465ef4a667b78154ae6c3ea46fdb1582936aac3ac899ea311a701b40",
+ "platform": {
+ "architecture": "arm",
+ "os": "linux",
+ "variant": "armv7"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
+ "size": 2090,
+ "digest": "sha256:fb2fc0707b86dafa9959fe3d29e66af8787aee4d9a23581714be65db4265ad8a",
+ "platform": {
+ "architecture": "arm64",
+ "os": "linux",
+ "variant": "armv8"
+ }
+ }
+ ]
+}
diff --git a/internal/manifest/testdata/v2s1-invalid-signatures.manifest.json b/internal/manifest/testdata/v2s1-invalid-signatures.manifest.json
new file mode 100644
index 0000000..96def40
--- /dev/null
+++ b/internal/manifest/testdata/v2s1-invalid-signatures.manifest.json
@@ -0,0 +1,11 @@
+{
+ "schemaVersion": 1,
+ "name": "mitr/busybox",
+ "tag": "latest",
+ "architecture": "amd64",
+ "fsLayers": [
+ ],
+ "history": [
+ ],
+ "signatures": 1
+}
diff --git a/internal/manifest/testdata/v2s1-unsigned.manifest.json b/internal/manifest/testdata/v2s1-unsigned.manifest.json
new file mode 100644
index 0000000..16764b4
--- /dev/null
+++ b/internal/manifest/testdata/v2s1-unsigned.manifest.json
@@ -0,0 +1,28 @@
+{
+ "schemaVersion": 1,
+ "name": "mitr/busybox",
+ "tag": "latest",
+ "architecture": "amd64",
+ "fsLayers": [
+ {
+ "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ },
+ {
+ "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ },
+ {
+ "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ }
+ ],
+ "history": [
+ {
+ "v1Compatibility": "{\"id\":\"f1b5eb0a1215f663765d509b6cdf3841bc2bcff0922346abb943d1342d469a97\",\"parent\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"c0924f5b281a1992127d0afc065e59548ded8880b08aea4debd56d4497acb17a\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Checksum=4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\"],\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"parent\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:38.563048924Z\",\"container\":\"fd4cf54dcd239fbae9bdade9db48e41880b436d27cb5313f60952a46ab04deff\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Name=atomic-test-2\"],\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:32.948089874Z\",\"container\":\"56f0fe1dfc95755dd6cda10f7215c9937a8d9c6348d079c581a261fd4c2f3a5f\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) MAINTAINER \\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/manifest/testdata/v2s1.manifest.json b/internal/manifest/testdata/v2s1.manifest.json
new file mode 100644
index 0000000..f7bcd07
--- /dev/null
+++ b/internal/manifest/testdata/v2s1.manifest.json
@@ -0,0 +1,44 @@
+{
+ "schemaVersion": 1,
+ "name": "mitr/busybox",
+ "tag": "latest",
+ "architecture": "amd64",
+ "fsLayers": [
+ {
+ "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ },
+ {
+ "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ },
+ {
+ "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ }
+ ],
+ "history": [
+ {
+ "v1Compatibility": "{\"id\":\"f1b5eb0a1215f663765d509b6cdf3841bc2bcff0922346abb943d1342d469a97\",\"parent\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"c0924f5b281a1992127d0afc065e59548ded8880b08aea4debd56d4497acb17a\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Checksum=4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\"],\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"parent\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:38.563048924Z\",\"container\":\"fd4cf54dcd239fbae9bdade9db48e41880b436d27cb5313f60952a46ab04deff\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Name=atomic-test-2\"],\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+ },
+ {
+ "v1Compatibility": "{\"id\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:32.948089874Z\",\"container\":\"56f0fe1dfc95755dd6cda10f7215c9937a8d9c6348d079c581a261fd4c2f3a5f\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) MAINTAINER \\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+ }
+ ],
+ "signatures": [
+ {
+ "header": {
+ "jwk": {
+ "crv": "P-256",
+ "kid": "OZ45:U3IG:TDOI:PMBD:NGP2:LDIW:II2U:PSBI:MMCZ:YZUP:TUUO:XPZT",
+ "kty": "EC",
+ "x": "ReC5c0J9tgXSdUL4_xzEt5RsD8kFt2wWSgJcpAcOQx8",
+ "y": "3sBGEqQ3ZMeqPKwQBAadN2toOUEASha18xa0WwsDF-M"
+ },
+ "alg": "ES256"
+ },
+ "signature": "dV1paJ3Ck1Ph4FcEhg_frjqxdlGdI6-ywRamk6CvMOcaOEUdCWCpCPQeBQpD2N6tGjkoG1BbstkFNflllfenCw",
+ "protected": "eyJmb3JtYXRMZW5ndGgiOjU0NzgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNC0xOFQyMDo1NDo0MloifQ"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/manifest/testdata/v2s2.manifest.json b/internal/manifest/testdata/v2s2.manifest.json
new file mode 100644
index 0000000..198da23
--- /dev/null
+++ b/internal/manifest/testdata/v2s2.manifest.json
@@ -0,0 +1,26 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/manifest/testdata/v2s2nomime.manifest.json b/internal/manifest/testdata/v2s2nomime.manifest.json
new file mode 100644
index 0000000..a0b06c2
--- /dev/null
+++ b/internal/manifest/testdata/v2s2nomime.manifest.json
@@ -0,0 +1,10 @@
+{
+ "schemaVersion": 2,
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ ]
+}
diff --git a/internal/manifest/testdata_info_test.go b/internal/manifest/testdata_info_test.go
new file mode 100644
index 0000000..bfdaed1
--- /dev/null
+++ b/internal/manifest/testdata_info_test.go
@@ -0,0 +1,12 @@
+package manifest
+
+import "github.com/opencontainers/go-digest"
+
+const (
+	// TestDockerV2S2ManifestDigest is the Docker manifest digest of "v2s2.manifest.json"
+ TestDockerV2S2ManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
+	// TestDockerV2S1ManifestDigest is the Docker manifest digest of "v2s1.manifest.json"
+ TestDockerV2S1ManifestDigest = digest.Digest("sha256:7364fea9d84ee548ab67d4c46c6006289800c98de3fbf8c0a97138dfcc23f000")
+	// TestDockerV2S1UnsignedManifestDigest is the Docker manifest digest of "v2s1-unsigned.manifest.json"
+ TestDockerV2S1UnsignedManifestDigest = digest.Digest("sha256:7364fea9d84ee548ab67d4c46c6006289800c98de3fbf8c0a97138dfcc23f000")
+)
diff --git a/internal/pkg/platform/platform_matcher.go b/internal/pkg/platform/platform_matcher.go
new file mode 100644
index 0000000..3ba0e40
--- /dev/null
+++ b/internal/pkg/platform/platform_matcher.go
@@ -0,0 +1,197 @@
+package platform
+
+// Largely based on
+// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go
+// Copyright 2012-2017 Docker, Inc.
+//
+// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go
+// Copyright The containerd Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// https://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/slices"
+)
+
+// On Linux, the kernel has already detected the ABI, ISA and features,
+// so we don't need to access the ARM registers to detect platform information
+// ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+ if runtime.GOOS != "linux" {
+ return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS)
+ }
+
+ cpuinfo, err := os.Open("/proc/cpuinfo")
+ if err != nil {
+ return "", err
+ }
+ defer cpuinfo.Close()
+
+	// Parse /proc/cpuinfo line by line. For an SMP SoC, parsing
+	// the first core is enough.
+ scanner := bufio.NewScanner(cpuinfo)
+ for scanner.Scan() {
+ newline := scanner.Text()
+ list := strings.Split(newline, ":")
+
+ if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+ return strings.TrimSpace(list[1]), nil
+ }
+ }
+
+ // Check whether the scanner encountered errors
+ err = scanner.Err()
+ if err != nil {
+ return "", err
+ }
+
+ return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
+}
+
+func getCPUVariantWindows(arch string) string {
+ // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
+ // runtime.GOARCH to determine the variants
+ var variant string
+ switch arch {
+ case "arm64":
+ variant = "v8"
+ case "arm":
+ variant = "v7"
+ default:
+ variant = ""
+ }
+
+ return variant
+}
+
+func getCPUVariantArm() string {
+ variant, err := getCPUInfo("Cpu architecture")
+ if err != nil {
+ return ""
+ }
+ // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286)
+
+ switch strings.ToLower(variant) {
+ case "8", "aarch64":
+ variant = "v8"
+ case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+ variant = "v7"
+ case "6", "6tej":
+ variant = "v6"
+ case "5", "5t", "5te", "5tej":
+ variant = "v5"
+ case "4", "4t":
+ variant = "v4"
+ case "3":
+ variant = "v3"
+ default:
+ variant = ""
+ }
+
+ return variant
+}
+
+func getCPUVariant(os string, arch string) string {
+ if os == "windows" {
+ return getCPUVariantWindows(arch)
+ }
+ if arch == "arm" || arch == "arm64" {
+ return getCPUVariantArm()
+ }
+ return ""
+}
+
+// compatibility contains, for a specified architecture, a list of known variants, in the
+// order from most capable (most restrictive) to least capable (most compatible).
+// Architectures that don’t have variants should not have an entry here.
+var compatibility = map[string][]string{
+ "arm": {"v8", "v7", "v6", "v5"},
+ "arm64": {"v8"},
+}
+
+// WantedPlatforms returns all compatible platforms, with the platform specifics possibly overridden by the user;
+// the most compatible platform is first.
+// If some option (arch, os, variant) is not present, a value from the current platform is detected.
+func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
+ // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
+ // The fields are not specified by the OCI specification, as of version 1.1, usefully enough
+ // to be interoperable, anyway.
+
+ wantedArch := runtime.GOARCH
+ wantedVariant := ""
+ if ctx != nil && ctx.ArchitectureChoice != "" {
+ wantedArch = ctx.ArchitectureChoice
+ } else {
+ // Only auto-detect the variant if we are using the default architecture.
+ // If the user has specified the ArchitectureChoice, don't autodetect, even if
+ // ctx.ArchitectureChoice == runtime.GOARCH, because we have no idea whether the runtime.GOARCH
+ // value is relevant to the use case, and if we do autodetect a variant,
+ // ctx.VariantChoice can't be used to override it back to "".
+ wantedVariant = getCPUVariant(runtime.GOOS, runtime.GOARCH)
+ }
+ if ctx != nil && ctx.VariantChoice != "" {
+ wantedVariant = ctx.VariantChoice
+ }
+
+ wantedOS := runtime.GOOS
+ if ctx != nil && ctx.OSChoice != "" {
+ wantedOS = ctx.OSChoice
+ }
+
+ var variants []string = nil
+ if wantedVariant != "" {
+ // If the user requested a specific variant, we'll walk down
+ // the list from most to least compatible.
+ if variantOrder := compatibility[wantedArch]; variantOrder != nil {
+ if i := slices.Index(variantOrder, wantedVariant); i != -1 {
+ variants = variantOrder[i:]
+ }
+ }
+ if variants == nil {
+ // user wants a variant which we know nothing about - not even compatibility
+ variants = []string{wantedVariant}
+ }
+ // Make sure to have a candidate with an empty variant as well.
+ variants = append(variants, "")
+ } else {
+ // Make sure to have a candidate with an empty variant as well.
+ variants = append(variants, "")
+ // If available add the entire compatibility matrix for the specific architecture.
+ if possibleVariants, ok := compatibility[wantedArch]; ok {
+ variants = append(variants, possibleVariants...)
+ }
+ }
+
+ res := make([]imgspecv1.Platform, 0, len(variants))
+ for _, v := range variants {
+ res = append(res, imgspecv1.Platform{
+ OS: wantedOS,
+ Architecture: wantedArch,
+ Variant: v,
+ })
+ }
+ return res, nil
+}
+
+// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
+// an item from the return value of WantedPlatforms.
+func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool {
+ return image.Architecture == wanted.Architecture &&
+ image.OS == wanted.OS &&
+ image.Variant == wanted.Variant
+}
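Editor's note: a short worked example of the ordering that the WantedPlatforms documentation above describes, mirroring the arm/v6 case in the test that follows; the snippet is illustrative only and not part of this patch.

// Illustrative only: requesting linux/arm/v6 walks the compatibility list
// from the requested variant down and appends the empty variant last.
func exampleWantedPlatforms() ([]imgspecv1.Platform, error) {
	ctx := &types.SystemContext{
		OSChoice:           "linux",
		ArchitectureChoice: "arm",
		VariantChoice:      "v6",
	}
	// Expected result, in order of preference:
	//   {linux, arm, v6}, {linux, arm, v5}, {linux, arm, ""}
	return WantedPlatforms(ctx)
}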
diff --git a/internal/pkg/platform/platform_matcher_test.go b/internal/pkg/platform/platform_matcher_test.go
new file mode 100644
index 0000000..9647a34
--- /dev/null
+++ b/internal/pkg/platform/platform_matcher_test.go
@@ -0,0 +1,61 @@
+package platform
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWantedPlatforms(t *testing.T) {
+ for _, c := range []struct {
+ ctx types.SystemContext
+ expected []imgspecv1.Platform
+ }{
+ { // X86_64 does not have variants
+ types.SystemContext{ArchitectureChoice: "amd64", OSChoice: "linux"},
+ []imgspecv1.Platform{
+ {OS: "linux", Architecture: "amd64", Variant: ""},
+ },
+ },
+ { // ARM with variant
+ types.SystemContext{ArchitectureChoice: "arm", OSChoice: "linux", VariantChoice: "v6"},
+ []imgspecv1.Platform{
+ {OS: "linux", Architecture: "arm", Variant: "v6"},
+ {OS: "linux", Architecture: "arm", Variant: "v5"},
+ {OS: "linux", Architecture: "arm", Variant: ""},
+ },
+ },
+ { // ARM without variant
+ types.SystemContext{ArchitectureChoice: "arm", OSChoice: "linux"},
+ []imgspecv1.Platform{
+ {OS: "linux", Architecture: "arm", Variant: ""},
+ {OS: "linux", Architecture: "arm", Variant: "v8"},
+ {OS: "linux", Architecture: "arm", Variant: "v7"},
+ {OS: "linux", Architecture: "arm", Variant: "v6"},
+ {OS: "linux", Architecture: "arm", Variant: "v5"},
+ },
+ },
+ { // ARM64 has a base variant
+ types.SystemContext{ArchitectureChoice: "arm64", OSChoice: "linux"},
+ []imgspecv1.Platform{
+ {OS: "linux", Architecture: "arm64", Variant: ""},
+ {OS: "linux", Architecture: "arm64", Variant: "v8"},
+ },
+ },
+ { // Custom (completely unrecognized data)
+ types.SystemContext{ArchitectureChoice: "armel", OSChoice: "freeBSD", VariantChoice: "custom"},
+ []imgspecv1.Platform{
+ {OS: "freeBSD", Architecture: "armel", Variant: "custom"},
+ {OS: "freeBSD", Architecture: "armel", Variant: ""},
+ },
+ },
+ } {
+ testName := fmt.Sprintf("%q/%q/%q", c.ctx.ArchitectureChoice, c.ctx.OSChoice, c.ctx.VariantChoice)
+ platforms, err := WantedPlatforms(&c.ctx)
+ assert.Nil(t, err, testName)
+ assert.Equal(t, c.expected, platforms, testName)
+ }
+}
diff --git a/internal/private/private.go b/internal/private/private.go
new file mode 100644
index 0000000..95d561f
--- /dev/null
+++ b/internal/private/private.go
@@ -0,0 +1,164 @@
+package private
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/signature"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// ImageSourceInternalOnly is the part of private.ImageSource that is not
+// a part of types.ImageSource.
+type ImageSourceInternalOnly interface {
+ // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+ SupportsGetBlobAt() bool
+ // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt().
+ BlobChunkAccessor
+
+ // GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+ // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+ // (e.g. if the source never returns manifest lists).
+ GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error)
+}
+
+// ImageSource is an internal extension to the types.ImageSource interface.
+type ImageSource interface {
+ types.ImageSource
+ ImageSourceInternalOnly
+}
+
+// ImageDestinationInternalOnly is the part of private.ImageDestination that is not
+// a part of types.ImageDestination.
+type ImageDestinationInternalOnly interface {
+ // SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+ SupportsPutBlobPartial() bool
+ // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
+ // on unsupported formats.
+
+ // PutBlobWithOptions writes contents of stream and returns data representing the result.
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+ PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)
+
+ // PutBlobPartial attempts to create a blob using the data that is already present
+ // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+ // It is available only if SupportsPutBlobPartial().
+ // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+ // should fall back to PutBlobWithOptions.
+ PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)
+
+ // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If the blob has been successfully reused, returns (true, info, nil).
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)
+
+ // PutSignaturesWithFormat writes a set of signatures to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // MUST be called after PutManifest (signatures may reference manifest contents).
+ PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+ types.ImageDestination
+ ImageDestinationInternalOnly
+}
+
+// UploadedBlob is information about a blob written to a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
+type UploadedBlob struct {
+ Digest digest.Digest
+ Size int64
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+	Cache    blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
+ IsConfig bool // True if the blob is a config
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
+
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+ Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
+ // If true, it is allowed to use an equivalent of the desired blob;
+ // in that case the returned info may not match the input.
+ CanSubstitute bool
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
+ RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
+ OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”.
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+ SrcRef reference.Named // A reference to the source image that contains the input blob.
+}
+
+// ReusedBlob is information about a blob reused in a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting.
+type ReusedBlob struct {
+ Digest digest.Digest // Must be provided
+ Size int64 // Must be provided
+ // The following compression fields should be set when the reuse substitutes
+ // a differently-compressed blob.
+ CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
+ CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
+}
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+ Offset uint64
+ Length uint64
+}
+
+// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
+type BlobChunkAccessor interface {
+ // GetBlobAt returns a sequential channel of readers that contain data for the requested
+ // blob chunks, and a channel that might get a single error value.
+	// The specified chunks must not overlap and must be sorted by their offset.
+ // The readers must be fully consumed, in the order they are returned, before blocking
+ // to read the next chunk.
+ GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+ Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+ return e.Status
+}
+
+// UnparsedImage is an internal extension to the types.UnparsedImage interface.
+type UnparsedImage interface {
+ types.UnparsedImage
+ // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+ UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
+}
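Editor's note: the GetBlobAt contract documented above (each reader consumed fully, in order, before reading the next chunk) is easy to get wrong, so here is a hedged sketch of one way a caller might drive it. The helper name and the error-channel handling are assumptions, not part of this file; in particular it assumes the error channel is closed, or receives its single value, once the readers channel is exhausted.

// readChunks is an illustrative caller of BlobChunkAccessor.GetBlobAt.
func readChunks(ctx context.Context, src BlobChunkAccessor, info types.BlobInfo, chunks []ImageSourceChunk) ([][]byte, error) {
	readers, errChan, err := src.GetBlobAt(ctx, info, chunks)
	if err != nil {
		return nil, err
	}
	var out [][]byte
	for r := range readers {
		data, readErr := io.ReadAll(r) // consume fully, in order, before the next chunk
		r.Close()
		if readErr != nil {
			return nil, readErr
		}
		out = append(out, data)
	}
	// Assumed: the channel delivers at most one error and is then closed.
	if chunkErr := <-errChan; chunkErr != nil {
		return nil, chunkErr
	}
	return out, nil
}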
diff --git a/internal/putblobdigest/put_blob_digest.go b/internal/putblobdigest/put_blob_digest.go
new file mode 100644
index 0000000..b8d3a7e
--- /dev/null
+++ b/internal/putblobdigest/put_blob_digest.go
@@ -0,0 +1,57 @@
+package putblobdigest
+
+import (
+ "io"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Digester computes a digest of the provided stream, if not known yet.
+type Digester struct {
+ knownDigest digest.Digest // Or ""
+ digester digest.Digester // Or nil
+}
+
+// newDigester initiates computation of a digest.Canonical digest of stream,
+// if !validDigest; otherwise it just records knownDigest to be returned later.
+// The caller MUST use the returned stream instead of the original value.
+func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) {
+ if validDigest {
+ return Digester{knownDigest: knownDigest}, stream
+ } else {
+ res := Digester{
+ digester: digest.Canonical.Digester(),
+ }
+ stream = io.TeeReader(stream, res.digester.Hash())
+ return res, stream
+ }
+}
+
+// DigestIfUnknown initiates computation of a digest.Canonical digest of stream,
+// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will
+// be used (accepting any algorithm).
+// The caller MUST use the returned stream instead of the original value.
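+//
+// A typical use looks roughly like this (illustrative; dest and blobInfo are whatever
+// the caller already has):
+//
+//	digester, stream := DigestIfUnknown(stream, blobInfo)
+//	if _, err := io.Copy(dest, stream); err != nil {
+//		return err
+//	}
+//	blobDigest := digester.Digest() // Valid only after the whole stream has been consumed.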
+func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
+ d := blobInfo.Digest
+ return newDigester(stream, d, d != "")
+}
+
+// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream,
+// if a digest.Canonical digest is not supplied in the provided blobInfo;
+// otherwise blobInfo.Digest will be used.
+// The caller MUST use the returned stream instead of the original value.
+func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
+ d := blobInfo.Digest
+ return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical)
+}
+
+// Digest returns a digest value possibly computed by the Digester.
+// This must be called only after all of the stream returned by a Digester constructor
+// has been successfully read.
+func (d Digester) Digest() digest.Digest {
+ if d.digester != nil {
+ return d.digester.Digest()
+ }
+ return d.knownDigest
+}
diff --git a/internal/putblobdigest/put_blob_digest_test.go b/internal/putblobdigest/put_blob_digest_test.go
new file mode 100644
index 0000000..eb8ebbc
--- /dev/null
+++ b/internal/putblobdigest/put_blob_digest_test.go
@@ -0,0 +1,74 @@
+package putblobdigest
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var testData = []byte("test data")
+
+type testCase struct {
+ inputDigest digest.Digest
+ computesDigest bool
+ expectedDigest digest.Digest
+}
+
+func testDigester(t *testing.T, constructor func(io.Reader, types.BlobInfo) (Digester, io.Reader),
+ cases []testCase) {
+ for _, c := range cases {
+ stream := bytes.NewReader(testData)
+ digester, newStream := constructor(stream, types.BlobInfo{Digest: c.inputDigest})
+ assert.Equal(t, c.computesDigest, newStream != stream, c.inputDigest)
+ data, err := io.ReadAll(newStream)
+ require.NoError(t, err, c.inputDigest)
+ assert.Equal(t, testData, data, c.inputDigest)
+ digest := digester.Digest()
+ assert.Equal(t, c.expectedDigest, digest, c.inputDigest)
+ }
+}
+
+func TestDigestIfUnknown(t *testing.T) {
+ testDigester(t, DigestIfUnknown, []testCase{
+ {
+ inputDigest: digest.Digest("sha256:uninspected-value"),
+ computesDigest: false,
+ expectedDigest: digest.Digest("sha256:uninspected-value"),
+ },
+ {
+ inputDigest: digest.Digest("unknown-algorithm:uninspected-value"),
+ computesDigest: false,
+ expectedDigest: digest.Digest("unknown-algorithm:uninspected-value"),
+ },
+ {
+ inputDigest: "",
+ computesDigest: true,
+ expectedDigest: digest.Canonical.FromBytes(testData),
+ },
+ })
+}
+
+func TestDigestIfCanonicalUnknown(t *testing.T) {
+ testDigester(t, DigestIfCanonicalUnknown, []testCase{
+ {
+ inputDigest: digest.Digest("sha256:uninspected-value"),
+ computesDigest: false,
+ expectedDigest: digest.Digest("sha256:uninspected-value"),
+ },
+ {
+ inputDigest: digest.Digest("unknown-algorithm:uninspected-value"),
+ computesDigest: true,
+ expectedDigest: digest.Canonical.FromBytes(testData),
+ },
+ {
+ inputDigest: "",
+ computesDigest: true,
+ expectedDigest: digest.Canonical.FromBytes(testData),
+ },
+ })
+}
diff --git a/internal/rootless/rootless.go b/internal/rootless/rootless.go
new file mode 100644
index 0000000..80623bf
--- /dev/null
+++ b/internal/rootless/rootless.go
@@ -0,0 +1,25 @@
+package rootless
+
+import (
+ "os"
+ "strconv"
+)
+
+// GetRootlessEUID returns the effective UID of the current user (in the parent user namespace, if any).
+//
+// Podman and similar software, in “rootless” configuration, when run as a non-root
+// user, very early switches to a user namespace, where Geteuid() == 0 (but does not
+// switch to a limited mount namespace); so, code relying on Geteuid() would use
+// system-wide paths in e.g. /var, when the user is actually not privileged to write to
+// them, and expects state to be stored in the home directory.
+//
+// If Podman is setting up such a user namespace, it records the original UID in an
+// environment variable, allowing us to make choices based on the actual user’s identity.
+func GetRootlessEUID() int {
+ euidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+ if euidEnv != "" {
+ euid, _ := strconv.Atoi(euidEnv)
+ return euid
+ }
+ return os.Geteuid()
+}
diff --git a/internal/set/set.go b/internal/set/set.go
new file mode 100644
index 0000000..acf3034
--- /dev/null
+++ b/internal/set/set.go
@@ -0,0 +1,52 @@
+package set
+
+import "golang.org/x/exp/maps"
+
+// FIXME:
+// - Docstrings
+// - This should be in a public library somewhere
+
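+// Set is a simple set of comparable values, backed by a map.
+// Create it with New or NewWithValues; it is not safe for concurrent use.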
+type Set[E comparable] struct {
+ m map[E]struct{}
+}
+
+func New[E comparable]() *Set[E] {
+ return &Set[E]{
+ m: map[E]struct{}{},
+ }
+}
+
+func NewWithValues[E comparable](values ...E) *Set[E] {
+ s := New[E]()
+ for _, v := range values {
+ s.Add(v)
+ }
+ return s
+}
+
+func (s *Set[E]) Add(v E) {
+ s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
+func (s *Set[E]) AddSlice(slice []E) {
+ for _, v := range slice {
+ s.Add(v)
+ }
+}
+
+func (s *Set[E]) Delete(v E) {
+ delete(s.m, v)
+}
+
+func (s *Set[E]) Contains(v E) bool {
+ _, ok := s.m[v]
+ return ok
+}
+
+func (s *Set[E]) Empty() bool {
+ return len(s.m) == 0
+}
+
+func (s *Set[E]) Values() []E {
+ return maps.Keys(s.m)
+}
diff --git a/internal/set/set_test.go b/internal/set/set_test.go
new file mode 100644
index 0000000..3e704a9
--- /dev/null
+++ b/internal/set/set_test.go
@@ -0,0 +1,77 @@
+package set
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNew(t *testing.T) {
+ s := New[int]()
+ assert.True(t, s.Empty())
+}
+
+func TestNewWithValues(t *testing.T) {
+ s := NewWithValues(1, 3)
+ assert.True(t, s.Contains(1))
+ assert.False(t, s.Contains(2))
+ assert.True(t, s.Contains(3))
+}
+
+func TestAdd(t *testing.T) {
+ s := NewWithValues(1)
+ assert.False(t, s.Contains(2))
+ s.Add(2)
+ assert.True(t, s.Contains(2))
+ s.Add(2) // Adding an already-present element
+ assert.True(t, s.Contains(2))
+ // Values() must contain 2 only once
+ assert.ElementsMatch(t, []int{1, 2}, s.Values())
+ // Unrelated elements are unaffected
+ assert.True(t, s.Contains(1))
+ assert.False(t, s.Contains(3))
+}
+
+func TestAddSlice(t *testing.T) {
+ s := NewWithValues(1)
+ s.Add(2)
+ s.AddSlice([]int{3, 4})
+ assert.ElementsMatch(t, []int{1, 2, 3, 4}, s.Values())
+}
+
+func TestDelete(t *testing.T) {
+ s := NewWithValues(1, 2)
+ assert.True(t, s.Contains(2))
+ s.Delete(2)
+ assert.False(t, s.Contains(2))
+ s.Delete(2) // Deleting a missing element
+ assert.False(t, s.Contains(2))
+ // Unrelated elements are unaffected
+ assert.True(t, s.Contains(1))
+}
+
+func TestContains(t *testing.T) {
+ s := NewWithValues(1, 2)
+ assert.True(t, s.Contains(1))
+ assert.True(t, s.Contains(2))
+ assert.False(t, s.Contains(3))
+}
+
+func TestEmpty(t *testing.T) {
+ s := New[int]()
+ assert.True(t, s.Empty())
+ s.Add(1)
+ assert.False(t, s.Empty())
+ s.Delete(1)
+ assert.True(t, s.Empty())
+}
+
+func TestValues(t *testing.T) {
+ s := New[int]()
+ assert.Empty(t, s.Values())
+ s.Add(1)
+ s.Add(2)
+ // ignore duplicate
+ s.Add(2)
+ assert.ElementsMatch(t, []int{1, 2}, s.Values())
+}
diff --git a/internal/signature/signature.go b/internal/signature/signature.go
new file mode 100644
index 0000000..6f95115
--- /dev/null
+++ b/internal/signature/signature.go
@@ -0,0 +1,102 @@
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// FIXME FIXME: MIME type? Int? String?
+// An interface with a name, parse methods?
+type FormatID string
+
+const (
+ SimpleSigningFormat FormatID = "simple-signing"
+ SigstoreFormat FormatID = "sigstore-json"
+ // Update also UnsupportedFormatError below
+)
+
+// Signature is an image signature of some kind.
+type Signature interface {
+ FormatID() FormatID
+ // blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+ // Almost everyone should use signature.Blob() instead.
+ blobChunk() ([]byte, error)
+}
+
+// Blob returns a representation of sig as a []byte, suitable for long-term storage.
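+// For formats other than simple signing the result is a binary 0x00 byte, the format
+// name, a newline, and the format-specific chunk, e.g. (illustrative):
+//
+//	"\x00sigstore-json\n" + <JSON chunk>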
+func Blob(sig Signature) ([]byte, error) {
+ chunk, err := sig.blobChunk()
+ if err != nil {
+ return nil, err
+ }
+
+ format := sig.FormatID()
+ switch format {
+ case SimpleSigningFormat:
+ // For compatibility with old dir formats:
+ return chunk, nil
+ default:
+ res := []byte{0} // Start with a zero byte to clearly mark this is a binary format, and disambiguate from random text.
+ res = append(res, []byte(format)...)
+ res = append(res, '\n')
+ res = append(res, chunk...)
+ return res, nil
+ }
+}
+
+// FromBlob returns a signature from parsing a blob created by signature.Blob.
+func FromBlob(blob []byte) (Signature, error) {
+ if len(blob) == 0 {
+ return nil, errors.New("empty signature blob")
+ }
+ // Historically we’ve just been using GPG with no identification; try to auto-detect that.
+ switch blob[0] {
+ // OpenPGP "compressed data" wrapping the message
+ case 0xA0, 0xA1, 0xA2, 0xA3, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 8 (tag: compressed data packet); bits 1…0 = length-type (any)
+ 0xC8, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 8 (tag: compressed data packet)
+ // OpenPGP “one-pass signature” starting a signature
+ 0x90, 0x91, 0x92, 0x93, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 4 (tag: one-pass signature packet); bits 1…0 = length-type (any)
+ 0xC4, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 4 (tag: one-pass signature packet)
+ // OpenPGP signature packet signing the following data
+ 0x88, 0x89, 0x8A, 0x8B, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 2 (tag: signature packet); bits 1…0 = length-type (any)
+ 0xC2: // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 2 (tag: signature packet)
+ return SimpleSigningFromBlob(blob), nil
+
+ // The newer format: binary 0, format name, newline, data
+ case 0x00:
+ blob = blob[1:]
+ formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'})
+ if !foundNewline {
+ return nil, fmt.Errorf("invalid signature format, missing newline")
+ }
+ for _, b := range formatBytes {
+ if b < 32 || b >= 0x7F {
+ return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
+ }
+ }
+ switch {
+ case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
+ return SimpleSigningFromBlob(blobChunk), nil
+ case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
+ return sigstoreFromBlobChunk(blobChunk)
+ default:
+ return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
+ }
+
+ default:
+ return nil, fmt.Errorf("unrecognized signature format, starting with binary %#x", blob[0])
+ }
+
+}
+
+// UnsupportedFormatError returns an error complaining about sig having an unsupported format.
+func UnsupportedFormatError(sig Signature) error {
+ formatID := sig.FormatID()
+ switch formatID {
+ case SimpleSigningFormat, SigstoreFormat:
+ return fmt.Errorf("unsupported signature format %s", string(formatID))
+ default:
+ return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
+ }
+}
diff --git a/internal/signature/signature_test.go b/internal/signature/signature_test.go
new file mode 100644
index 0000000..924c32d
--- /dev/null
+++ b/internal/signature/signature_test.go
@@ -0,0 +1,94 @@
+package signature
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBlobSimpleSigning(t *testing.T) {
+ simpleSigData, err := os.ReadFile("testdata/simple.signature")
+ require.NoError(t, err)
+ simpleSig := SimpleSigningFromBlob(simpleSigData)
+
+ simpleBlob, err := Blob(simpleSig)
+ require.NoError(t, err)
+ assert.Equal(t, simpleSigData, simpleBlob)
+
+ fromBlob, err := FromBlob(simpleBlob)
+ require.NoError(t, err)
+ fromBlobSimple, ok := fromBlob.(SimpleSigning)
+ require.True(t, ok)
+ assert.Equal(t, simpleSigData, fromBlobSimple.UntrustedSignature())
+
+ // Using the newer format is accepted as well.
+ fromBlob, err = FromBlob(append([]byte("\x00simple-signing\n"), simpleSigData...))
+ require.NoError(t, err)
+ fromBlobSimple, ok = fromBlob.(SimpleSigning)
+ require.True(t, ok)
+ assert.Equal(t, simpleSigData, fromBlobSimple.UntrustedSignature())
+
+}
+
+func TestBlobSigstore(t *testing.T) {
+ sigstoreSig := SigstoreFromComponents("mime-type", []byte("payload"),
+ map[string]string{"a": "b", "c": "d"})
+
+ sigstoreBlob, err := Blob(sigstoreSig)
+ require.NoError(t, err)
+ assert.True(t, bytes.HasPrefix(sigstoreBlob, []byte("\x00sigstore-json\n{")))
+
+ fromBlob, err := FromBlob(sigstoreBlob)
+ require.NoError(t, err)
+ fromBlobSigstore, ok := fromBlob.(Sigstore)
+ require.True(t, ok)
+ assert.Equal(t, sigstoreSig.UntrustedMIMEType(), fromBlobSigstore.UntrustedMIMEType())
+ assert.Equal(t, sigstoreSig.UntrustedPayload(), fromBlobSigstore.UntrustedPayload())
+ assert.Equal(t, sigstoreSig.UntrustedAnnotations(), fromBlobSigstore.UntrustedAnnotations())
+}
+
+func TestFromBlobInvalid(t *testing.T) {
+ // Round-tripping valid data has been tested in TestBlobSimpleSigning and TestBlobSigstore above.
+ for _, c := range []string{
+ "", // Empty
+ "\xFFsimple-signing\nhello", // Invalid first byte
+ "\x00simple-signing", // No newline
+ "\x00format\xFFname\ndata", // Non-ASCII format value
+ "\x00unknown-format\ndata", // Unknown format
+ } {
+ _, err := FromBlob([]byte(c))
+ assert.Error(t, err, fmt.Sprintf("%#v", c))
+ }
+}
+
+// mockFormatSignature returns a specified format
+type mockFormatSignature struct {
+ fmt FormatID
+}
+
+func (ms mockFormatSignature) FormatID() FormatID {
+ return ms.fmt
+}
+
+func (ms mockFormatSignature) blobChunk() ([]byte, error) {
+ panic("Unexpected call to a mock function")
+}
+
+func TestUnsupportedFormatError(t *testing.T) {
+ // Warning: The exact text returned by the function is not an API commitment.
+ for _, c := range []struct {
+ input Signature
+ expected string
+ }{
+ {SimpleSigningFromBlob(nil), "unsupported signature format simple-signing"},
+ {SigstoreFromComponents("mime-type", nil, nil), "unsupported signature format sigstore-json"},
+ {mockFormatSignature{FormatID("invalid")}, `unsupported, and unrecognized, signature format "invalid"`},
+ } {
+ res := UnsupportedFormatError(c.input)
+ assert.Equal(t, c.expected, res.Error(), string(c.input.FormatID()))
+ }
+}
diff --git a/internal/signature/sigstore.go b/internal/signature/sigstore.go
new file mode 100644
index 0000000..b8a9b36
--- /dev/null
+++ b/internal/signature/sigstore.go
@@ -0,0 +1,87 @@
+package signature
+
+import (
+ "encoding/json"
+
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+)
+
+const (
+ // from sigstore/cosign/pkg/types.SimpleSigningMediaType
+ SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
+ // from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
+ SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
+ // from sigstore/cosign/pkg/oci/static.BundleAnnotationKey
+ SigstoreSETAnnotationKey = "dev.sigstore.cosign/bundle"
+ // from sigstore/cosign/pkg/oci/static.CertificateAnnotationKey
+ SigstoreCertificateAnnotationKey = "dev.sigstore.cosign/certificate"
+ // from sigstore/cosign/pkg/oci/static.ChainAnnotationKey
+ SigstoreIntermediateCertificateChainAnnotationKey = "dev.sigstore.cosign/chain"
+)
+
+// Sigstore is a github.com/sigstore/cosign signature.
+// For the persistent-storage format used for blobChunk(), we want
+// a degree of forward compatibility against unexpected field changes
+// (as has happened before), which is why this data type
+// contains just a payload + annotations (including annotations
+// that we don’t recognize or support), instead of individual fields
+// for the known annotations.
+type Sigstore struct {
+ untrustedMIMEType string
+ untrustedPayload []byte
+ untrustedAnnotations map[string]string
+}
+
+// sigstoreJSONRepresentation needs the fields to be public, which we don’t want for
+// the main Sigstore type.
+type sigstoreJSONRepresentation struct {
+ UntrustedMIMEType string `json:"mimeType"`
+ UntrustedPayload []byte `json:"payload"`
+ UntrustedAnnotations map[string]string `json:"annotations"`
+}
+
+// SigstoreFromComponents returns a Sigstore object from its components.
+func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
+ return Sigstore{
+ untrustedMIMEType: untrustedMimeType,
+ untrustedPayload: slices.Clone(untrustedPayload),
+ untrustedAnnotations: maps.Clone(untrustedAnnotations),
+ }
+}
+
+// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
+func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
+ var v sigstoreJSONRepresentation
+ if err := json.Unmarshal(blobChunk, &v); err != nil {
+ return Sigstore{}, err
+ }
+ return SigstoreFromComponents(v.UntrustedMIMEType,
+ v.UntrustedPayload,
+ v.UntrustedAnnotations), nil
+}
+
+func (s Sigstore) FormatID() FormatID {
+ return SigstoreFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s Sigstore) blobChunk() ([]byte, error) {
+ return json.Marshal(sigstoreJSONRepresentation{
+ UntrustedMIMEType: s.UntrustedMIMEType(),
+ UntrustedPayload: s.UntrustedPayload(),
+ UntrustedAnnotations: s.UntrustedAnnotations(),
+ })
+}
+
+func (s Sigstore) UntrustedMIMEType() string {
+ return s.untrustedMIMEType
+}
+func (s Sigstore) UntrustedPayload() []byte {
+ return slices.Clone(s.untrustedPayload)
+}
+
+func (s Sigstore) UntrustedAnnotations() map[string]string {
+ return maps.Clone(s.untrustedAnnotations)
+}
diff --git a/internal/signature/sigstore_test.go b/internal/signature/sigstore_test.go
new file mode 100644
index 0000000..c1d3c9b
--- /dev/null
+++ b/internal/signature/sigstore_test.go
@@ -0,0 +1,71 @@
+package signature
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSigstoreFromComponents(t *testing.T) {
+ const mimeType = "mime-type"
+ payload := []byte("payload")
+ annotations := map[string]string{"a": "b", "c": "d"}
+
+ sig := SigstoreFromComponents(mimeType, payload, annotations)
+ assert.Equal(t, Sigstore{
+ untrustedMIMEType: mimeType,
+ untrustedPayload: payload,
+ untrustedAnnotations: annotations,
+ }, sig)
+}
+
+func TestSigstoreFromBlobChunk(t *testing.T) {
+ // Success
+ json := []byte(`{"mimeType":"mime-type","payload":"cGF5bG9hZA==", "annotations":{"a":"b","c":"d"}}`)
+ res, err := sigstoreFromBlobChunk(json)
+ require.NoError(t, err)
+ assert.Equal(t, "mime-type", res.UntrustedMIMEType())
+ assert.Equal(t, []byte("payload"), res.UntrustedPayload())
+ assert.Equal(t, map[string]string{"a": "b", "c": "d"}, res.UntrustedAnnotations())
+
+ // Invalid JSON
+ _, err = sigstoreFromBlobChunk([]byte("&"))
+ assert.Error(t, err)
+}
+
+func TestSigstoreFormatID(t *testing.T) {
+ sig := SigstoreFromComponents("mime-type", []byte("payload"),
+ map[string]string{"a": "b", "c": "d"})
+ assert.Equal(t, SigstoreFormat, sig.FormatID())
+}
+
+func TestSigstoreBlobChunk(t *testing.T) {
+ sig := SigstoreFromComponents("mime-type", []byte("payload"),
+ map[string]string{"a": "b", "c": "d"})
+ res, err := sig.blobChunk()
+ require.NoError(t, err)
+
+ expectedJSON := []byte(`{"mimeType":"mime-type","payload":"cGF5bG9hZA==", "annotations":{"a":"b","c":"d"}}`)
+ // Don’t directly compare the JSON representation so that we don’t test for formatting differences, just verify that it contains exactly the expected data.
+ var raw, expectedRaw map[string]any
+ err = json.Unmarshal(res, &raw)
+ require.NoError(t, err)
+ err = json.Unmarshal(expectedJSON, &expectedRaw)
+ require.NoError(t, err)
+ assert.Equal(t, expectedRaw, raw)
+}
+
+func TestSigstoreUntrustedPayload(t *testing.T) {
+ var payload = []byte("payload")
+ sig := SigstoreFromComponents("mime-type", payload,
+ map[string]string{"a": "b", "c": "d"})
+ assert.Equal(t, payload, sig.UntrustedPayload())
+}
+
+func TestSigstoreUntrustedAnnotations(t *testing.T) {
+ annotations := map[string]string{"a": "b", "c": "d"}
+ sig := SigstoreFromComponents("mime-type", []byte("payload"), annotations)
+ assert.Equal(t, annotations, sig.UntrustedAnnotations())
+}
diff --git a/internal/signature/simple.go b/internal/signature/simple.go
new file mode 100644
index 0000000..c093704
--- /dev/null
+++ b/internal/signature/simple.go
@@ -0,0 +1,29 @@
+package signature
+
+import "golang.org/x/exp/slices"
+
+// SimpleSigning is a “simple signing” signature.
+type SimpleSigning struct {
+ untrustedSignature []byte
+}
+
+// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
+func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
+ return SimpleSigning{
+ untrustedSignature: slices.Clone(blobChunk),
+ }
+}
+
+func (s SimpleSigning) FormatID() FormatID {
+ return SimpleSigningFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s SimpleSigning) blobChunk() ([]byte, error) {
+ return slices.Clone(s.untrustedSignature), nil
+}
+
+func (s SimpleSigning) UntrustedSignature() []byte {
+ return slices.Clone(s.untrustedSignature)
+}
diff --git a/internal/signature/simple_test.go b/internal/signature/simple_test.go
new file mode 100644
index 0000000..76537e2
--- /dev/null
+++ b/internal/signature/simple_test.go
@@ -0,0 +1,36 @@
+package signature
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSimpleSigningFromBlob(t *testing.T) {
+ var data = []byte("some contents")
+
+ sig := SimpleSigningFromBlob(data)
+ assert.Equal(t, SimpleSigning{untrustedSignature: data}, sig)
+}
+
+func TestSimpleSigningFormatID(t *testing.T) {
+ sig := SimpleSigningFromBlob([]byte("some contents"))
+ assert.Equal(t, SimpleSigningFormat, sig.FormatID())
+}
+
+func TestSimpleSigningBlobChunk(t *testing.T) {
+ var data = []byte("some contents")
+
+ sig := SimpleSigningFromBlob(data)
+ chunk, err := sig.blobChunk()
+ require.NoError(t, err)
+ assert.Equal(t, data, chunk)
+}
+
+func TestSimpleSigningUntrustedSignature(t *testing.T) {
+ var data = []byte("some contents")
+
+ sig := SimpleSigningFromBlob(data)
+ assert.Equal(t, data, sig.UntrustedSignature())
+}
diff --git a/internal/signature/testdata/simple.signature b/internal/signature/testdata/simple.signature
new file mode 120000
index 0000000..dae8bd5
--- /dev/null
+++ b/internal/signature/testdata/simple.signature
@@ -0,0 +1 @@
+../../../signature/fixtures/image.signature \ No newline at end of file
diff --git a/internal/signer/signer.go b/internal/signer/signer.go
new file mode 100644
index 0000000..5720254
--- /dev/null
+++ b/internal/signer/signer.go
@@ -0,0 +1,47 @@
+package signer
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/signature"
+)
+
+// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// This type is visible to external callers, so it has no public fields or methods apart from Close().
+//
+// The owner of a Signer must call Close() when done.
+type Signer struct {
+ implementation SignerImplementation
+}
+
+// NewSigner creates a public Signer from a SignerImplementation
+func NewSigner(impl SignerImplementation) *Signer {
+ return &Signer{implementation: impl}
+}
+
+func (s *Signer) Close() error {
+ return s.implementation.Close()
+}
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+// Alternatively, should SignImageManifest be provided a logging writer of some kind?
+func ProgressMessage(signer *Signer) string {
+ return signer.implementation.ProgressMessage()
+}
+
+// SignImageManifest invokes a SignerImplementation.
+// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage.
+func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) {
+ return signer.implementation.SignImageManifest(ctx, manifest, dockerReference)
+}
+
+// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// This interface is distinct from Signer so that implementations can be created outside of this package.
+type SignerImplementation interface {
+ // ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+ ProgressMessage() string
+ // SignImageManifest creates a new signature for manifest m as dockerReference.
+ SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error)
+ Close() error
+}
diff --git a/internal/signer/signer_test.go b/internal/signer/signer_test.go
new file mode 100644
index 0000000..eaa18cf
--- /dev/null
+++ b/internal/signer/signer_test.go
@@ -0,0 +1,87 @@
+package signer
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// mockSignerImplementation is a SignerImplementation used only for tests.
+type mockSignerImplementation struct {
+ progressMessage func() string
+ signImageManifest func(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error)
+ close func() error
+}
+
+func (ms *mockSignerImplementation) Close() error {
+ return ms.close()
+}
+
+func (ms *mockSignerImplementation) ProgressMessage() string {
+ return ms.progressMessage()
+}
+
+func (ms *mockSignerImplementation) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) {
+ return ms.signImageManifest(ctx, m, dockerReference)
+}
+
+func TestNewSigner(t *testing.T) {
+ closeError := errors.New("unique error")
+
+ si := mockSignerImplementation{
+ // Other functions are nil, so this ensures they are not called.
+ close: func() error { return closeError },
+ }
+ s := NewSigner(&si)
+ // Verify SignerImplementation methods are not visible even to determined callers
+ _, visible := any(s).(SignerImplementation)
+ assert.False(t, visible)
+ err := s.Close()
+ assert.Equal(t, closeError, err)
+}
+
+func TestProgressMessage(t *testing.T) {
+ si := mockSignerImplementation{
+ // Other functions are nil, so this ensures they are not called.
+ close: func() error { return nil },
+ }
+ s := NewSigner(&si)
+ defer s.Close()
+
+ const testMessage = "some unique string"
+ si.progressMessage = func() string {
+ return testMessage
+ }
+ message := ProgressMessage(s)
+ assert.Equal(t, testMessage, message)
+}
+
+func TestSignImageManifest(t *testing.T) {
+ si := mockSignerImplementation{
+ // Other functions are nil, so this ensures they are not called.
+ close: func() error { return nil },
+ }
+ s := NewSigner(&si)
+ defer s.Close()
+
+ testManifest := []byte("some manifest")
+ testDR, err := reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ testContext := context.WithValue(context.Background(), struct{}{}, "make this context unique")
+ testSig := signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, []byte("payload"), nil)
+ testErr := errors.New("some unique error")
+ si.signImageManifest = func(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) {
+ assert.Equal(t, testContext, ctx)
+ assert.Equal(t, testManifest, m)
+ assert.Equal(t, testDR, dockerReference)
+ return testSig, testErr
+ }
+ sig, err := SignImageManifest(testContext, s, testManifest, testDR)
+ assert.Equal(t, testSig, sig)
+ assert.Equal(t, testErr, err)
+}
diff --git a/internal/streamdigest/fixtures/Hello.uncompressed b/internal/streamdigest/fixtures/Hello.uncompressed
new file mode 100644
index 0000000..5ab2f8a
--- /dev/null
+++ b/internal/streamdigest/fixtures/Hello.uncompressed
@@ -0,0 +1 @@
+Hello \ No newline at end of file
diff --git a/internal/streamdigest/stream_digest.go b/internal/streamdigest/stream_digest.go
new file mode 100644
index 0000000..d5a5436
--- /dev/null
+++ b/internal/streamdigest/stream_digest.go
@@ -0,0 +1,40 @@
+package streamdigest
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/types"
+)
+
+// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo.
+// The temporary file is returned as an io.Reader along with a cleanup function.
+// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
+// If an error occurs, inputInfo is not modified.
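+//
+// A typical use (illustrative sketch; originalStream and inputInfo are whatever the
+// caller already has):
+//
+//	stream, cleanup, err := ComputeBlobInfo(sys, originalStream, &inputInfo)
+//	if err != nil {
+//		return err
+//	}
+//	defer cleanup()
+//	// inputInfo.Digest and inputInfo.Size are now set; read from stream instead of originalStream.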
+func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
+ diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob")
+ if err != nil {
+ return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
+ }
+ cleanup := func() {
+ diskBlob.Close()
+ os.Remove(diskBlob.Name())
+ }
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo)
+ written, err := io.Copy(diskBlob, stream)
+ if err != nil {
+ cleanup()
+ return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err)
+ }
+ _, err = diskBlob.Seek(0, io.SeekStart)
+ if err != nil {
+ cleanup()
+ return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err)
+ }
+ inputInfo.Digest = digester.Digest()
+ inputInfo.Size = written
+ return diskBlob, cleanup, nil
+}
diff --git a/internal/streamdigest/stream_digest_test.go b/internal/streamdigest/stream_digest_test.go
new file mode 100644
index 0000000..b0a1327
--- /dev/null
+++ b/internal/streamdigest/stream_digest_test.go
@@ -0,0 +1,36 @@
+package streamdigest
+
+import (
+ "io"
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestComputeBlobInfo(t *testing.T) {
+ inputInfo := types.BlobInfo{Digest: "", Size: -1}
+ fixtureFname := "fixtures/Hello.uncompressed"
+ fixtureInfo := types.BlobInfo{Digest: "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969", Size: 5}
+ fixtureBytes := []byte("Hello")
+
+ // open fixture
+ stream, err := os.Open(fixtureFname)
+ require.NoError(t, err, fixtureFname)
+ defer stream.Close()
+
+ // fill in Digest and Size for inputInfo
+ streamCopy, cleanup, err := ComputeBlobInfo(nil, stream, &inputInfo)
+ require.NoError(t, err)
+ defer cleanup()
+
+ // ensure inputInfo has been filled in with Digest and Size of fixture
+ assert.Equal(t, fixtureInfo, inputInfo)
+
+ // ensure streamCopy is the same as fixture
+ b, err := io.ReadAll(streamCopy)
+ require.NoError(t, err)
+ assert.Equal(t, fixtureBytes, b)
+}
diff --git a/internal/testing/explicitfilepath-tmpdir/tmpdir.go b/internal/testing/explicitfilepath-tmpdir/tmpdir.go
new file mode 100644
index 0000000..a47ada4
--- /dev/null
+++ b/internal/testing/explicitfilepath-tmpdir/tmpdir.go
@@ -0,0 +1,29 @@
+// Package tmpdir is a TESTING-ONLY utility.
+//
+// Some tests directly or indirectly exercising the directory/explicitfilepath
+// subpackage expect the path returned by os.MkdirTemp to be canonical in the
+// directory/explicitfilepath sense (absolute, no symlinks, cleaned up).
+//
+// os.MkdirTemp uses $TMPDIR by default, and on macOS, $TMPDIR is by
+// default set to /var/folders/…, with /var a symlink to /private/var,
+// which does not match our expectations. So, tests which want to use
+// os.MkdirTemp that way, can
+// import _ "github.com/containers/image/v5/internal/testing/explicitfilepath-tmpdir"
+// to ensure that $TMPDIR is canonical and usable as a base for testing
+// path canonicalization in its subdirectories.
+//
+// NEVER use this in non-testing subpackages!
+package tmpdir
+
+import (
+ "os"
+ "path/filepath"
+)
+
+func init() {
+ tmpDir := os.TempDir()
+ explicitTmpDir, err := filepath.EvalSymlinks(tmpDir)
+ if err == nil {
+ os.Setenv("TMPDIR", explicitTmpDir)
+ }
+}
diff --git a/internal/testing/gpgagent/gpg_agent.go b/internal/testing/gpgagent/gpg_agent.go
new file mode 100644
index 0000000..148b455
--- /dev/null
+++ b/internal/testing/gpgagent/gpg_agent.go
@@ -0,0 +1,16 @@
+package gpgagent
+
+import (
+ "os"
+ "os/exec"
+
+ "golang.org/x/exp/slices"
+)
+
+// Kill the running gpg-agent to drop unlocked keys.
+// This is useful to ensure tests don’t leave processes around (in TestMain), or for testing handling of invalid passphrases.
+func KillGPGAgent(gpgHomeDir string) error {
+ cmd := exec.Command("gpgconf", "--kill", "gpg-agent")
+ cmd.Env = append(slices.Clone(os.Environ()), "GNUPGHOME="+gpgHomeDir)
+ return cmd.Run()
+}
diff --git a/internal/testing/mocks/image_reference.go b/internal/testing/mocks/image_reference.go
new file mode 100644
index 0000000..bf01d00
--- /dev/null
+++ b/internal/testing/mocks/image_reference.go
@@ -0,0 +1,56 @@
+package mocks
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+)
+
+// ForbiddenImageReference is used when we don’t expect the ImageReference to be used in our tests.
+type ForbiddenImageReference struct{}
+
+// Transport is a mock that panics.
+func (ref ForbiddenImageReference) Transport() types.ImageTransport {
+ panic("unexpected call to a mock function")
+}
+
+// StringWithinTransport is a mock that panics.
+func (ref ForbiddenImageReference) StringWithinTransport() string {
+ panic("unexpected call to a mock function")
+}
+
+// DockerReference is a mock that panics.
+func (ref ForbiddenImageReference) DockerReference() reference.Named {
+ panic("unexpected call to a mock function")
+}
+
+// PolicyConfigurationIdentity is a mock that panics.
+func (ref ForbiddenImageReference) PolicyConfigurationIdentity() string {
+ panic("unexpected call to a mock function")
+}
+
+// PolicyConfigurationNamespaces is a mock that panics.
+func (ref ForbiddenImageReference) PolicyConfigurationNamespaces() []string {
+ panic("unexpected call to a mock function")
+}
+
+// NewImage is a mock that panics.
+func (ref ForbiddenImageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ panic("unexpected call to a mock function")
+}
+
+// NewImageSource is a mock that panics.
+func (ref ForbiddenImageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ panic("unexpected call to a mock function")
+}
+
+// NewImageDestination is a mock that panics.
+func (ref ForbiddenImageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ panic("unexpected call to a mock function")
+}
+
+// DeleteImage is a mock that panics.
+func (ref ForbiddenImageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ panic("unexpected call to a mock function")
+}
diff --git a/internal/testing/mocks/image_source.go b/internal/testing/mocks/image_source.go
new file mode 100644
index 0000000..754f5f1
--- /dev/null
+++ b/internal/testing/mocks/image_source.go
@@ -0,0 +1,47 @@
+package mocks
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// ForbiddenImageSource is used when we don't expect the ImageSource to be used in our tests.
+type ForbiddenImageSource struct{}
+
+// Reference is a mock that panics.
+func (f ForbiddenImageSource) Reference() types.ImageReference {
+ panic("Unexpected call to a mock function")
+}
+
+// Close is a mock that panics.
+func (f ForbiddenImageSource) Close() error {
+ panic("Unexpected call to a mock function")
+}
+
+// GetManifest is a mock that panics.
+func (f ForbiddenImageSource) GetManifest(context.Context, *digest.Digest) ([]byte, string, error) {
+ panic("Unexpected call to a mock function")
+}
+
+// GetBlob is a mock that panics.
+func (f ForbiddenImageSource) GetBlob(context.Context, types.BlobInfo, types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ panic("Unexpected call to a mock function")
+}
+
+// HasThreadSafeGetBlob is a mock that panics.
+func (f ForbiddenImageSource) HasThreadSafeGetBlob() bool {
+ panic("Unexpected call to a mock function")
+}
+
+// GetSignatures is a mock that panics.
+func (f ForbiddenImageSource) GetSignatures(context.Context, *digest.Digest) ([][]byte, error) {
+ panic("Unexpected call to a mock function")
+}
+
+// LayerInfosForCopy is a mock that panics.
+func (f ForbiddenImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
+ panic("Unexpected call to a mock function")
+}
diff --git a/internal/testing/mocks/image_transport.go b/internal/testing/mocks/image_transport.go
new file mode 100644
index 0000000..c551949
--- /dev/null
+++ b/internal/testing/mocks/image_transport.go
@@ -0,0 +1,24 @@
+package mocks
+
+import "github.com/containers/image/v5/types"
+
+// NameImageTransport is a mock of types.ImageTransport which returns itself in Name.
+type NameImageTransport string
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (name NameImageTransport) Name() string {
+ return string(name)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (name NameImageTransport) ParseReference(reference string) (types.ImageReference, error) {
+ panic("unexpected call to a mock function")
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will not be ""; that value is always allowed.
+func (name NameImageTransport) ValidatePolicyConfigurationScope(scope string) error {
+ panic("unexpected call to a mock function")
+}
diff --git a/internal/testing/mocks/unparsed_image.go b/internal/testing/mocks/unparsed_image.go
new file mode 100644
index 0000000..a2e2f84
--- /dev/null
+++ b/internal/testing/mocks/unparsed_image.go
@@ -0,0 +1,31 @@
+package mocks
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+)
+
+// ForbiddenUnparsedImage is used when we don't expect the UnparsedImage to be used in our tests.
+type ForbiddenUnparsedImage struct{}
+
+// Reference is a mock that panics.
+func (ref ForbiddenUnparsedImage) Reference() types.ImageReference {
+ panic("unexpected call to a mock function")
+}
+
+// Manifest is a mock that panics.
+func (ref ForbiddenUnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+ panic("unexpected call to a mock function")
+}
+
+// Signatures is a mock that panics.
+func (ref ForbiddenUnparsedImage) Signatures(context.Context) ([][]byte, error) {
+ panic("unexpected call to a mock function")
+}
+
+// UntrustedSignatures is a mock that panics.
+func (ref ForbiddenUnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+ panic("unexpected call to a mock function")
+}
diff --git a/internal/tmpdir/tmpdir.go b/internal/tmpdir/tmpdir.go
new file mode 100644
index 0000000..bab73ee
--- /dev/null
+++ b/internal/tmpdir/tmpdir.go
@@ -0,0 +1,44 @@
+package tmpdir
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/containers/image/v5/types"
+)
+
+// unixTempDirForBigFiles is the directory path to store big files on non-Windows systems.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path'
+var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
+
+// builtinUnixTempDirForBigFiles is the directory path to store big files.
+// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+// DO NOT change this, instead see unixTempDirForBigFiles above.
+const builtinUnixTempDirForBigFiles = "/var/tmp"
+
+const prefix = "container_images_"
+
+// temporaryDirectoryForBigFiles returns a directory for temporary (big) files.
+// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp,
+// which on systemd-based systems could be the unsuitable tmpfs filesystem.
+func temporaryDirectoryForBigFiles(sys *types.SystemContext) string {
+ if sys != nil && sys.BigFilesTemporaryDir != "" {
+ return sys.BigFilesTemporaryDir
+ }
+ var temporaryDirectoryForBigFiles string
+ if runtime.GOOS == "windows" {
+ temporaryDirectoryForBigFiles = os.TempDir()
+ } else {
+ temporaryDirectoryForBigFiles = unixTempDirForBigFiles
+ }
+ return temporaryDirectoryForBigFiles
+}
+
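+// CreateBigFileTemp creates a temporary file whose name starts with prefix+name in the
+// directory selected by temporaryDirectoryForBigFiles.
+// The caller is responsible for closing and removing the file, e.g. (illustrative):
+//
+//	f, err := CreateBigFileTemp(sys, "blob")
+//	if err != nil {
+//		return err
+//	}
+//	defer func() {
+//		f.Close()
+//		os.Remove(f.Name())
+//	}()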
+func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) {
+ return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
+}
+
+func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) {
+ return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
+}
diff --git a/internal/tmpdir/tmpdir_test.go b/internal/tmpdir/tmpdir_test.go
new file mode 100644
index 0000000..c36caf3
--- /dev/null
+++ b/internal/tmpdir/tmpdir_test.go
@@ -0,0 +1,54 @@
+package tmpdir
+
+import (
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCreateBigFileTemp(t *testing.T) {
+ f, err := CreateBigFileTemp(nil, "")
+ assert.NoError(t, err)
+ f.Close()
+ os.Remove(f.Name())
+
+ f, err = CreateBigFileTemp(nil, "foobar")
+ assert.NoError(t, err)
+ f.Close()
+ assert.True(t, strings.Contains(f.Name(), prefix+"foobar"))
+ os.Remove(f.Name())
+
+ var sys types.SystemContext
+ sys.BigFilesTemporaryDir = "/tmp"
+ f, err = CreateBigFileTemp(&sys, "foobar1")
+ assert.NoError(t, err)
+ f.Close()
+ assert.True(t, strings.Contains(f.Name(), "/tmp/"+prefix+"foobar1"))
+ os.Remove(f.Name())
+
+ sys.BigFilesTemporaryDir = "/tmp/bogus"
+ _, err = CreateBigFileTemp(&sys, "foobar1")
+ assert.Error(t, err)
+
+}
+
+func TestMkDirBigFileTemp(t *testing.T) {
+ d, err := MkDirBigFileTemp(nil, "foobar")
+ assert.NoError(t, err)
+ assert.True(t, strings.Contains(d, prefix+"foobar"))
+ os.RemoveAll(d)
+
+ var sys types.SystemContext
+ sys.BigFilesTemporaryDir = "/tmp"
+ d, err = MkDirBigFileTemp(&sys, "foobar1")
+ assert.NoError(t, err)
+ assert.True(t, strings.Contains(d, "/tmp/"+prefix+"foobar1"))
+ os.RemoveAll(d)
+
+ sys.BigFilesTemporaryDir = "/tmp/bogus"
+ _, err = MkDirBigFileTemp(&sys, "foobar1")
+ assert.Error(t, err)
+}
diff --git a/internal/unparsedimage/wrapper.go b/internal/unparsedimage/wrapper.go
new file mode 100644
index 0000000..fe65b1a
--- /dev/null
+++ b/internal/unparsedimage/wrapper.go
@@ -0,0 +1,38 @@
+package unparsedimage
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+)
+
+// wrapped provides the private.UnparsedImage operations
+// for an object that only implements types.UnparsedImage
+type wrapped struct {
+ types.UnparsedImage
+}
+
+// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API
+func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage {
+ if unparsed2, ok := unparsed.(private.UnparsedImage); ok {
+ return unparsed2
+ }
+ return &wrapped{
+ UnparsedImage: unparsed,
+ }
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+ sigs, err := w.Signatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ res := []signature.Signature{}
+ for _, sig := range sigs {
+ res = append(res, signature.SimpleSigningFromBlob(sig))
+ }
+ return res, nil
+}
diff --git a/internal/uploadreader/upload_reader.go b/internal/uploadreader/upload_reader.go
new file mode 100644
index 0000000..b95370a
--- /dev/null
+++ b/internal/uploadreader/upload_reader.go
@@ -0,0 +1,61 @@
+package uploadreader
+
+import (
+ "io"
+ "sync"
+)
+
+// UploadReader is a pass-through reader for use in sending non-trivial data using the net/http
+// package (http.NewRequest, http.Post and the like).
+//
+// The net/http package uses a separate goroutine to upload data to an HTTP connection,
+// and it is possible for the server to return a response (typically an error) before consuming
+// the full body of the request. In that case http.Client.Do can return with an error while
+// the body is still being read — regardless of the cancellation, if any, of http.Request.Context().
+//
+// As a result, any data used/updated by the io.Reader() provided as the request body may be
+// used/updated even after http.Client.Do returns, causing races.
+//
+// To fix this, UploadReader provides a synchronized Terminate() method, which can block for
+// a not-completely-negligible time (for a duration of the underlying Read()), but guarantees that
+// after Terminate() returns, the underlying reader is never used any more (unlike calling
+// the cancellation callback of context.WithCancel, which returns before any recipients may have
+// reacted to the cancellation).
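+//
+// A rough usage sketch (illustrative; the request setup and the expected status are
+// hypothetical and simplified):
+//
+//	body := NewUploadReader(blobStream)
+//	res, err := httpClient.Do(req) // req was built with req.Body reading from body
+//	if err == nil && res.StatusCode != expectedStatus {
+//		// The server replied before consuming the whole body; stop the upload goroutine
+//		// from touching blobStream any further.
+//		body.Terminate(fmt.Errorf("unexpected HTTP status %s", res.Status))
+//	}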
+type UploadReader struct {
+ mutex sync.Mutex
+ // The following members can only be used with mutex held
+ reader io.Reader
+ terminationError error // nil if not terminated yet
+}
+
+// NewUploadReader returns an UploadReader for an "underlying" reader.
+func NewUploadReader(underlying io.Reader) *UploadReader {
+ return &UploadReader{
+ reader: underlying,
+ terminationError: nil,
+ }
+}
+
+// Read returns the error set by Terminate, if any, or calls the underlying reader.
+// It is safe to call this from a different goroutine than Terminate.
+func (ur *UploadReader) Read(p []byte) (int, error) {
+ ur.mutex.Lock()
+ defer ur.mutex.Unlock()
+
+ if ur.terminationError != nil {
+ return 0, ur.terminationError
+ }
+ return ur.reader.Read(p)
+}
+
+// Terminate waits for in-progress Read calls, if any, to finish, and ensures that after
+// this function returns, any Read calls will fail with the provided error, and the underlying
+// reader will never be used any more.
+//
+// It is safe to call this from a different goroutine than Read.
+func (ur *UploadReader) Terminate(err error) {
+ ur.mutex.Lock() // May block for some time if ur.reader.Read() is in progress
+ defer ur.mutex.Unlock()
+
+ ur.terminationError = err
+}
diff --git a/internal/uploadreader/upload_reader_test.go b/internal/uploadreader/upload_reader_test.go
new file mode 100644
index 0000000..c91967a
--- /dev/null
+++ b/internal/uploadreader/upload_reader_test.go
@@ -0,0 +1,34 @@
+package uploadreader
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUploadReader(t *testing.T) {
+ // This is a smoke test in a single goroutine, without really testing the locking.
+
+ data := bytes.Repeat([]byte{0x01}, 65535)
+ // No termination
+ ur := NewUploadReader(bytes.NewReader(data))
+ read, err := io.ReadAll(ur)
+ require.NoError(t, err)
+ assert.Equal(t, data, read)
+
+ // Terminated
+ ur = NewUploadReader(bytes.NewReader(data))
+ readLen := len(data) / 2
+ read, err = io.ReadAll(io.LimitReader(ur, int64(readLen)))
+ require.NoError(t, err)
+ assert.Equal(t, data[:readLen], read)
+ terminationErr := errors.New("Terminated")
+ ur.Terminate(terminationErr)
+ read, err = io.ReadAll(ur)
+ assert.Equal(t, terminationErr, err)
+ assert.Len(t, read, 0)
+}
diff --git a/internal/useragent/useragent.go b/internal/useragent/useragent.go
new file mode 100644
index 0000000..7ac4969
--- /dev/null
+++ b/internal/useragent/useragent.go
@@ -0,0 +1,6 @@
+package useragent
+
+import "github.com/containers/image/v5/version"
+
+// DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise.
+var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
diff --git a/manifest/common.go b/manifest/common.go
new file mode 100644
index 0000000..1bdcf3d
--- /dev/null
+++ b/manifest/common.go
@@ -0,0 +1,152 @@
+package manifest
+
+import (
+ "fmt"
+
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func layerInfosToStrings(infos []LayerInfo) []string {
+ layers := make([]string, len(infos))
+ for i, info := range infos {
+ layers[i] = info.Digest.String()
+ }
+ return layers
+}
+
+// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
+// versions of “the same kind of content”.
+// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed;
+// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
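+//
+// A hypothetical entry for a gzip-or-uncompressed layer kind might look like
+// (the MIME type strings are purely illustrative):
+//
+//	{
+//		mtsUncompressed:                    "application/example.layer.tar",
+//		compressiontypes.GzipAlgorithmName: "application/example.layer.tar+gzip",
+//		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+//	}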
+type compressionMIMETypeSet map[string]string
+
+const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant
+const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
+
+// findCompressionMIMETypeSet returns a pointer to a compressionMIMETypeSet in variantTable that contains a value of mimeType, or nil if not found
+func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet {
+ for _, variants := range variantTable {
+ for _, mt := range variants {
+ if mt == mimeType {
+ return variants
+ }
+ }
+ }
+ return nil
+}
+
+// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
+// to mean "no compression"), based on variantTable.
+// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
+// that differ only in what type of compression is applied, but it can't be combined with this
+// algorithm to produce an updated MIME type that complies with the standard that defines mimeType.
+// If the compression algorithm is unrecognized, or mimeType is not known to have variants that
+// differ from it only in what type of compression has been applied, the returned error will not be
+// a ManifestLayerCompressionIncompatibilityError.
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) {
+ if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+ return "", fmt.Errorf("cannot update unknown MIME type")
+ }
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ if variants != nil {
+ name := mtsUncompressed
+ if algorithm != nil {
+ name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
+ }
+ if res, ok := variants[name]; ok {
+ if res != mtsUnsupportedMIMEType {
+ return res, nil
+ }
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)}
+ }
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
+ }
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
+ }
+ // We can't very well say “the idea of no compression is unknown”
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
+ }
+ if algorithm != nil {
+ return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
+ }
+ return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
+}
+
+// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
+// mimeType, based on variantTable. It may use updated.Digest for error messages.
+// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
+// that differ only in what type of compression is applied, but applying updated.CompressionOperation
+// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the
+// standard that defines mimeType.
+func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) {
+ // Note that manifests in containers-storage might be reporting the
+ // wrong media type since the original manifests are stored while layers
+ // are decompressed in storage. Hence, we need to consider the case
+ // that an already {de}compressed layer should be {de}compressed;
+ // compressionVariantMIMEType does that by not caring whether the original is
+ // {de}compressed.
+ switch updated.CompressionOperation {
+ case types.PreserveOriginal:
+ // Force a change to the media type if we're being told to use a particular compressor,
+ // since it might be different from the one associated with the media type. Otherwise,
+ // try to keep the original media type.
+ if updated.CompressionAlgorithm != nil {
+ return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
+ }
+ // Keep the original media type.
+ return mimeType, nil
+
+ case types.Decompress:
+ return compressionVariantMIMEType(variantTable, mimeType, nil)
+
+ case types.Compress:
+ if updated.CompressionAlgorithm == nil {
+ logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest)
+ return mimeType, nil
+ }
+ return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
+
+ default:
+ return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation)
+ }
+}
+
+// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm
+// could not be applied to a layer MIME type. A caller that receives this should either retry
+// the call with a different compression algorithm, or attempt to use a different manifest type.
+type ManifestLayerCompressionIncompatibilityError struct {
+ text string
+}
+
+func (m ManifestLayerCompressionIncompatibilityError) Error() string {
+ return m.text
+}
+
+// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType
+// Note that the caller still needs to worry about a specific algorithm not being supported.
+func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool {
+ if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+ return false
+ }
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
+}
+
+// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer {
+ layers := make([]types.ImageInspectLayer, len(infos))
+ for i, info := range infos {
+ layers[i].MIMEType = info.MediaType
+ layers[i].Digest = info.Digest
+ layers[i].Size = info.Size
+ layers[i].Annotations = info.Annotations
+ }
+ return layers
+}
diff --git a/manifest/common_test.go b/manifest/common_test.go
new file mode 100644
index 0000000..db19b31
--- /dev/null
+++ b/manifest/common_test.go
@@ -0,0 +1,328 @@
+package manifest
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// Test that parser() rejects all of the provided manifest fixtures.
+// Intended to help test manifest parsers' detection of schema mismatches.
+func testManifestFixturesAreRejected(t *testing.T, parser func([]byte) error, fixtures []string) {
+ for _, fixture := range fixtures {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", fixture))
+ require.NoError(t, err, fixture)
+ err = parser(manifest)
+ assert.Error(t, err, fixture)
+ }
+}
+
+// Test that parser() rejects validManifest with an added top-level field with any of the provided field names.
+// Intended to help test callers of validateUnambiguousManifestFormat.
+func testValidManifestWithExtraFieldsIsRejected(t *testing.T, parser func([]byte) error,
+ validManifest []byte, fields []string) {
+ for _, field := range fields {
+ // end (the final '}') is not always at len(validManifest)-1 because the manifest can end with
+ // white space.
+ end := bytes.LastIndexByte(validManifest, '}')
+ require.NotEqual(t, end, -1)
+ updatedManifest := []byte(string(validManifest[:end]) +
+ fmt.Sprintf(`,"%s":[]}`, field))
+ err := parser(updatedManifest)
+ // Make sure it is the error from validateUnambiguousManifestFormat, not something that
+ // went wrong with creating updatedManifest.
+ assert.ErrorContains(t, err, "rejecting ambiguous manifest", field)
+ }
+}
+
+func TestLayerInfosToStrings(t *testing.T) {
+ strings := layerInfosToStrings([]LayerInfo{})
+ assert.Equal(t, []string{}, strings)
+
+ strings = layerInfosToStrings([]LayerInfo{
+ {
+ BlobInfo: types.BlobInfo{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+ Size: 32,
+ },
+ EmptyLayer: true,
+ },
+ {
+ BlobInfo: types.BlobInfo{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ Size: 8841833,
+ },
+ EmptyLayer: false,
+ },
+ {
+ BlobInfo: types.BlobInfo{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ Size: 291,
+ },
+ EmptyLayer: false,
+ },
+ {
+ BlobInfo: types.BlobInfo{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+ Size: 32,
+ },
+ EmptyLayer: true,
+ },
+ })
+ assert.Equal(t, []string{
+ "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+ "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
+ "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
+ "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+ }, strings)
+}
+
+func TestCompressionVariantMIMEType(t *testing.T) {
+ sets := []compressionMIMETypeSet{
+ {mtsUncompressed: "AU", compressiontypes.GzipAlgorithmName: "AG" /* No zstd variant */},
+ {mtsUncompressed: "BU", compressiontypes.GzipAlgorithmName: "BG", compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType},
+ { /* No uncompressed variant */ compressiontypes.GzipAlgorithmName: "CG", compressiontypes.ZstdAlgorithmName: "CZ"},
+ {mtsUncompressed: "", compressiontypes.GzipAlgorithmName: "DG"},
+ }
+
+ for _, c := range []struct {
+ input string
+ algo *compressiontypes.Algorithm
+ expected string
+ }{
+ {"AU", nil, "AU"}, {"AU", &compression.Gzip, "AG"}, {"AU", &compression.Zstd, ""},
+ {"AG", nil, "AU"}, {"AG", &compression.Gzip, "AG"}, {"AG", &compression.Zstd, ""},
+ {"BU", &compression.Zstd, ""},
+ {"BG", &compression.Zstd, ""},
+ {"CG", nil, ""}, {"CG", &compression.Zstd, "CZ"},
+ {"CZ", nil, ""}, {"CZ", &compression.Gzip, "CG"},
+ {"DG", nil, ""},
+ {"unknown", nil, ""}, {"unknown", &compression.Gzip, ""},
+ {"", nil, ""}, {"", &compression.Gzip, ""},
+ } {
+ res, err := compressionVariantMIMEType(sets, c.input, c.algo)
+ if c.expected == "" {
+ assert.Error(t, err, c.input)
+ } else {
+ require.NoError(t, err, c.input)
+ assert.Equal(t, c.expected, res, c.input)
+ }
+ }
+}
+
+func TestUpdatedMIMEType(t *testing.T) {
+ // all known types, PreserveOriginal
+ preserve := []struct {
+ compression []compressionMIMETypeSet
+ mimeType string
+ }{
+ {schema2CompressionMIMETypeSets, DockerV2Schema1MediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema1SignedMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2MediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ConfigMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2LayerMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2SchemaLayerMediaTypeUncompressed},
+ {schema2CompressionMIMETypeSets, DockerV2ListMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ForeignLayerMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ForeignLayerMediaTypeGzip},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeDescriptor},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeLayoutHeader},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageManifest},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageIndex},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayer},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerGzip},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerZstd},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributable}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributableGzip}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributableZstd}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageConfig},
+ }
+ for i, c := range preserve {
+ update := types.BlobInfo{
+ MediaType: c.mimeType,
+ CompressionOperation: types.PreserveOriginal,
+ }
+ updatedType, err := updatedMIMEType(c.compression, c.mimeType, update)
+ require.NoErrorf(t, err, "%d: updatedMIMEType(%q, %+v) failed unexpectedly", i, c.mimeType, update)
+ assert.Equalf(t, c.mimeType, updatedType, "%d: updatedMIMEType(%q, %+v)", i, c.mimeType, update)
+ }
+
+ // known types where Decompress is expected to succeed
+ decompressSuccess := []struct {
+ compression []compressionMIMETypeSet
+ mimeType string
+ updatedType string
+ }{
+ {schema2CompressionMIMETypeSets, DockerV2SchemaLayerMediaTypeUncompressed, DockerV2SchemaLayerMediaTypeUncompressed},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2ForeignLayerMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2LayerMediaType, DockerV2SchemaLayerMediaTypeUncompressed},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaType},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayer},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayer},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageLayer},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributable}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributable}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerNonDistributable}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ }
+ for i, c := range decompressSuccess {
+ update := types.BlobInfo{
+ MediaType: c.mimeType,
+ CompressionOperation: types.Decompress,
+ }
+ updatedType, err := updatedMIMEType(c.compression, c.mimeType, update)
+ require.NoErrorf(t, err, "%d: updatedMIMEType(%q, %+v) failed unexpectedly", i, c.mimeType, update)
+ assert.Equalf(t, c.updatedType, updatedType, "%d: updatedMIMEType(%q, %+v)", i, c.mimeType, update)
+ }
+
+ // known types where Decompress is expected to fail
+ decompressFailure := []struct {
+ compression []compressionMIMETypeSet
+ mimeType string
+ }{
+ {schema2CompressionMIMETypeSets, DockerV2Schema1MediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema1SignedMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2MediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ConfigMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2ListMediaType},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeDescriptor},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeLayoutHeader},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageManifest},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageIndex},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageConfig},
+ }
+ for i, c := range decompressFailure {
+ update := types.BlobInfo{
+ MediaType: c.mimeType,
+ CompressionOperation: types.Decompress,
+ }
+ _, err := updatedMIMEType(c.compression, c.mimeType, update)
+ require.Errorf(t, err, "%d: updatedMIMEType(%q, %+v) should have failed", i, c.mimeType, update)
+ }
+
+ require.Equalf(t, len(preserve), len(decompressSuccess)+len(decompressFailure), "missing some decompression tests")
+
+ // all known types where Compress with gzip should succeed
+ compressGzipSuccess := []struct {
+ compression []compressionMIMETypeSet
+ mimeType string
+ updatedType string
+ }{
+ {schema2CompressionMIMETypeSets, DockerV2Schema2LayerMediaType, DockerV2Schema2LayerMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2ForeignLayerMediaTypeGzip},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerGzip},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageLayerGzip},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableGzip}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerNonDistributableGzip}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ }
+ for i, c := range compressGzipSuccess {
+ update := types.BlobInfo{
+ MediaType: c.mimeType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ }
+ updatedType, err := updatedMIMEType(c.compression, c.mimeType, update)
+ require.NoErrorf(t, err, "%d: updatedMIMEType(%q, %+v) failed unexpectedly", i, c.mimeType, update)
+ assert.Equalf(t, c.updatedType, updatedType, "%d: updatedMIMEType(%q, %+v)", i, c.mimeType, update)
+ }
+
+ // known types where Compress with gzip is expected to fail
+ compressGzipFailure := []struct {
+ compression []compressionMIMETypeSet
+ mimeType string
+ }{
+ {schema2CompressionMIMETypeSets, DockerV2Schema1MediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema1SignedMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2MediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ConfigMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2ListMediaType},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeDescriptor},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeLayoutHeader},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageManifest},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageIndex},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageConfig},
+ }
+ for i, c := range compressGzipFailure {
+ update := types.BlobInfo{
+ MediaType: c.mimeType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ }
+ _, err := updatedMIMEType(c.compression, c.mimeType, update)
+ require.Errorf(t, err, "%d: updatedMIMEType(%q, %+v) should have failed", i, c.mimeType, update)
+ }
+
+ require.Equalf(t, len(preserve), len(compressGzipSuccess)+len(compressGzipFailure), "missing some gzip compression tests")
+
+ // known types where Compress with zstd is expected to succeed
+ compressZstdSuccess := []struct {
+ compression []compressionMIMETypeSet
+ mimeType string
+ updatedType string
+ }{
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerZstd},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageLayerZstd},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableZstd}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerNonDistributableZstd}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ }
+ for i, c := range compressZstdSuccess {
+ update := types.BlobInfo{
+ MediaType: c.mimeType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ }
+ updatedType, err := updatedMIMEType(c.compression, c.mimeType, update)
+ require.NoErrorf(t, err, "%d: updatedMIMEType(%q, %+v) failed unexpectedly", i, c.mimeType, update)
+ assert.Equalf(t, c.updatedType, updatedType, "%d: updatedMIMEType(%q, %+v)", i, c.mimeType, update)
+ }
+
+ // known types where Compress with zstd is expected to fail
+ compressZstdFailure := []struct {
+ compression []compressionMIMETypeSet
+ mimeType string
+ }{
+ {schema2CompressionMIMETypeSets, DockerV2SchemaLayerMediaTypeUncompressed},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2LayerMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ForeignLayerMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ForeignLayerMediaTypeGzip},
+ {schema2CompressionMIMETypeSets, DockerV2Schema1MediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema1SignedMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2MediaType},
+ {schema2CompressionMIMETypeSets, DockerV2Schema2ConfigMediaType},
+ {schema2CompressionMIMETypeSets, DockerV2ListMediaType},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeDescriptor},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeLayoutHeader},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageManifest},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageIndex},
+ {oci1CompressionMIMETypeSets, imgspecv1.MediaTypeImageConfig},
+ }
+ for i, c := range compressZstdFailure {
+ update := types.BlobInfo{
+ MediaType: c.mimeType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ }
+ _, err := updatedMIMEType(c.compression, c.mimeType, update)
+ require.Errorf(t, err, "%d: updatedMIMEType(%q, %+v) should have failed", i, c.mimeType, update)
+ }
+
+ require.Equalf(t, len(preserve), len(compressZstdSuccess)+len(compressZstdFailure), "missing some zstd compression tests")
+}
diff --git a/manifest/docker_schema1.go b/manifest/docker_schema1.go
new file mode 100644
index 0000000..a80af70
--- /dev/null
+++ b/manifest/docker_schema1.go
@@ -0,0 +1,331 @@
+package manifest
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/regexp"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/opencontainers/go-digest"
+ "golang.org/x/exp/slices"
+)
+
+// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
+type Schema1FSLayers struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+// Schema1History is an entry of the "history" array in docker/distribution schema 1.
+type Schema1History struct {
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Schema1 is a manifest in docker/distribution schema 1.
+type Schema1 struct {
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Architecture string `json:"architecture"`
+ FSLayers []Schema1FSLayers `json:"fsLayers"`
+ History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
+ ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
+ SchemaVersion int `json:"schemaVersion"`
+}
+
+type schema1V1CompatibilityContainerConfig struct {
+ Cmd []string
+}
+
+// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
+type Schema1V1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessarily a literal representation of the original blob;
+// layers with duplicate IDs are eliminated.)
+func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) {
+ s1 := Schema1{}
+ if err := json.Unmarshal(manifestBlob, &s1); err != nil {
+ return nil, err
+ }
+ if s1.SchemaVersion != 1 {
+ return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ }
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType,
+ manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil {
+ return nil, err
+ }
+ if err := s1.initialize(); err != nil {
+ return nil, err
+ }
+ if err := s1.fixManifestLayers(); err != nil {
+ return nil, err
+ }
+ return &s1, nil
+}
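+
+// A typical caller-side round trip looks roughly like this (sketch only):
+//
+//	m, err := manifest.Schema1FromManifest(blob)
+//	if err != nil { /* handle */ }
+//	for _, layer := range m.LayerInfos() {
+//		_ = layer.Digest // schema1 only records digests; sizes are reported as -1
+//	}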
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
+ var name, tag string
+ if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref)
+ if tagged, ok := ref.(reference.NamedTagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+ s1 := Schema1{
+ Name: name,
+ Tag: tag,
+ Architecture: architecture,
+ FSLayers: fsLayers,
+ History: history,
+ SchemaVersion: 1,
+ }
+ if err := s1.initialize(); err != nil {
+ return nil, err
+ }
+ return &s1, nil
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+ copy := *src
+ return &copy
+}
+
+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+ if len(m.FSLayers) != len(m.History) {
+ return errors.New("length of history not equal to number of layers")
+ }
+ if len(m.FSLayers) == 0 {
+ return errors.New("no FSLayers in manifest")
+ }
+ m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+ for i, h := range m.History {
+ if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+ return fmt.Errorf("parsing v2s1 history entry %d: %w", i, err)
+ }
+ }
+ return nil
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []LayerInfo {
+ layers := make([]LayerInfo, len(m.FSLayers))
+ for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+ layers[(len(m.FSLayers)-1)-i] = LayerInfo{
+ BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1},
+ EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
+ }
+ }
+ return layers
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
+ if len(m.FSLayers) != len(layerInfos) {
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+ }
+ m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
+ for i, info := range layerInfos {
+ // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
+ // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+ // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+ m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
+ if info.CryptoOperation != types.PreserveOriginalCrypto {
+ return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest)
+ }
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema1) Serialize() ([]byte, error) {
+ // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+ unsigned, err := json.Marshal(*m)
+ if err != nil {
+ return nil, err
+ }
+ return AddDummyV2S1Signature(unsigned)
+}
+
+// fixManifestLayers, after validating the supplied manifest
+// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History),
+// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates,
+// both from m.History and m.FSLayers).
+// Note that even after this succeeds, m.FSLayers may contain duplicate entries
+// (for Dockerfile operations which change the configuration but not the filesystem).
+func (m *Schema1) fixManifestLayers() error {
+ // m.initialize() has verified that len(m.FSLayers) == len(m.History)
+ for _, compat := range m.ExtractedV1Compatibility {
+ if err := validateV1ID(compat.ID); err != nil {
+ return err
+ }
+ }
+ if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" {
+ return errors.New("Invalid parent ID in the base layer of the image")
+ }
+	// Check for duplicate IDs in general, so that we return an error instead of a deadlock.
+ idmap := set.New[string]()
+ var lastID string
+ for _, img := range m.ExtractedV1Compatibility {
+		// Skip IDs that appear consecutively; those are handled later.
+ if img.ID != lastID && idmap.Contains(img.ID) {
+ return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ }
+ lastID = img.ID
+ idmap.Add(lastID)
+ }
+ // backwards loop so that we keep the remaining indexes after removing items
+ for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
+ if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
+ m.FSLayers = slices.Delete(m.FSLayers, i, i+1)
+ m.History = slices.Delete(m.History, i, i+1)
+ m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
+ } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
+ return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
+ }
+ }
+ return nil
+}
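+
+// Illustrative effect (hypothetical IDs, listed top layer first): given
+// [C(parent=B), B(parent=A), B(parent=A), A], one of the duplicate B entries is dropped together
+// with its fsLayer and history entry, yielding [C, B, A]; a Parent field that does not match the
+// next entry's ID is reported as an error instead.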
+
+var validHex = regexp.Delayed(`^([a-f0-9]{64})$`)
+
+func validateV1ID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return fmt.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ s1 := &Schema2V1Image{}
+ if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
+ return nil, err
+ }
+ layerInfos := m.LayerInfos()
+ i := &types.ImageInspectInfo{
+ Tag: m.Tag,
+ Created: &s1.Created,
+ DockerVersion: s1.DockerVersion,
+ Architecture: s1.Architecture,
+ Variant: s1.Variant,
+ Os: s1.OS,
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
+ Author: s1.Author,
+ }
+ if s1.Config != nil {
+ i.Labels = s1.Config.Labels
+ i.Env = s1.Config.Env
+ }
+ return i, nil
+}
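+
+// Note that schema1 embeds its configuration in the manifest itself, so the config-blob getter is
+// ignored and callers can pass nil, roughly:
+//
+//	info, err := m.Inspect(nil)
+//	// info.Layers / info.LayersData describe the fsLayers, root layer first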
+
+// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs.
+func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
+ // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields
+ // that aren't directly comparable using info from the manifest.
+ if len(m.History) == 0 {
+ return nil, errors.New("image has no layers")
+ }
+ s1 := Schema2V1Image{}
+ config := []byte(m.History[0].V1Compatibility)
+ err := json.Unmarshal(config, &s1)
+ if err != nil {
+ return nil, fmt.Errorf("decoding configuration: %w", err)
+ }
+ // Images created with versions prior to 1.8.3 require us to re-encode the encoded object,
+ // adding some fields that aren't "omitempty".
+ if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") {
+ config, err = json.Marshal(&s1)
+ if err != nil {
+ return nil, fmt.Errorf("re-encoding compat image config %#v: %w", s1, err)
+ }
+ }
+ // Build the history.
+ convertedHistory := []Schema2History{}
+ for _, compat := range m.ExtractedV1Compatibility {
+ hitem := Schema2History{
+ Created: compat.Created,
+ CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),
+ Author: compat.Author,
+ Comment: compat.Comment,
+ EmptyLayer: compat.ThrowAway,
+ }
+ convertedHistory = append([]Schema2History{hitem}, convertedHistory...)
+ }
+ // Build the rootfs information. We need the decompressed sums that we've been
+ // calculating to fill in the DiffIDs. It's expected (but not enforced by us)
+ // that the number of diffIDs corresponds to the number of non-EmptyLayer
+ // entries in the history.
+ rootFS := &Schema2RootFS{
+ Type: "layers",
+ DiffIDs: diffIDs,
+ }
+ // And now for some raw manipulation.
+ raw := make(map[string]*json.RawMessage)
+ err = json.Unmarshal(config, &raw)
+ if err != nil {
+ return nil, fmt.Errorf("re-decoding compat image config %#v: %w", s1, err)
+ }
+ // Drop some fields.
+ delete(raw, "id")
+ delete(raw, "parent")
+ delete(raw, "parent_id")
+ delete(raw, "layer_id")
+ delete(raw, "throwaway")
+ delete(raw, "Size")
+ // Add the history and rootfs information.
+ rootfs, err := json.Marshal(rootFS)
+ if err != nil {
+ return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
+ }
+ rawRootfs := json.RawMessage(rootfs)
+ raw["rootfs"] = &rawRootfs
+ history, err := json.Marshal(convertedHistory)
+ if err != nil {
+ return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err)
+ }
+ rawHistory := json.RawMessage(history)
+ raw["history"] = &rawHistory
+ // Encode the result.
+ config, err = json.Marshal(raw)
+ if err != nil {
+ return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err)
+ }
+ return config, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
+ image, err := m.ToSchema2Config(diffIDs)
+ if err != nil {
+ return "", err
+ }
+ return digest.FromBytes(image).Hex(), nil
+}
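+
+// A caller-side sketch (the diffIDs below are hypothetical placeholders; they are the uncompressed
+// digests corresponding to the non-empty layers, in order):
+//
+//	diffIDs := []digest.Digest{"sha256:…", "sha256:…"}
+//	id, err := m.ImageID(diffIDs)
+//	// id is a hex digest of the generated schema2-style config blob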
diff --git a/manifest/docker_schema1_test.go b/manifest/docker_schema1_test.go
new file mode 100644
index 0000000..fc16e10
--- /dev/null
+++ b/manifest/docker_schema1_test.go
@@ -0,0 +1,273 @@
+package manifest
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// DiffID values corresponding to layers of schema2-to-schema1-by-docker.json
+var schema1FixtureLayerDiffIDs = []digest.Digest{
+ "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab",
+ "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c",
+ "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56",
+ "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9",
+ "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b",
+}
+
+func manifestSchema1FromFixture(t *testing.T, fixture string) *Schema1 {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", fixture))
+ require.NoError(t, err)
+
+ m, err := Schema1FromManifest(manifest)
+ require.NoError(t, err)
+ return m
+}
+
+func TestSchema1FromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("fixtures", "schema2-to-schema1-by-docker.json"))
+ require.NoError(t, err)
+
+ // Invalid manifest version is rejected
+ m, err := Schema1FromManifest(validManifest)
+ require.NoError(t, err)
+ m.SchemaVersion = 2
+ manifest, err := m.Serialize()
+ require.NoError(t, err)
+ _, err = Schema1FromManifest(manifest)
+ assert.Error(t, err)
+
+ parser := func(m []byte) error {
+ _, err := Schema1FromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "v2s2.manifest.json", "v2list.manifest.json",
+ "ociv1.manifest.json", "ociv1.image.index.json",
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"config", "layers", "manifests"})
+}
+
+func TestSchema1Initialize(t *testing.T) {
+ // Test this indirectly via Schema1FromComponents; otherwise we would have to break the API and create an instance manually.
+
+ // FIXME: this should eventually share a fixture with the other parsing tests.
+ fsLayers := []Schema1FSLayers{
+ {BlobSum: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"},
+ {BlobSum: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788"},
+ {BlobSum: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6"},
+ {BlobSum: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e"},
+ {BlobSum: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a"},
+ {BlobSum: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4"},
+ }
+ history := []Schema1History{
+ {V1Compatibility: "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"kolla_start\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"3bf9afe371220b1eb1c57bec39b5a99ba976c36c92d964a1c014584f95f51e33\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"container_config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"USER [nova]\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"sha256:274ce4dcbeb09fa173a5d50203ae5cec28f456d1b8b59477b47a42bd74d068bf\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 
nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"created\":\"2018-01-25T00:37:48.268558Z\",\"docker_version\":\"1.12.6\",\"id\":\"486cbbaf6c6f7d890f9368c86eda3f4ebe3ae982b75098037eb3c3cc6f0e0cdf\",\"os\":\"linux\",\"parent\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\"}"},
+ {V1Compatibility: "{\"id\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\",\"parent\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"created\":\"2018-01-24T23:08:25.300741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"},
+ {V1Compatibility: "{\"id\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"parent\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"created\":\"2018-01-24T22:00:57.807862Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"},
+ {V1Compatibility: "{\"id\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"parent\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"created\":\"2018-01-24T21:40:32.494686Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"},
+ {V1Compatibility: "{\"id\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"parent\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"created\":\"2017-11-21T16:49:37.292899Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'\"]},\"author\":\"Red Hat, Inc.\"}"},
+ {V1Compatibility: "{\"id\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"comment\":\"Imported from -\",\"created\":\"2017-11-21T16:47:27.755341705Z\",\"container_config\":{\"Cmd\":[\"\"]}}"},
+ }
+
+ // Valid input
+ m, err := Schema1FromComponents(nil, fsLayers, history, "amd64")
+ assert.NoError(t, err)
+ assert.Equal(t, []Schema1V1Compatibility{
+ {
+ ID: "486cbbaf6c6f7d890f9368c86eda3f4ebe3ae982b75098037eb3c3cc6f0e0cdf",
+ Parent: "20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20",
+ Created: time.Date(2018, 1, 25, 0, 37, 48, 268558000, time.UTC),
+ ContainerConfig: schema1V1CompatibilityContainerConfig{
+ Cmd: []string{"/bin/sh", "-c", "#(nop) ", "USER [nova]"},
+ },
+ ThrowAway: false,
+ },
+ {
+ ID: "20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20",
+ Parent: "47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa",
+ Created: time.Date(2018, 1, 24, 23, 8, 25, 300741000, time.UTC),
+ ContainerConfig: schema1V1CompatibilityContainerConfig{
+ Cmd: []string{"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"},
+ },
+ ThrowAway: false,
+ },
+ {
+ ID: "47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa",
+ Parent: "cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824",
+ Created: time.Date(2018, 1, 24, 22, 0, 57, 807862000, time.UTC),
+ ContainerConfig: schema1V1CompatibilityContainerConfig{
+ Cmd: []string{"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"},
+ },
+ ThrowAway: false,
+ },
+ {
+ ID: "cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824",
+ Parent: "0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2",
+ Created: time.Date(2018, 1, 24, 21, 40, 32, 494686000, time.UTC),
+ ContainerConfig: schema1V1CompatibilityContainerConfig{
+ Cmd: []string{"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"},
+ },
+ ThrowAway: false,
+ },
+ {
+ ID: "0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2",
+ Parent: "3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345",
+ Created: time.Date(2017, 11, 21, 16, 49, 37, 292899000, time.UTC),
+ ContainerConfig: schema1V1CompatibilityContainerConfig{
+ Cmd: []string{"/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'"},
+ },
+ Author: "Red Hat, Inc.",
+ ThrowAway: false,
+ },
+ {
+ ID: "3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345",
+ Comment: "Imported from -",
+ Created: time.Date(2017, 11, 21, 16, 47, 27, 755341705, time.UTC),
+ ContainerConfig: schema1V1CompatibilityContainerConfig{
+ Cmd: []string{""},
+ },
+ ThrowAway: false,
+ },
+ }, m.ExtractedV1Compatibility)
+
+ // Layer and history length mismatch
+ _, err = Schema1FromComponents(nil, fsLayers, history[1:], "amd64")
+ assert.Error(t, err)
+
+ // No layers/history
+ _, err = Schema1FromComponents(nil, []Schema1FSLayers{}, []Schema1History{}, "amd64")
+ assert.Error(t, err)
+
+ // Invalid history JSON
+ _, err = Schema1FromComponents(nil,
+ []Schema1FSLayers{{BlobSum: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"}},
+ []Schema1History{{V1Compatibility: "-"}},
+ "amd64")
+ assert.Error(t, err)
+}
+
+func TestSchema1LayerInfos(t *testing.T) {
+ // We use this instead of original schema1 manifests, because those, surprisingly,
+ // seem not to set the "throwaway" flag.
+ m := manifestSchema1FromFixture(t, "schema2-to-schema1-by-docker.json") // FIXME: Test also Schema1FromComponents
+ assert.Equal(t, []LayerInfo{
+ {BlobInfo: types.BlobInfo{Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: -1}, EmptyLayer: false},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: -1}, EmptyLayer: false},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: -1}, EmptyLayer: false},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: -1}, EmptyLayer: false},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: -1}, EmptyLayer: false},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ }, m.LayerInfos())
+}
+
+func TestSchema1UpdateLayerInfos(t *testing.T) {
+ for _, c := range []struct {
+ name string
+ sourceFixture string
+ updates []types.BlobInfo
+ expectedFixture string // or "" to indicate an expected failure
+ }{
+		// Many more test cases could be added here
+ {
+ name: "uncompressed → gzip encrypted",
+ sourceFixture: "v2s1.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 32654,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer1"},
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ {
+ Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ Size: 16724,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"},
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ {
+ Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ Size: 73109,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"},
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ },
+ expectedFixture: "", // Encryption is not supported
+ },
+ {
+ name: "gzip → uncompressed decrypted", // We can’t represent encrypted images anyway, but verify that we reject decryption attempts.
+ sourceFixture: "v2s1.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ },
+ expectedFixture: "", // Decryption is not supported
+ },
+ } {
+ manifest := manifestSchema1FromFixture(t, c.sourceFixture)
+
+ err := manifest.UpdateLayerInfos(c.updates)
+ if c.expectedFixture == "" {
+ assert.Error(t, err, c.name)
+ } else {
+ require.NoError(t, err, c.name)
+
+ updatedManifestBytes, err := manifest.Serialize()
+ require.NoError(t, err, c.name)
+
+ expectedManifest := manifestSchema1FromFixture(t, c.expectedFixture)
+ expectedManifestBytes, err := expectedManifest.Serialize()
+ require.NoError(t, err, c.name)
+
+ assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes), c.name)
+ }
+ }
+}
+
+func TestSchema1ImageID(t *testing.T) {
+ m := manifestSchema1FromFixture(t, "schema2-to-schema1-by-docker.json")
+ id, err := m.ImageID(schema1FixtureLayerDiffIDs)
+ require.NoError(t, err)
+ // NOTE: This value is dependent on the Schema1.ToSchema2Config implementation, and not necessarily stable over time.
+ // This is mostly a smoke-test; it’s fine to just update this value if that implementation changes.
+ assert.Equal(t, "9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", id)
+}
diff --git a/manifest/docker_schema2.go b/manifest/docker_schema2.go
new file mode 100644
index 0000000..20b721f
--- /dev/null
+++ b/manifest/docker_schema2.go
@@ -0,0 +1,306 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/v5/internal/manifest"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/pkg/strslice"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
+type Schema2Descriptor = manifest.Schema2Descriptor
+
+// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
+func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
+ return types.BlobInfo{
+ Digest: desc.Digest,
+ Size: desc.Size,
+ URLs: desc.URLs,
+ MediaType: desc.MediaType,
+ }
+}
+
+// Schema2 is a manifest in docker/distribution schema 2.
+type Schema2 struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ ConfigDescriptor Schema2Descriptor `json:"config"`
+ LayersDescriptors []Schema2Descriptor `json:"layers"`
+}
+
+// Schema2Port is a Port, a string containing port number and protocol in the
+// format "80/tcp", from docker/go-connections/nat.
+type Schema2Port string
+
+// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from
+// docker/go-connections/nat.
+type Schema2PortSet map[Schema2Port]struct{}
+
+// Schema2HealthConfig is a HealthConfig, which holds configuration settings
+// for the HEALTHCHECK feature, from docker/docker/api/types/container.
+type Schema2HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
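+
+// For illustration (values are hypothetical), a shell-based healthcheck run every 30 seconds with
+// up to 3 retries would be expressed as:
+//
+//	hc := Schema2HealthConfig{
+//		Test:     []string{"CMD-SHELL", "/openstack/healthcheck"},
+//		Interval: 30 * time.Second,
+//		Retries:  3,
+//	}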
+
+// Schema2Config is a Config in docker/docker/api/types/container.
+type Schema2Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container, also support user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
+ Env []string // List of environment variable to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Schema2V1Image is a V1Image in docker/docker/image.
+type Schema2V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig Schema2Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *Schema2Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // Variant is a variant of the CPU that the image is built and runs on
+ Variant string `json:"variant,omitempty"`
+	// OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
+type Schema2RootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // CreatedBy keeps the Dockerfile command used while building the image
+ CreatedBy string `json:"created_by,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // EmptyLayer is set to true if this history item did not generate a
+ // layer. Otherwise, the history item is associated with the next
+ // layer in the RootFS section.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
+type Schema2Image struct {
+ Schema2V1Image
+ Parent digest.Digest `json:"parent,omitempty"`
+ RootFS *Schema2RootFS `json:"rootfs,omitempty"`
+ History []Schema2History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) {
+ s2 := Schema2{}
+ if err := json.Unmarshal(manifestBlob, &s2); err != nil {
+ return nil, err
+ }
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType,
+ manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
+ return nil, err
+ }
+ // Check manifest's and layers' media types.
+ if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
+ return nil, err
+ }
+ for _, layer := range s2.LayersDescriptors {
+ if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
+ return nil, err
+ }
+ }
+ return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+ return &Schema2{
+ SchemaVersion: 2,
+ MediaType: DockerV2Schema2MediaType,
+ ConfigDescriptor: config,
+ LayersDescriptors: layers,
+ }
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
+func Schema2Clone(src *Schema2) *Schema2 {
+ copy := *src
+ return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+ return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []LayerInfo {
+ blobs := []LayerInfo{}
+ for _, layer := range m.LayersDescriptors {
+ blobs = append(blobs, LayerInfo{
+ BlobInfo: BlobInfoFromSchema2Descriptor(layer),
+ EmptyLayer: false,
+ })
+ }
+ return blobs
+}
+
+var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
+ {
+ mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
+ compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+ },
+ {
+ mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+ compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+ },
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that would result in anything other than gzip compression.
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ if len(m.LayersDescriptors) != len(layerInfos) {
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+ }
+ original := m.LayersDescriptors
+ m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+ for i, info := range layerInfos {
+ mimeType := original[i].MediaType
+ // First make sure we support the media type of the original layer.
+ if err := SupportedSchema2MediaType(mimeType); err != nil {
+ return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType)
+ }
+ mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
+ if err != nil {
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
+ }
+ m.LayersDescriptors[i].MediaType = mimeType
+ m.LayersDescriptors[i].Digest = info.Digest
+ m.LayersDescriptors[i].Size = info.Size
+ m.LayersDescriptors[i].URLs = info.URLs
+ if info.CryptoOperation != types.PreserveOriginalCrypto {
+ return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest)
+ }
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema2) Serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ config, err := configGetter(m.ConfigInfo())
+ if err != nil {
+ return nil, err
+ }
+ s2 := &Schema2Image{}
+ if err := json.Unmarshal(config, s2); err != nil {
+ return nil, err
+ }
+ layerInfos := m.LayerInfos()
+ i := &types.ImageInspectInfo{
+ Tag: "",
+ Created: &s2.Created,
+ DockerVersion: s2.DockerVersion,
+ Architecture: s2.Architecture,
+ Variant: s2.Variant,
+ Os: s2.OS,
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
+ Author: s2.Author,
+ }
+ if s2.Config != nil {
+ i.Labels = s2.Config.Labels
+ i.Env = s2.Config.Env
+ }
+ return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *Schema2) ImageID([]digest.Digest) (string, error) {
+ if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
+ return "", err
+ }
+ return m.ConfigDescriptor.Digest.Hex(), nil
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *Schema2) CanChangeLayerCompression(mimeType string) bool {
+ return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType)
+}
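+
+// Illustrative sketch added for exposition only; exampleSchema2Roundtrip is a
+// hypothetical helper, not part of the upstream API. It shows how a caller
+// might assemble a schema2 manifest from descriptors, swap in updated layer
+// blobs, and re-serialize the result using the functions defined above.
+func exampleSchema2Roundtrip(config Schema2Descriptor, layers []Schema2Descriptor, updates []types.BlobInfo) ([]byte, error) {
+	m := Schema2FromComponents(config, layers)
+	// UpdateLayerInfos rejects anything that would not result in gzip-compressed,
+	// unencrypted layers, so failures here are expected for e.g. zstd requests.
+	if err := m.UpdateLayerInfos(updates); err != nil {
+		return nil, err
+	}
+	// Serialize does not, in general, reproduce an original blob byte-for-byte.
+	return m.Serialize()
+}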
diff --git a/manifest/docker_schema2_list.go b/manifest/docker_schema2_list.go
new file mode 100644
index 0000000..c958a3f
--- /dev/null
+++ b/manifest/docker_schema2_list.go
@@ -0,0 +1,32 @@
+package manifest
+
+import (
+ "github.com/containers/image/v5/internal/manifest"
+)
+
+// Schema2PlatformSpec describes the platform which a particular manifest is
+// specialized for.
+type Schema2PlatformSpec = manifest.Schema2PlatformSpec
+
+// Schema2ManifestDescriptor references a platform-specific manifest.
+type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor
+
+// Schema2List is a list of platform-specific manifests.
+type Schema2List = manifest.Schema2ListPublic
+
+// Schema2ListFromComponents creates a Schema2 manifest list instance from the
+// supplied data.
+func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
+ return manifest.Schema2ListPublicFromComponents(components)
+}
+
+// Schema2ListClone creates a deep copy of the passed-in list.
+func Schema2ListClone(list *Schema2List) *Schema2List {
+ return manifest.Schema2ListPublicClone(list)
+}
+
+// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) {
+ return manifest.Schema2ListPublicFromManifest(manifestBlob)
+}
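+
+// Illustrative sketch for exposition only; exampleCloneSchema2List is a
+// hypothetical helper, not part of the upstream file. It parses a schema2
+// manifest list blob and returns an independent copy via the aliases above.
+func exampleCloneSchema2List(blob []byte) (*Schema2List, error) {
+	list, err := Schema2ListFromManifest(blob)
+	if err != nil {
+		return nil, err
+	}
+	return Schema2ListClone(list), nil
+}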
diff --git a/manifest/docker_schema2_list_test.go b/manifest/docker_schema2_list_test.go
new file mode 100644
index 0000000..0d7807f
--- /dev/null
+++ b/manifest/docker_schema2_list_test.go
@@ -0,0 +1,28 @@
+package manifest
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestSchema2ListFromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("fixtures", "v2list.manifest.json"))
+ require.NoError(t, err)
+
+ parser := func(m []byte) error {
+ _, err := Schema2ListFromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "schema2-to-schema1-by-docker.json",
+ "v2s2.manifest.json",
+ "ociv1.manifest.json",
+ // Not "ociv1.image.index.json" yet, without validating mediaType the two are too similar to tell the difference.
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"config", "fsLayers", "history", "layers"})
+}
diff --git a/manifest/docker_schema2_test.go b/manifest/docker_schema2_test.go
new file mode 100644
index 0000000..6a3aa3b
--- /dev/null
+++ b/manifest/docker_schema2_test.go
@@ -0,0 +1,283 @@
+package manifest
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func manifestSchema2FromFixture(t *testing.T, fixture string) *Schema2 {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", fixture))
+ require.NoError(t, err)
+
+ m, err := Schema2FromManifest(manifest)
+ require.NoError(t, err)
+ return m
+}
+
+func TestSupportedSchema2MediaType(t *testing.T) {
+ type testData struct {
+ m string
+ mustFail bool
+ }
+ data := []testData{
+ {DockerV2Schema2MediaType, false},
+ {DockerV2Schema2ConfigMediaType, false},
+ {DockerV2Schema2LayerMediaType, false},
+ {DockerV2SchemaLayerMediaTypeUncompressed, false},
+ {DockerV2ListMediaType, false},
+ {DockerV2Schema2ForeignLayerMediaType, false},
+ {DockerV2Schema2ForeignLayerMediaTypeGzip, false},
+ {"application/vnd.docker.image.rootfs.foreign.diff.unknown", true},
+ }
+ for _, d := range data {
+ err := SupportedSchema2MediaType(d.m)
+ if d.mustFail {
+ assert.NotNil(t, err)
+ } else {
+ assert.Nil(t, err)
+ }
+ }
+}
+
+func TestSchema2FromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("fixtures", "v2s2.manifest.json"))
+ require.NoError(t, err)
+
+ parser := func(m []byte) error {
+ _, err := Schema2FromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "schema2-to-schema1-by-docker.json",
+ "v2list.manifest.json",
+ "ociv1.manifest.json", "ociv1.image.index.json",
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"fsLayers", "history", "manifests"})
+}
+
+func TestSchema2UpdateLayerInfos(t *testing.T) {
+ for _, c := range []struct {
+ name string
+ sourceFixture string
+ updates []types.BlobInfo
+ expectedFixture string // or "" to indicate an expected failure
+ }{
+ {
+ name: "gzip → zstd",
+ sourceFixture: "v2s2.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ },
+ },
+ expectedFixture: "", // zstd is not supported for docker images
+ },
+ {
+ name: "invalid compression operation",
+ sourceFixture: "v2s2.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Decompress,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Decompress,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: 42, // MUST fail here
+ },
+ },
+ expectedFixture: "",
+ },
+ {
+ name: "invalid compression algorithm",
+ sourceFixture: "v2s2.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd, // MUST fail here
+ },
+ },
+ expectedFixture: "",
+ },
+ {
+ name: "nondistributable → gzip",
+ sourceFixture: "v2s2.nondistributable.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: DockerV2Schema2ForeignLayerMediaType,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ },
+ expectedFixture: "v2s2.nondistributable.gzip.manifest.json",
+ },
+ {
+ name: "nondistributable gzip → uncompressed",
+ sourceFixture: "v2s2.nondistributable.gzip.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: DockerV2Schema2ForeignLayerMediaType,
+ CompressionOperation: types.Decompress,
+ },
+ },
+ expectedFixture: "v2s2.nondistributable.manifest.json",
+ },
+ {
+ name: "uncompressed → gzip encrypted",
+ sourceFixture: "v2s2.uncompressed.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 32654,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer1"},
+ MediaType: DockerV2SchemaLayerMediaTypeUncompressed,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ {
+ Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ Size: 16724,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"},
+ MediaType: DockerV2SchemaLayerMediaTypeUncompressed,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ {
+ Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ Size: 73109,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"},
+ MediaType: DockerV2SchemaLayerMediaTypeUncompressed,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ },
+ expectedFixture: "", // Encryption is not supported
+ },
+ {
+ name: "gzip → uncompressed decrypted", // We can’t represent encrypted images anyway, but verify that we reject decryption attempts.
+ sourceFixture: "v2s2.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: DockerV2Schema2LayerMediaType,
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ },
+ expectedFixture: "", // Decryption is not supported
+ },
+ } {
+ manifest := manifestSchema2FromFixture(t, c.sourceFixture)
+
+ err := manifest.UpdateLayerInfos(c.updates)
+ if c.expectedFixture == "" {
+ assert.Error(t, err, c.name)
+ } else {
+ require.NoError(t, err, c.name)
+
+ updatedManifestBytes, err := manifest.Serialize()
+ require.NoError(t, err, c.name)
+
+ expectedManifest := manifestSchema2FromFixture(t, c.expectedFixture)
+ expectedManifestBytes, err := expectedManifest.Serialize()
+ require.NoError(t, err, c.name)
+
+ assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes), c.name)
+ }
+ }
+}
+
+func TestSchema2ImageID(t *testing.T) {
+ m := manifestSchema2FromFixture(t, "v2s2.manifest.json")
+ // These are not the real DiffID values, but they don’t actually matter in our implementation.
+ id, err := m.ImageID([]digest.Digest{
+ "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ })
+ require.NoError(t, err)
+ assert.Equal(t, "b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7", id)
+}
+
+func TestSchema2CanChangeLayerCompression(t *testing.T) {
+ m := manifestSchema2FromFixture(t, "v2s2.manifest.json")
+
+ assert.True(t, m.CanChangeLayerCompression(DockerV2Schema2LayerMediaType))
+ // Some projects like to use squashfs and other unspecified formats for layers; don’t touch those.
+ assert.False(t, m.CanChangeLayerCompression("a completely unknown and quite possibly invalid MIME type"))
+}
diff --git a/manifest/fixtures/non-json.manifest.json b/manifest/fixtures/non-json.manifest.json
new file mode 120000
index 0000000..367b7de
--- /dev/null
+++ b/manifest/fixtures/non-json.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/non-json.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.artifact.json b/manifest/fixtures/ociv1.artifact.json
new file mode 120000
index 0000000..fcec28a
--- /dev/null
+++ b/manifest/fixtures/ociv1.artifact.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/ociv1.artifact.json \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.encrypted.manifest.json b/manifest/fixtures/ociv1.encrypted.manifest.json
new file mode 100644
index 0000000..a4fa87f
--- /dev/null
+++ b/manifest/fixtures/ociv1.encrypted.manifest.json
@@ -0,0 +1,39 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7",
+ "size": 7023
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "size": 32654,
+ "annotations": {
+ "org.opencontainers.image.enc.…": "layer1"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ "digest": "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "size": 16724,
+ "annotations": {
+ "org.opencontainers.image.enc.…": "layer2"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ "digest": "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ "size": 73109,
+ "annotations": {
+ "org.opencontainers.image.enc.…": "layer2"
+ }
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.image.index.json b/manifest/fixtures/ociv1.image.index.json
new file mode 120000
index 0000000..d5373fc
--- /dev/null
+++ b/manifest/fixtures/ociv1.image.index.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/ociv1.image.index.json \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.invalid.mediatype.manifest.json b/manifest/fixtures/ociv1.invalid.mediatype.manifest.json
new file mode 100644
index 0000000..8a93b2b
--- /dev/null
+++ b/manifest/fixtures/ociv1.invalid.mediatype.manifest.json
@@ -0,0 +1,29 @@
+{
+ "schemaVersion": 2,
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+unknown",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+ }
diff --git a/manifest/fixtures/ociv1.manifest.json b/manifest/fixtures/ociv1.manifest.json
new file mode 120000
index 0000000..4927dc9
--- /dev/null
+++ b/manifest/fixtures/ociv1.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/ociv1.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.nondistributable.gzip.manifest.json b/manifest/fixtures/ociv1.nondistributable.gzip.manifest.json
new file mode 100644
index 0000000..ba1d377
--- /dev/null
+++ b/manifest/fixtures/ociv1.nondistributable.gzip.manifest.json
@@ -0,0 +1,20 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.nondistributable.manifest.json b/manifest/fixtures/ociv1.nondistributable.manifest.json
new file mode 100644
index 0000000..6ed3489
--- /dev/null
+++ b/manifest/fixtures/ociv1.nondistributable.manifest.json
@@ -0,0 +1,20 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.nondistributable.zstd.manifest.json b/manifest/fixtures/ociv1.nondistributable.zstd.manifest.json
new file mode 100644
index 0000000..85e2b45
--- /dev/null
+++ b/manifest/fixtures/ociv1.nondistributable.zstd.manifest.json
@@ -0,0 +1,20 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.uncompressed.manifest.json b/manifest/fixtures/ociv1.uncompressed.manifest.json
new file mode 100644
index 0000000..e3a137c
--- /dev/null
+++ b/manifest/fixtures/ociv1.uncompressed.manifest.json
@@ -0,0 +1,30 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/manifest/fixtures/ociv1.zstd.manifest.json b/manifest/fixtures/ociv1.zstd.manifest.json
new file mode 100644
index 0000000..b189158
--- /dev/null
+++ b/manifest/fixtures/ociv1.zstd.manifest.json
@@ -0,0 +1,30 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+zstd",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+zstd",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+zstd",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/manifest/fixtures/ociv1nomime.artifact.json b/manifest/fixtures/ociv1nomime.artifact.json
new file mode 120000
index 0000000..4b1ebfb
--- /dev/null
+++ b/manifest/fixtures/ociv1nomime.artifact.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/ociv1nomime.artifact.json \ No newline at end of file
diff --git a/manifest/fixtures/ociv1nomime.image.index.json b/manifest/fixtures/ociv1nomime.image.index.json
new file mode 120000
index 0000000..29b3d9b
--- /dev/null
+++ b/manifest/fixtures/ociv1nomime.image.index.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/ociv1nomime.image.index.json \ No newline at end of file
diff --git a/manifest/fixtures/ociv1nomime.manifest.json b/manifest/fixtures/ociv1nomime.manifest.json
new file mode 120000
index 0000000..037a21f
--- /dev/null
+++ b/manifest/fixtures/ociv1nomime.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/ociv1nomime.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/schema2-to-schema1-by-docker.json b/manifest/fixtures/schema2-to-schema1-by-docker.json
new file mode 120000
index 0000000..322c5b7
--- /dev/null
+++ b/manifest/fixtures/schema2-to-schema1-by-docker.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/schema2-to-schema1-by-docker.json \ No newline at end of file
diff --git a/manifest/fixtures/unknown-version.manifest.json b/manifest/fixtures/unknown-version.manifest.json
new file mode 120000
index 0000000..c9136e9
--- /dev/null
+++ b/manifest/fixtures/unknown-version.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/unknown-version.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/v2list.manifest.json b/manifest/fixtures/v2list.manifest.json
new file mode 120000
index 0000000..8fb6441
--- /dev/null
+++ b/manifest/fixtures/v2list.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/v2list.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/v2s1-invalid-signatures.manifest.json b/manifest/fixtures/v2s1-invalid-signatures.manifest.json
new file mode 120000
index 0000000..832703e
--- /dev/null
+++ b/manifest/fixtures/v2s1-invalid-signatures.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/v2s1-invalid-signatures.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/v2s1-unsigned.manifest.json b/manifest/fixtures/v2s1-unsigned.manifest.json
new file mode 120000
index 0000000..d6a55a7
--- /dev/null
+++ b/manifest/fixtures/v2s1-unsigned.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/v2s1-unsigned.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/v2s1.manifest.json b/manifest/fixtures/v2s1.manifest.json
new file mode 120000
index 0000000..8021e76
--- /dev/null
+++ b/manifest/fixtures/v2s1.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/v2s1.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/v2s2.manifest.json b/manifest/fixtures/v2s2.manifest.json
new file mode 120000
index 0000000..f172a45
--- /dev/null
+++ b/manifest/fixtures/v2s2.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/v2s2.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures/v2s2.nondistributable.gzip.manifest.json b/manifest/fixtures/v2s2.nondistributable.gzip.manifest.json
new file mode 100644
index 0000000..56bd516
--- /dev/null
+++ b/manifest/fixtures/v2s2.nondistributable.gzip.manifest.json
@@ -0,0 +1,20 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/manifest/fixtures/v2s2.nondistributable.manifest.json b/manifest/fixtures/v2s2.nondistributable.manifest.json
new file mode 100644
index 0000000..66920c1
--- /dev/null
+++ b/manifest/fixtures/v2s2.nondistributable.manifest.json
@@ -0,0 +1,20 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ }
+ ],
+ "annotations": {
+ "com.example.key1": "value1",
+ "com.example.key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/manifest/fixtures/v2s2.uncompressed.manifest.json b/manifest/fixtures/v2s2.uncompressed.manifest.json
new file mode 100644
index 0000000..869d97e
--- /dev/null
+++ b/manifest/fixtures/v2s2.uncompressed.manifest.json
@@ -0,0 +1,26 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ]
+} \ No newline at end of file
diff --git a/manifest/fixtures/v2s2nomime.manifest.json b/manifest/fixtures/v2s2nomime.manifest.json
new file mode 120000
index 0000000..bf022a4
--- /dev/null
+++ b/manifest/fixtures/v2s2nomime.manifest.json
@@ -0,0 +1 @@
+../../internal/manifest/testdata/v2s2nomime.manifest.json \ No newline at end of file
diff --git a/manifest/fixtures_info_test.go b/manifest/fixtures_info_test.go
new file mode 100644
index 0000000..bfdaed1
--- /dev/null
+++ b/manifest/fixtures_info_test.go
@@ -0,0 +1,12 @@
+package manifest
+
+import "github.com/opencontainers/go-digest"
+
+const (
+ // TestDockerV2S2ManifestDigest is the Docker manifest digest of "v2s2.manifest.json"
+ TestDockerV2S2ManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
+ // TestDockerV2S1ManifestDigest is the Docker manifest digest of "v2s1.manifest.json"
+ TestDockerV2S1ManifestDigest = digest.Digest("sha256:7364fea9d84ee548ab67d4c46c6006289800c98de3fbf8c0a97138dfcc23f000")
+ // TestDockerV2S1UnsignedManifestDigest is the Docker manifest digest of "v2s1-unsigned.manifest.json"
+ TestDockerV2S1UnsignedManifestDigest = digest.Digest("sha256:7364fea9d84ee548ab67d4c46c6006289800c98de3fbf8c0a97138dfcc23f000")
+)
diff --git a/manifest/list.go b/manifest/list.go
new file mode 100644
index 0000000..1d6fdc9
--- /dev/null
+++ b/manifest/list.go
@@ -0,0 +1,35 @@
+package manifest
+
+import (
+ "github.com/containers/image/v5/internal/manifest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+ // SupportedListMIMETypes is a list of the manifest list types that we know how to
+ // read/manipulate/write.
+ SupportedListMIMETypes = []string{
+ DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageIndex,
+ }
+)
+
+// List is an interface for parsing and modifying lists of image manifests.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
+// directly.
+type List = manifest.ListPublic
+
+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
+type ListUpdate = manifest.ListUpdate
+
+// ListFromBlob parses a list of manifests.
+func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) {
+ return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType)
+}
+
+// ConvertListToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
+func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) {
+ return list.ConvertToMIMEType(manifestMIMEType)
+}
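+
+// Illustrative sketch for exposition only; exampleListToOCIIndex is a
+// hypothetical helper, not part of the upstream file. It guesses a blob's MIME
+// type, parses it as a manifest list, and converts it to an OCI image index.
+func exampleListToOCIIndex(blob []byte) (List, error) {
+	list, err := ListFromBlob(blob, GuessMIMEType(blob))
+	if err != nil {
+		return nil, err
+	}
+	return ConvertListToMIMEType(list, imgspecv1.MediaTypeImageIndex)
+}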
diff --git a/manifest/list_test.go b/manifest/list_test.go
new file mode 100644
index 0000000..25261b9
--- /dev/null
+++ b/manifest/list_test.go
@@ -0,0 +1,141 @@
+package manifest
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func pare(m List) {
+ if impl, ok := m.(*manifest.OCI1Index); ok {
+ impl.Annotations = nil
+ }
+ if impl, ok := m.(*manifest.Schema2List); ok {
+ for i := range impl.Manifests {
+ impl.Manifests[i].Platform.Features = nil
+ }
+ }
+}
+
+func TestParseLists(t *testing.T) {
+ cases := []struct {
+ path string
+ mimeType string
+ }{
+ {"ociv1.image.index.json", imgspecv1.MediaTypeImageIndex},
+ {"v2list.manifest.json", DockerV2ListMediaType},
+ }
+ for _, c := range cases {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", c.path))
+ require.NoError(t, err, "error reading file %q", filepath.Join("fixtures", c.path))
+ assert.Equal(t, GuessMIMEType(manifest), c.mimeType)
+
+ _, err = FromBlob(manifest, c.mimeType)
+ require.Error(t, err, "manifest list %q should not parse as single images", c.path)
+
+ m, err := ListFromBlob(manifest, c.mimeType)
+ require.NoError(t, err, "manifest list %q should parse as list types", c.path)
+ assert.Equal(t, m.MIMEType(), c.mimeType, "manifest %q is not of the expected MIME type", c.path)
+
+ clone := m.Clone()
+ assert.Equal(t, clone, m, "manifest %q is missing some fields after being cloned", c.path)
+
+ pare(m)
+
+ index, err := m.ConvertToMIMEType(imgspecv1.MediaTypeImageIndex)
+ require.NoError(t, err, "error converting %q to an OCI1Index", c.path)
+
+ list, err := m.ConvertToMIMEType(DockerV2ListMediaType)
+ require.NoError(t, err, "error converting %q to an Schema2List", c.path)
+
+ index2, err := list.ConvertToMIMEType(imgspecv1.MediaTypeImageIndex)
+ require.NoError(t, err)
+ assert.Equal(t, index, index2, "index %q lost data in conversion", c.path)
+
+ list2, err := index.ConvertToMIMEType(DockerV2ListMediaType)
+ require.NoError(t, err)
+ assert.Equal(t, list, list2, "list %q lost data in conversion", c.path)
+ }
+}
+
+func TestChooseInstance(t *testing.T) {
+ type expectedMatch struct {
+ arch, variant string
+ instanceDigest digest.Digest
+ }
+ for _, manifestList := range []struct {
+ listFile string
+ matchedInstances []expectedMatch
+ unmatchedInstances []string
+ }{
+ {
+ listFile: "schema2list.json",
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:030fcb92e1487b18c974784dcc110a93147c9fc402188370fbfd17efabffc6af"},
+ {"s390x", "", "sha256:e5aa1b0a24620228b75382997a0977f609b3ca3a95533dafdef84c74cc8df642"},
+ {"arm", "v7", "sha256:b5dbad4bdb4444d919294afe49a095c23e86782f98cdf0aa286198ddb814b50b"},
+ {"arm64", "", "sha256:dc472a59fb006797aa2a6bfb54cc9c57959bb0a6d11fadaa608df8c16dea39cf"},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ { // Focus on ARM variant field testing
+ listFile: "schema2list-variants.json",
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610"},
+ {"arm", "v7", "sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39"},
+ {"arm", "v6", "sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39"},
+ {"arm", "v5", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53"},
+ {"arm", "", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53"},
+ {"arm", "unrecognized-present", "sha256:bcf9771c0b505e68c65440474179592ffdfa98790eb54ffbf129969c5e429990"},
+ {"arm", "unrecognized-not-present", "sha256:c84b0a3a07b628bc4d62e5047d0f8dff80f7c00979e1e28a821a033ecda8fe53"},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ {
+ listFile: "oci1index.json",
+ matchedInstances: []expectedMatch{
+ {"amd64", "", "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"},
+ {"ppc64le", "", "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"},
+ },
+ unmatchedInstances: []string{
+ "unmatched",
+ },
+ },
+ } {
+ rawManifest, err := os.ReadFile(filepath.Join("..", "internal", "manifest", "testdata", manifestList.listFile))
+ require.NoError(t, err)
+ list, err := ListFromBlob(rawManifest, GuessMIMEType(rawManifest))
+ require.NoError(t, err)
+ // Match found
+ for _, match := range manifestList.matchedInstances {
+ testName := fmt.Sprintf("%s %q+%q", manifestList.listFile, match.arch, match.variant)
+ digest, err := list.ChooseInstance(&types.SystemContext{
+ ArchitectureChoice: match.arch,
+ VariantChoice: match.variant,
+ OSChoice: "linux",
+ })
+ require.NoError(t, err, testName)
+ assert.Equal(t, match.instanceDigest, digest, testName)
+ }
+ // Not found
+ for _, arch := range manifestList.unmatchedInstances {
+ _, err := list.ChooseInstance(&types.SystemContext{
+ ArchitectureChoice: arch,
+ OSChoice: "linux",
+ })
+ assert.Error(t, err)
+ }
+ }
+}
diff --git a/manifest/manifest.go b/manifest/manifest.go
new file mode 100644
index 0000000..959aac9
--- /dev/null
+++ b/manifest/manifest.go
@@ -0,0 +1,170 @@
+package manifest
+
+import (
+ "fmt"
+
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/libtrust"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+ // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+ DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
+ // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+ DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
+ // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+ DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
+ // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+ DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
+ // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+ DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
+ // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+ DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+ // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+ DockerV2ListMediaType = manifest.DockerV2ListMediaType
+ // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+ // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+)
+
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
+type NonImageArtifactError = manifest.NonImageArtifactError
+
+// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
+func SupportedSchema2MediaType(m string) error {
+ switch m {
+ case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed:
+ return nil
+ default:
+ return fmt.Errorf("unsupported docker v2s2 media type: %q", m)
+ }
+}
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+ imgspecv1.MediaTypeImageManifest,
+ DockerV2Schema2MediaType,
+ DockerV2Schema1SignedMediaType,
+ DockerV2Schema1MediaType,
+ DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageIndex,
+}
+
+// Manifest is an interface for parsing and modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
+// directly.
+//
+// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
+// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image.
+type Manifest interface {
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ ConfigInfo() types.BlobInfo
+ // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []LayerInfo
+ // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+ UpdateLayerInfos(layerInfos []types.BlobInfo) error
+
+ // ImageID computes an ID which can uniquely identify this image by its contents, irrespective
+ // of which (of possibly more than one simultaneously valid) reference was used to locate the
+ // image, and unchanged by whether or how the layers are compressed. The result takes the form
+ // of the hexadecimal portion of a digest.Digest.
+ ImageID(diffIDs []digest.Digest) (string, error)
+
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest,
+ // incorporating information from a configuration blob returned by configGetter, if
+ // the underlying image format is expected to include a configuration blob.
+ Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
+
+ // Serialize returns the manifest in a blob format.
+ // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+ Serialize() ([]byte, error)
+}
+
+// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos.
+type LayerInfo struct {
+ types.BlobInfo
+ EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
+}
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+func GuessMIMEType(manifestBlob []byte) string {
+ return manifest.GuessMIMEType(manifestBlob)
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifestBlob []byte) (digest.Digest, error) {
+ return manifest.Digest(manifestBlob)
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
+ return manifest.MatchesDigest(manifestBlob, expectedDigest)
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a docker/distribution registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+ key, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, err // Coverage: This can fail only if rand.Reader fails.
+ }
+
+ js, err := libtrust.NewJSONSignature(manifest)
+ if err != nil {
+ return nil, err
+ }
+ if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+ return nil, err
+ }
+ return js.PrettySignature("signatures")
+}
+
+// MIMETypeIsMultiImage returns true if mimeType is a list of images
+func MIMETypeIsMultiImage(mimeType string) bool {
+ return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex
+}
+
+// MIMETypeSupportsEncryption returns true if the mimeType supports encryption
+func MIMETypeSupportsEncryption(mimeType string) bool {
+ return mimeType == imgspecv1.MediaTypeImageManifest
+}
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+func NormalizedMIMEType(input string) string {
+ return manifest.NormalizedMIMEType(input)
+}
+
+// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
+func FromBlob(manblob []byte, mt string) (Manifest, error) {
+ nmt := NormalizedMIMEType(mt)
+ switch nmt {
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
+ return Schema1FromManifest(manblob)
+ case imgspecv1.MediaTypeImageManifest:
+ return OCI1FromManifest(manblob)
+ case DockerV2Schema2MediaType:
+ return Schema2FromManifest(manblob)
+ case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex:
+ return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
+ }
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
+}
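+
+// Illustrative sketch for exposition only; exampleVerifyAndParse is a
+// hypothetical helper, not part of the upstream file. It checks a manifest
+// blob against an expected digest and then parses it with FromBlob.
+func exampleVerifyAndParse(blob []byte, expected digest.Digest) (Manifest, error) {
+	ok, err := MatchesDigest(blob, expected)
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		return nil, fmt.Errorf("manifest does not match expected digest %s", expected)
+	}
+	return FromBlob(blob, GuessMIMEType(blob))
+}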
diff --git a/manifest/manifest_test.go b/manifest/manifest_test.go
new file mode 100644
index 0000000..677b136
--- /dev/null
+++ b/manifest/manifest_test.go
@@ -0,0 +1,169 @@
+package manifest
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/libtrust"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+func TestGuessMIMEType(t *testing.T) {
+ cases := []struct {
+ path string
+ mimeType string
+ }{
+ {"v2s2.manifest.json", DockerV2Schema2MediaType},
+ {"v2list.manifest.json", DockerV2ListMediaType},
+ {"v2s1.manifest.json", DockerV2Schema1SignedMediaType},
+ {"v2s1-unsigned.manifest.json", DockerV2Schema1MediaType},
+ {"v2s1-invalid-signatures.manifest.json", DockerV2Schema1SignedMediaType},
+ {"v2s2nomime.manifest.json", DockerV2Schema2MediaType}, // It is unclear whether this one is legal, but we should guess v2s2 if anything at all.
+ {"unknown-version.manifest.json", ""},
+ {"non-json.manifest.json", ""}, // Not a manifest (nor JSON) at all
+ {"ociv1.manifest.json", imgspecv1.MediaTypeImageManifest},
+ {"ociv1.artifact.json", imgspecv1.MediaTypeImageManifest},
+ {"ociv1.image.index.json", imgspecv1.MediaTypeImageIndex},
+ {"ociv1nomime.manifest.json", imgspecv1.MediaTypeImageManifest},
+ {"ociv1nomime.artifact.json", imgspecv1.MediaTypeImageManifest},
+ {"ociv1nomime.image.index.json", imgspecv1.MediaTypeImageIndex},
+ }
+
+ for _, c := range cases {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", c.path))
+ require.NoError(t, err)
+ mimeType := GuessMIMEType(manifest)
+ assert.Equal(t, c.mimeType, mimeType, c.path)
+ }
+}
+
+func TestDigest(t *testing.T) {
+ cases := []struct {
+ path string
+ expectedDigest digest.Digest
+ }{
+ {"v2s2.manifest.json", TestDockerV2S2ManifestDigest},
+ {"v2s1.manifest.json", TestDockerV2S1ManifestDigest},
+ {"v2s1-unsigned.manifest.json", TestDockerV2S1UnsignedManifestDigest},
+ }
+ for _, c := range cases {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", c.path))
+ require.NoError(t, err)
+ actualDigest, err := Digest(manifest)
+ require.NoError(t, err)
+ assert.Equal(t, c.expectedDigest, actualDigest)
+ }
+
+ manifest, err := os.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ _, err = Digest(manifest)
+ assert.Error(t, err)
+
+ actualDigest, err := Digest([]byte{})
+ require.NoError(t, err)
+ assert.Equal(t, digest.Digest(digestSha256EmptyTar), actualDigest)
+}
+
+func TestMatchesDigest(t *testing.T) {
+ cases := []struct {
+ path string
+ expectedDigest digest.Digest
+ result bool
+ }{
+ // Success
+ {"v2s2.manifest.json", TestDockerV2S2ManifestDigest, true},
+ {"v2s1.manifest.json", TestDockerV2S1ManifestDigest, true},
+ // No match (switched s1/s2)
+ {"v2s2.manifest.json", TestDockerV2S1ManifestDigest, false},
+ {"v2s1.manifest.json", TestDockerV2S2ManifestDigest, false},
+ // Unrecognized algorithm
+ {"v2s2.manifest.json", digest.Digest("md5:2872f31c5c1f62a694fbd20c1e85257c"), false},
+ // Mangled format
+ {"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String() + "abc"), false},
+ {"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String()[:20]), false},
+ {"v2s2.manifest.json", digest.Digest(""), false},
+ }
+ for _, c := range cases {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", c.path))
+ require.NoError(t, err)
+ res, err := MatchesDigest(manifest, c.expectedDigest)
+ require.NoError(t, err)
+ assert.Equal(t, c.result, res)
+ }
+
+ manifest, err := os.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ // Even a correct SHA256 hash is rejected if we can't strip the JSON signature.
+ res, err := MatchesDigest(manifest, digest.FromBytes(manifest))
+ assert.False(t, res)
+ assert.Error(t, err)
+
+ res, err = MatchesDigest([]byte{}, digest.Digest(digestSha256EmptyTar))
+ assert.True(t, res)
+ assert.NoError(t, err)
+}
+
+func TestAddDummyV2S1Signature(t *testing.T) {
+ manifest, err := os.ReadFile("fixtures/v2s1-unsigned.manifest.json")
+ require.NoError(t, err)
+
+ signedManifest, err := AddDummyV2S1Signature(manifest)
+ require.NoError(t, err)
+
+ sig, err := libtrust.ParsePrettySignature(signedManifest, "signatures")
+ require.NoError(t, err)
+ signaturePayload, err := sig.Payload()
+ require.NoError(t, err)
+ assert.Equal(t, manifest, signaturePayload)
+
+ _, err = AddDummyV2S1Signature([]byte("}this is invalid JSON"))
+ assert.Error(t, err)
+}
+
+func TestMIMETypeIsMultiImage(t *testing.T) {
+ for _, c := range []struct {
+ mt string
+ expected bool
+ }{
+ {DockerV2ListMediaType, true},
+ {DockerV2Schema1MediaType, false},
+ {DockerV2Schema1SignedMediaType, false},
+ {DockerV2Schema2MediaType, false},
+ {imgspecv1.MediaTypeImageIndex, true},
+ {imgspecv1.MediaTypeImageManifest, false},
+ } {
+ res := MIMETypeIsMultiImage(c.mt)
+ assert.Equal(t, c.expected, res, c.mt)
+ }
+}
+
+func TestNormalizedMIMEType(t *testing.T) {
+ for _, c := range []string{ // Valid MIME types, normalized to themselves
+ DockerV2Schema1MediaType,
+ DockerV2Schema1SignedMediaType,
+ DockerV2Schema2MediaType,
+ DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
+ } {
+ res := NormalizedMIMEType(c)
+ assert.Equal(t, c, res, c)
+ }
+ for _, c := range []string{
+ "application/json",
+ "text/plain",
+ "not at all a valid MIME type",
+ "",
+ } {
+ res := NormalizedMIMEType(c)
+ assert.Equal(t, DockerV2Schema1SignedMediaType, res, c)
+ }
+}
diff --git a/manifest/oci.go b/manifest/oci.go
new file mode 100644
index 0000000..a85641c
--- /dev/null
+++ b/manifest/oci.go
@@ -0,0 +1,274 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/internal/manifest"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ ociencspec "github.com/containers/ocicrypt/spec"
+ "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/slices"
+)
+
+// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
+func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo {
+ return types.BlobInfo{
+ Digest: desc.Digest,
+ Size: desc.Size,
+ URLs: desc.URLs,
+ Annotations: desc.Annotations,
+ MediaType: desc.MediaType,
+ }
+}
+
+// OCI1 is a manifest.Manifest implementation for OCI images.
+// The underlying data from imgspecv1.Manifest is also available.
+type OCI1 struct {
+ imgspecv1.Manifest
+}
+
+// SupportedOCI1MediaType checks if the specified string is a supported OCI1
+// media type.
+//
+// Deprecated: blindly rejecting unknown MIME types when the consumer does not
+// need to process the input just reduces interoperability (and violates the
+// standard) with no benefit; also, this function does not check that the
+// media type is appropriate for any specific purpose, so it’s not all that
+// useful for validation anyway.
+func SupportedOCI1MediaType(m string) error {
+ switch m {
+ case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig,
+ imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd,
+ imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeLayoutHeader,
+ ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
+ return nil
+ default:
+ return fmt.Errorf("unsupported OCIv1 media type: %q", m)
+ }
+}
+
+// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
+func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
+ oci1 := OCI1{}
+ if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
+ return nil, err
+ }
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
+ manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
+ return nil, err
+ }
+ return &oci1, nil
+}
+
+// OCI1FromComponents creates an OCI1 manifest instance from the supplied data.
+func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 {
+ return &OCI1{
+ imgspecv1.Manifest{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageManifest,
+ Config: config,
+ Layers: layers,
+ },
+ }
+}
+
+// OCI1Clone creates a copy of the supplied OCI1 manifest.
+func OCI1Clone(src *OCI1) *OCI1 {
+ return &OCI1{
+ Manifest: src.Manifest,
+ }
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *OCI1) ConfigInfo() types.BlobInfo {
+ return BlobInfoFromOCI1Descriptor(m.Config)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *OCI1) LayerInfos() []LayerInfo {
+ blobs := []LayerInfo{}
+ for _, layer := range m.Layers {
+ blobs = append(blobs, LayerInfo{
+ BlobInfo: BlobInfoFromOCI1Descriptor(layer),
+ EmptyLayer: false,
+ })
+ }
+ return blobs
+}
+
+var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
+ {
+ mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ },
+ {
+ mtsUncompressed: imgspecv1.MediaTypeImageLayer,
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
+ },
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that isn't supported by OCI.
+//
+// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on
+// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (UpdateLayerInfos needs
+// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
+// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
+// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
+func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ if len(m.Layers) != len(layerInfos) {
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+ }
+ original := m.Layers
+ m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
+ for i, info := range layerInfos {
+ mimeType := original[i].MediaType
+ if info.CryptoOperation == types.Decrypt {
+ decMimeType, err := getDecryptedMediaType(mimeType)
+ if err != nil {
+ return fmt.Errorf("error preparing updated manifest: decryption specified but original mediatype is not encrypted: %q", mimeType)
+ }
+ mimeType = decMimeType
+ }
+ mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
+ if err != nil {
+ return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
+ }
+ if info.CryptoOperation == types.Encrypt {
+ encMediaType, err := getEncryptedMediaType(mimeType)
+ if err != nil {
+ return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType)
+ }
+ mimeType = encMediaType
+ }
+
+ m.Layers[i].MediaType = mimeType
+ m.Layers[i].Digest = info.Digest
+ m.Layers[i].Size = info.Size
+ m.Layers[i].Annotations = info.Annotations
+ m.Layers[i].URLs = info.URLs
+ }
+ return nil
+}
+
+// getEncryptedMediaType returns the mediatype converted to its encrypted counterpart, or
+// an error if the mediatype does not support encryption.
+func getEncryptedMediaType(mediatype string) (string, error) {
+ if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
+ return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
+ }
+ unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+ switch unsuffixedMediatype {
+ case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
+ imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ return mediatype + "+encrypted", nil
+ }
+
+ return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
+}
+
+// getDecryptedMediaType returns the mediatype converted to its decrypted counterpart, or
+// an error if the mediatype does not support decryption.
+func getDecryptedMediaType(mediatype string) (string, error) {
+ if !strings.HasSuffix(mediatype, "+encrypted") {
+ return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
+ }
+
+ return strings.TrimSuffix(mediatype, "+encrypted"), nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *OCI1) Serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ // We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos.
+ // Most software calling this without human intervention is going to expect the values to be realistic and relevant,
+ // and is probably better served by failing; we can always re-visit that later if we fail now, but
+ // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
+ return nil, manifest.NewNonImageArtifactError(&m.Manifest)
+ }
+
+ config, err := configGetter(m.ConfigInfo())
+ if err != nil {
+ return nil, err
+ }
+ v1 := &imgspecv1.Image{}
+ if err := json.Unmarshal(config, v1); err != nil {
+ return nil, err
+ }
+ d1 := &Schema2V1Image{}
+ if err := json.Unmarshal(config, d1); err != nil {
+ return nil, err
+ }
+ layerInfos := m.LayerInfos()
+ i := &types.ImageInspectInfo{
+ Tag: "",
+ Created: v1.Created,
+ DockerVersion: d1.DockerVersion,
+ Labels: v1.Config.Labels,
+ Architecture: v1.Architecture,
+ Variant: v1.Variant,
+ Os: v1.OS,
+ Layers: layerInfosToStrings(layerInfos),
+ LayersData: imgInspectLayersFromLayerInfos(layerInfos),
+ Env: v1.Config.Env,
+ Author: v1.Author,
+ }
+ return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+ // The way m.Config.Digest “uniquely identifies” an image is
+ // by containing RootFS.DiffIDs, which identify the layers of the image.
+	// For non-image artifacts, we can’t expect the config to change
+ // any time the other layers (semantically) change, so this approach of
+ // distinguishing objects only by m.Config.Digest doesn’t work in general.
+ //
+ // Any caller of this method presumably wants to disambiguate the same
+ // images with a different representation, but doesn’t want to disambiguate
+ // representations (by using a manifest digest). So, submitting a non-image
+ // artifact to such a caller indicates an expectation mismatch.
+ // So, we just fail here instead of inventing some other ID value (e.g.
+ // by combining the config and blob layer digests). That still
+ // gives us the option to not fail, and return some value, in the future,
+ // without committing to that approach now.
+ // (The only known caller of ImageID is storage/storageImageDestination.computeID,
+ // which can’t work with non-image artifacts.)
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return "", manifest.NewNonImageArtifactError(&m.Manifest)
+ }
+
+ if err := m.Config.Digest.Validate(); err != nil {
+ return "", err
+ }
+ return m.Config.Digest.Hex(), nil
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *OCI1) CanChangeLayerCompression(mimeType string) bool {
+ if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+ return false
+ }
+ return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType)
+}
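
The UpdateLayerInfos contract above (same layer count, media types derived from CompressionOperation and CompressionAlgorithm) is easiest to see from the caller's side. A minimal sketch, not part of the upstream patch; the manifest.json path is hypothetical, and a real caller would supply the digests and sizes of the newly compressed blobs rather than reusing the old ones.

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
)

func main() {
	blob, err := os.ReadFile("manifest.json") // hypothetical input path
	if err != nil {
		panic(err)
	}
	m, err := manifest.OCI1FromManifest(blob)
	if err != nil {
		panic(err)
	}
	// Request zstd recompression for every layer.
	updates := []types.BlobInfo{}
	for _, l := range m.LayerInfos() {
		info := l.BlobInfo
		info.CompressionOperation = types.Compress
		info.CompressionAlgorithm = &compression.Zstd
		updates = append(updates, info)
	}
	if err := m.UpdateLayerInfos(updates); err != nil {
		panic(err) // e.g. a ManifestLayerCompressionIncompatibilityError
	}
	out, err := m.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```
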
diff --git a/manifest/oci_index.go b/manifest/oci_index.go
new file mode 100644
index 0000000..193b089
--- /dev/null
+++ b/manifest/oci_index.go
@@ -0,0 +1,27 @@
+package manifest
+
+import (
+ "github.com/containers/image/v5/internal/manifest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// OCI1Index is just an alias for the OCI index type, but one which we can
+// provide methods for.
+type OCI1Index = manifest.OCI1IndexPublic
+
+// OCI1IndexFromComponents creates an OCI1 image index instance from the
+// supplied data.
+func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
+ return manifest.OCI1IndexPublicFromComponents(components, annotations)
+}
+
+// OCI1IndexClone creates a deep copy of the passed-in index.
+func OCI1IndexClone(index *OCI1Index) *OCI1Index {
+ return manifest.OCI1IndexPublicClone(index)
+}
+
+// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) {
+ return manifest.OCI1IndexPublicFromManifest(manifestBlob)
+}
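
For the index side, a minimal sketch (not part of the upstream patch) of building an index from components; the digest, size, and annotation values are placeholders, and Serialize is assumed to be available on the underlying index type.

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	idx := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{
		{
			MediaType: imgspecv1.MediaTypeImageManifest,
			Digest:    "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // placeholder
			Size:      7143,                                                                      // placeholder
			Platform:  &imgspecv1.Platform{Architecture: "amd64", OS: "linux"},
		},
	}, map[string]string{"org.example.annotation": "value"}) // placeholder annotation

	blob, err := idx.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
}
```
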
diff --git a/manifest/oci_index_test.go b/manifest/oci_index_test.go
new file mode 100644
index 0000000..defa85f
--- /dev/null
+++ b/manifest/oci_index_test.go
@@ -0,0 +1,28 @@
+package manifest
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestOCI1IndexFromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("fixtures", "ociv1.image.index.json"))
+ require.NoError(t, err)
+
+ parser := func(m []byte) error {
+ _, err := OCI1IndexFromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "schema2-to-schema1-by-docker.json",
+ "v2s2.manifest.json",
+ // Not "v2list.manifest.json" yet, without mediaType the two are too similar to tell the difference.
+ "ociv1.manifest.json",
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"config", "fsLayers", "history", "layers"})
+}
diff --git a/manifest/oci_test.go b/manifest/oci_test.go
new file mode 100644
index 0000000..e27275b
--- /dev/null
+++ b/manifest/oci_test.go
@@ -0,0 +1,406 @@
+package manifest
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func manifestOCI1FromFixture(t *testing.T, fixture string) *OCI1 {
+ manifest, err := os.ReadFile(filepath.Join("fixtures", fixture))
+ require.NoError(t, err)
+
+ m, err := OCI1FromManifest(manifest)
+ require.NoError(t, err)
+ return m
+}
+
+func TestSupportedOCI1MediaType(t *testing.T) {
+ type testData struct {
+ m string
+ mustFail bool
+ }
+ data := []testData{
+ {imgspecv1.MediaTypeDescriptor, false},
+ {imgspecv1.MediaTypeImageConfig, false},
+ {imgspecv1.MediaTypeImageLayer, false},
+ {imgspecv1.MediaTypeImageLayerGzip, false},
+ {imgspecv1.MediaTypeImageLayerNonDistributable, false}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {imgspecv1.MediaTypeImageLayerNonDistributableGzip, false}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {imgspecv1.MediaTypeImageLayerNonDistributableZstd, false}, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ {imgspecv1.MediaTypeImageLayerZstd, false},
+ {imgspecv1.MediaTypeImageManifest, false},
+ {imgspecv1.MediaTypeLayoutHeader, false},
+ {"application/vnd.oci.image.layer.nondistributable.v1.tar+unknown", true},
+ }
+ for _, d := range data {
+ err := SupportedOCI1MediaType(d.m)
+ if d.mustFail {
+ assert.NotNil(t, err)
+ } else {
+ assert.Nil(t, err)
+ }
+ }
+}
+
+func TestOCI1FromManifest(t *testing.T) {
+ validManifest, err := os.ReadFile(filepath.Join("fixtures", "ociv1.manifest.json"))
+ require.NoError(t, err)
+
+ parser := func(m []byte) error {
+ _, err := OCI1FromManifest(m)
+ return err
+ }
+ // Schema mismatch is rejected
+ testManifestFixturesAreRejected(t, parser, []string{
+ "schema2-to-schema1-by-docker.json",
+ // Not "v2s2.manifest.json" yet, without mediaType the two are too similar to tell the difference.
+ "v2list.manifest.json",
+ "ociv1.image.index.json",
+ })
+ // Extra fields are rejected
+ testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"fsLayers", "history", "manifests"})
+}
+
+func TestOCI1UpdateLayerInfos(t *testing.T) {
+ customCompression := compression.Algorithm{}
+
+ for _, c := range []struct {
+ name string
+ sourceFixture string
+ updates []types.BlobInfo
+ expectedFixture string // or "" to indicate an expected failure
+ }{
+ {
+ name: "gzip → zstd",
+ sourceFixture: "ociv1.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ },
+ },
+ expectedFixture: "ociv1.zstd.manifest.json",
+ },
+ {
+ name: "zstd → gzip",
+ sourceFixture: "ociv1.zstd.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ },
+ expectedFixture: "ociv1.manifest.json",
+ },
+ {
+ name: "zstd → uncompressed",
+ sourceFixture: "ociv1.zstd.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Decompress,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Decompress,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Decompress,
+ },
+ },
+ expectedFixture: "ociv1.uncompressed.manifest.json",
+ },
+ {
+ name: "invalid compression operation",
+ sourceFixture: "ociv1.zstd.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: 42, // MUST fail here
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ },
+ expectedFixture: "",
+ },
+ {
+ name: "invalid compression algorithm",
+ sourceFixture: "ociv1.zstd.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: 42,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: imgspecv1.MediaTypeImageLayerZstd,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &customCompression, // MUST fail here
+ },
+ },
+ expectedFixture: "",
+ },
+ {
+ name: "gzip → uncompressed",
+ sourceFixture: "ociv1.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Decompress,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Decompress,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Decompress,
+ },
+ },
+ expectedFixture: "ociv1.uncompressed.manifest.json",
+ },
+ {
+ name: "nondistributable → gzip",
+ sourceFixture: "ociv1.nondistributable.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ },
+ },
+ expectedFixture: "ociv1.nondistributable.gzip.manifest.json",
+ },
+ {
+ name: "nondistributable → zstd",
+ sourceFixture: "ociv1.nondistributable.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Zstd,
+ },
+ },
+ expectedFixture: "ociv1.nondistributable.zstd.manifest.json",
+ },
+ {
+ name: "nondistributable gzip → uncompressed",
+ sourceFixture: "ociv1.nondistributable.gzip.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: imgspecv1.MediaTypeImageLayerGzip,
+ CompressionOperation: types.Decompress,
+ },
+ },
+ expectedFixture: "ociv1.nondistributable.manifest.json",
+ },
+ {
+ name: "uncompressed → gzip encrypted",
+ sourceFixture: "ociv1.uncompressed.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 32654,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer1"},
+ MediaType: imgspecv1.MediaTypeImageLayer,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ {
+ Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ Size: 16724,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"},
+ MediaType: imgspecv1.MediaTypeImageLayer,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ {
+ Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ Size: 73109,
+ Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"},
+ MediaType: imgspecv1.MediaTypeImageLayer,
+ CompressionOperation: types.Compress,
+ CompressionAlgorithm: &compression.Gzip,
+ CryptoOperation: types.Encrypt,
+ },
+ },
+ expectedFixture: "ociv1.encrypted.manifest.json",
+ },
+ {
+ name: "gzip encrypted → uncompressed decrypted",
+ sourceFixture: "ociv1.encrypted.manifest.json",
+ updates: []types.BlobInfo{
+ {
+ Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ Size: 32654,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ {
+ Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
+ Size: 16724,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ {
+ Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
+ Size: 73109,
+ MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
+ CompressionOperation: types.Decompress,
+ CryptoOperation: types.Decrypt,
+ },
+ },
+ expectedFixture: "ociv1.uncompressed.manifest.json",
+ },
+ } {
+ manifest := manifestOCI1FromFixture(t, c.sourceFixture)
+
+ err := manifest.UpdateLayerInfos(c.updates)
+ if c.expectedFixture == "" {
+ assert.Error(t, err, c.name)
+ } else {
+ require.NoError(t, err, c.name)
+
+ updatedManifestBytes, err := manifest.Serialize()
+ require.NoError(t, err, c.name)
+
+ expectedManifest := manifestOCI1FromFixture(t, c.expectedFixture)
+ expectedManifestBytes, err := expectedManifest.Serialize()
+ require.NoError(t, err, c.name)
+
+ assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes), c.name)
+ }
+ }
+}
+
+func TestOCI1Inspect(t *testing.T) {
+ // Success is tested in image.TestManifestOCI1Inspect .
+ m := manifestOCI1FromFixture(t, "ociv1.artifact.json")
+ _, err := m.Inspect(func(info types.BlobInfo) ([]byte, error) {
+ require.Equal(t, m.Config.Digest, info.Digest)
+	// This just-enough-artifact contains a zero-byte config; sanity-check that’s still the case.
+ require.Equal(t, int64(0), m.Config.Size)
+ return []byte{}, nil
+ })
+ var expected NonImageArtifactError
+ assert.ErrorAs(t, err, &expected)
+}
+
+func TestOCI1ImageID(t *testing.T) {
+ m := manifestOCI1FromFixture(t, "ociv1.manifest.json")
+ // These are not the real DiffID values, but they don’t actually matter in our implementation.
+ id, err := m.ImageID([]digest.Digest{
+ "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ })
+ require.NoError(t, err)
+ assert.Equal(t, "b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7", id)
+
+ m = manifestOCI1FromFixture(t, "ociv1.artifact.json")
+ _, err = m.ImageID([]digest.Digest{})
+ var expected NonImageArtifactError
+ assert.ErrorAs(t, err, &expected)
+}
+
+func TestOCI1CanChangeLayerCompression(t *testing.T) {
+ m := manifestOCI1FromFixture(t, "ociv1.manifest.json")
+
+ assert.True(t, m.CanChangeLayerCompression(imgspecv1.MediaTypeImageLayerGzip))
+ // Some projects like to use squashfs and other unspecified formats for layers; don’t touch those.
+ assert.False(t, m.CanChangeLayerCompression("a completely unknown and quite possibly invalid MIME type"))
+
+ artifact := manifestOCI1FromFixture(t, "ociv1.artifact.json")
+ assert.False(t, artifact.CanChangeLayerCompression(imgspecv1.MediaTypeImageLayerGzip))
+}
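
The two artifact tests above show the error type callers should check for; a hedged sketch of that pattern (not part of the upstream patch, with a hypothetical input path):

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/containers/image/v5/manifest"
)

func main() {
	blob, err := os.ReadFile("manifest.json") // hypothetical input path
	if err != nil {
		panic(err)
	}
	m, err := manifest.OCI1FromManifest(blob)
	if err != nil {
		panic(err)
	}
	id, err := m.ImageID(nil)
	var artifactErr manifest.NonImageArtifactError
	switch {
	case errors.As(err, &artifactErr):
		fmt.Println("an OCI artifact, not a container image; no image ID")
	case err != nil:
		panic(err)
	default:
		fmt.Println("image ID:", id)
	}
}
```
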
diff --git a/oci/archive/oci_dest.go b/oci/archive/oci_dest.go
new file mode 100644
index 0000000..8386c47
--- /dev/null
+++ b/oci/archive/oci_dest.go
@@ -0,0 +1,189 @@
+package archive
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+type ociArchiveImageDestination struct {
+ impl.Compat
+
+ ref ociArchiveReference
+ unpackedDest private.ImageDestination
+ tempDirRef tempDirOCIRef
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing directory.
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageDestination, error) {
+ tempDirRef, err := createOCIRef(sys, ref.image)
+ if err != nil {
+ return nil, fmt.Errorf("creating oci reference: %w", err)
+ }
+ unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys)
+ if err != nil {
+ if err := tempDirRef.deleteTempDir(); err != nil {
+ return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
+ }
+ return nil, err
+ }
+ d := &ociArchiveImageDestination{
+ ref: ref,
+ unpackedDest: imagedestination.FromPublic(unpackedDest),
+ tempDirRef: tempDirRef,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
+
+// Reference returns the reference used to set up this destination.
+func (d *ociArchiveImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any
+// Close deletes the temp directory of the oci-archive image
+func (d *ociArchiveImageDestination) Close() error {
+ defer func() {
+ err := d.tempDirRef.deleteTempDir()
+ logrus.Debugf("Error deleting temporary directory: %v", err)
+ }()
+ return d.unpackedDest.Close()
+}
+
+func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string {
+ return d.unpackedDest.SupportedManifestMIMETypes()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures
+func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error {
+ return d.unpackedDest.SupportsSignatures(ctx)
+}
+
+func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression {
+ return d.unpackedDest.DesiredLayerCompression()
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool {
+ return d.unpackedDest.AcceptsForeignLayerURLs()
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise
+func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
+ return d.unpackedDest.MustMatchRuntimeOS()
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.unpackedDest.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
+ return d.unpackedDest.SupportsPutBlobPartial()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
+ return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
+}
+
+// PutManifest writes the manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ return d.unpackedDest.PutManifest(ctx, m, instanceDigest)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// After the directory is made, it is tarred up into a file and the directory is deleted
+func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
+ return fmt.Errorf("storing image %q: %w", d.ref.image, err)
+ }
+
+ // path of directory to tar up
+ src := d.tempDirRef.tempDirectory
+ // path to save tarred up file
+ dst := d.ref.resolvedFile
+ return tarDirectory(src, dst)
+}
+
+// tarDirectory archives the directory at src into a tar file at dst
+func tarDirectory(src, dst string) error {
+ // input is a stream of bytes from the archive of the directory at path
+ input, err := archive.Tar(src, archive.Uncompressed)
+ if err != nil {
+ return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
+ }
+
+ // creates the tar file
+ outFile, err := os.Create(dst)
+ if err != nil {
+ return fmt.Errorf("creating tar file %q: %w", dst, err)
+ }
+ defer outFile.Close()
+
+ // copies the contents of the directory to the tar file
+ // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+ _, err = io.Copy(outFile, input)
+
+ return err
+}
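
The destination above is normally reached through the generic copy pipeline rather than directly. A minimal sketch (not part of the upstream patch) of copying an image into an oci-archive file; the source and destination names are placeholders, and a real caller would load the system signature policy instead of accepting anything.

```go
package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	ctx := context.Background()

	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest") // placeholder source
	if err != nil {
		panic(err)
	}
	destRef, err := alltransports.ParseImageName("oci-archive:/tmp/alpine.ociarchive:latest") // placeholder output
	if err != nil {
		panic(err)
	}

	// Accept anything; real code should use the configured system policy.
	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyCtx.Destroy()

	if _, err := copy.Image(ctx, policyCtx, destRef, srcRef, nil); err != nil {
		panic(err)
	}
}
```
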
diff --git a/oci/archive/oci_dest_test.go b/oci/archive/oci_dest_test.go
new file mode 100644
index 0000000..a67112c
--- /dev/null
+++ b/oci/archive/oci_dest_test.go
@@ -0,0 +1,5 @@
+package archive
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageDestination = (*ociArchiveImageDestination)(nil)
diff --git a/oci/archive/oci_src.go b/oci/archive/oci_src.go
new file mode 100644
index 0000000..ee84098
--- /dev/null
+++ b/oci/archive/oci_src.go
@@ -0,0 +1,174 @@
+package archive
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ ocilayout "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
+// but nothing matches the “image” part of the provided reference.
+type ImageNotFoundError struct {
+ ref ociArchiveReference
+ // We may make members public, or add methods, in the future.
+}
+
+func (e ImageNotFoundError) Error() string {
+ return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
+}
+
+// ArchiveFileNotFoundError occurs when the archive file does not exist.
+type ArchiveFileNotFoundError struct {
+ // ref is the image reference
+ ref ociArchiveReference
+ // path is the file path that was not present
+ path string
+}
+
+func (e ArchiveFileNotFoundError) Error() string {
+ return fmt.Sprintf("archive file not found: %q", e.path)
+}
+
+type ociArchiveImageSource struct {
+ impl.Compat
+
+ ref ociArchiveReference
+ unpackedSrc private.ImageSource
+ tempDirRef tempDirOCIRef
+}
+
+// newImageSource returns an ImageSource for reading from the archive.
+// It untars the archive file into a temporary directory and reads from there.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageSource, error) {
+ tempDirRef, err := createUntarTempDir(sys, ref)
+ if err != nil {
+ return nil, fmt.Errorf("creating temp directory: %w", err)
+ }
+
+ unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
+ if err != nil {
+ var notFound ocilayout.ImageNotFoundError
+ if errors.As(err, &notFound) {
+ err = ImageNotFoundError{ref: ref}
+ }
+ if err := tempDirRef.deleteTempDir(); err != nil {
+ return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
+ }
+ return nil, err
+ }
+ s := &ociArchiveImageSource{
+ ref: ref,
+ unpackedSrc: imagesource.FromPublic(unpackedSrc),
+ tempDirRef: tempDirRef,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// LoadManifestDescriptor loads the manifest descriptor of the image.
+// Deprecated: use LoadManifestDescriptorWithContext instead
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+ return LoadManifestDescriptorWithContext(nil, imgRef)
+}
+
+// LoadManifestDescriptorWithContext loads the manifest descriptor of the image.
+func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+ ociArchRef, ok := imgRef.(ociArchiveReference)
+ if !ok {
+ return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociArchiveReference")
+ }
+ tempDirRef, err := createUntarTempDir(sys, ociArchRef)
+ if err != nil {
+ return imgspecv1.Descriptor{}, fmt.Errorf("creating temp directory: %w", err)
+ }
+ defer func() {
+ err := tempDirRef.deleteTempDir()
+ logrus.Debugf("Error deleting temporary directory: %v", err)
+ }()
+
+ descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
+ if err != nil {
+ return imgspecv1.Descriptor{}, fmt.Errorf("loading index: %w", err)
+ }
+ return descriptor, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociArchiveImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+// Close deletes the temporary directory the archive was unpacked into
+func (s *ociArchiveImageSource) Close() error {
+ defer func() {
+ err := s.tempDirRef.deleteTempDir()
+ logrus.Debugf("error deleting tmp dir: %v", err)
+ }()
+ return s.unpackedSrc.Close()
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ return s.unpackedSrc.GetManifest(ctx, instanceDigest)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ return s.unpackedSrc.GetBlob(ctx, info, cache)
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
+ return s.unpackedSrc.SupportsGetBlobAt()
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return s.unpackedSrc.GetSignaturesWithFormat(ctx, instanceDigest)
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return s.unpackedSrc.LayerInfosForCopy(ctx, instanceDigest)
+}
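
LoadManifestDescriptorWithContext above untars the archive into a temporary directory just long enough to read the index. A sketch of calling it (not part of the upstream patch), with a placeholder archive path and the ArchiveFileNotFoundError check defined above:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/oci/archive"
	"github.com/containers/image/v5/types"
)

func main() {
	ref, err := archive.ParseReference("/tmp/alpine.ociarchive:latest") // placeholder archive path
	if err != nil {
		panic(err)
	}
	desc, err := archive.LoadManifestDescriptorWithContext(&types.SystemContext{}, ref)
	var notFound archive.ArchiveFileNotFoundError
	switch {
	case errors.As(err, &notFound):
		fmt.Println("archive file does not exist:", notFound)
	case err != nil:
		panic(err)
	default:
		fmt.Println(desc.MediaType, desc.Digest, desc.Size)
	}
}
```
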
diff --git a/oci/archive/oci_src_test.go b/oci/archive/oci_src_test.go
new file mode 100644
index 0000000..a78246c
--- /dev/null
+++ b/oci/archive/oci_src_test.go
@@ -0,0 +1,26 @@
+package archive
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var _ private.ImageSource = (*ociArchiveImageSource)(nil)
+
+func TestNewImageSourceNotFound(t *testing.T) {
+ sysctx := types.SystemContext{}
+ emptyDir := t.TempDir()
+ archivePath := filepath.Join(emptyDir, "foo.ociarchive")
+ imgref, err := ParseReference(archivePath)
+ require.NoError(t, err)
+ _, err = LoadManifestDescriptorWithContext(&sysctx, imgref)
+ assert.NotNil(t, err)
+ var aerr ArchiveFileNotFoundError
+ assert.ErrorAs(t, err, &aerr)
+ assert.Equal(t, aerr.path, archivePath)
+}
diff --git a/oci/archive/oci_transport.go b/oci/archive/oci_transport.go
new file mode 100644
index 0000000..d5fee36
--- /dev/null
+++ b/oci/archive/oci_transport.go
@@ -0,0 +1,200 @@
+package archive
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/oci/internal"
+ ocilayout "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/archive"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OCI archives.
+// It creates an oci-archive tar file by calling into the OCI layout transport,
+// tarring the directory created by that transport, and then deleting the directory.
+var Transport = ociArchiveTransport{}
+
+type ociArchiveTransport struct{}
+
+// ociArchiveReference is an ImageReference for OCI Archive paths
+type ociArchiveReference struct {
+ file string
+ resolvedFile string
+ image string
+}
+
+func (t ociArchiveTransport) Name() string {
+ return "oci-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix,
+// into an ImageReference.
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+ return internal.ValidateScope(scope)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+ file, image := internal.SplitPathAndImage(reference)
+ return NewReference(file, image)
+}
+
+// NewReference returns an OCI reference for a file and an image.
+func NewReference(file, image string) (types.ImageReference, error) {
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := internal.ValidateOCIPath(file); err != nil {
+ return nil, err
+ }
+
+ if err := internal.ValidateImageName(image); err != nil {
+ return nil, err
+ }
+
+ return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
+}
+
+func (ref ociArchiveReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+func (ref ociArchiveReference) StringWithinTransport() string {
+ return fmt.Sprintf("%s:%s", ref.file, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+func (ref ociArchiveReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+func (ref ociArchiveReference) PolicyConfigurationIdentity() string {
+ // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+ // same image and the two can’t be statically disambiguated. Using at least the repository directory is
+ // less granular but hopefully still useful.
+ return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set
+func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedFile
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the global default "",
+ // and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+ if lastSlash == -1 || path == "/" {
+ break
+ }
+ res = append(res, path)
+ path = path[:lastSlash]
+ }
+ return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.New("Deleting images not implemented for oci: images")
+}
+
+// struct to store the ociReference and temporary directory returned by createOCIRef
+type tempDirOCIRef struct {
+ tempDirectory string
+ ociRefExtracted types.ImageReference
+}
+
+// deletes the temporary directory created
+func (t *tempDirOCIRef) deleteTempDir() error {
+ return os.RemoveAll(t.tempDirectory)
+}
+
+// createOCIRef creates the oci reference of the image.
+// If SystemContext.BigFilesTemporaryDir is not "", it overrides the temporary directory used for storing big files.
+func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
+ dir, err := tmpdir.MkDirBigFileTemp(sys, "oci")
+ if err != nil {
+ return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err)
+ }
+ ociRef, err := ocilayout.NewReference(dir, image)
+ if err != nil {
+ return tempDirOCIRef{}, err
+ }
+
+ tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
+ return tempDirRef, nil
+}
+
+// createUntarTempDir creates a temporary directory and untars the archive’s content into it
+func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
+ src := ref.resolvedFile
+ arch, err := os.Open(src)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return tempDirOCIRef{}, ArchiveFileNotFoundError{ref: ref, path: src}
+ } else {
+ return tempDirOCIRef{}, err
+ }
+ }
+ defer arch.Close()
+
+ tempDirRef, err := createOCIRef(sys, ref.image)
+ if err != nil {
+ return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err)
+ }
+ dst := tempDirRef.tempDirectory
+
+ // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+ if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
+ if err := tempDirRef.deleteTempDir(); err != nil {
+ return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
+ }
+ return tempDirOCIRef{}, fmt.Errorf("untarring file %q: %w", tempDirRef.tempDirectory, err)
+ }
+ return tempDirRef, nil
+}
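
The policy-configuration methods above intentionally drop the image part and walk up the directory hierarchy. A sketch (not part of the upstream patch) that prints them, assuming /usr/share exists and is not a symlink, as the transport tests below also assume:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/archive"
)

func main() {
	// NewReference resolves the path, so it must already exist.
	ref, err := archive.NewReference("/usr/share", "someimage")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport())         // /usr/share:someimage
	fmt.Println(ref.PolicyConfigurationIdentity())   // /usr/share (the image part is dropped)
	fmt.Println(ref.PolicyConfigurationNamespaces()) // [/usr/share /usr]
}
```
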
diff --git a/oci/archive/oci_transport_test.go b/oci/archive/oci_transport_test.go
new file mode 100644
index 0000000..f521a27
--- /dev/null
+++ b/oci/archive/oci_transport_test.go
@@ -0,0 +1,274 @@
+package archive
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ _ "github.com/containers/image/v5/internal/testing/explicitfilepath-tmpdir"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "oci-archive", Transport.Name())
+}
+
+func TestTransportParseReference(t *testing.T) {
+ testParseReference(t, Transport.ParseReference)
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ for _, scope := range []string{
+ "/etc",
+ "/this/does/not/exist",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+
+ for _, scope := range []string{
+ "relative/path",
+ "/",
+ "/double//slashes",
+ "/has/./dot",
+ "/has/dot/../dot",
+ "/trailing/slash/",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.Error(t, err, scope)
+ }
+}
+
+func TestParseReference(t *testing.T) {
+ testParseReference(t, ParseReference)
+}
+
+// testParseReference is a test shared by Transport.ParseReference and ParseReference.
+func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
+ tmpDir := t.TempDir()
+
+ for _, path := range []string{
+ "/",
+ "/etc",
+ tmpDir,
+ "relativepath",
+ tmpDir + "/thisdoesnotexist",
+ } {
+ for _, image := range []struct{ suffix, image string }{
+ {":notlatest:image", "notlatest:image"},
+ {":latestimage", "latestimage"},
+ {":", ""},
+ {"", ""},
+ } {
+ input := path + image.suffix
+ ref, err := fn(input)
+ require.NoError(t, err, input)
+ ociArchRef, ok := ref.(ociArchiveReference)
+ require.True(t, ok)
+ assert.Equal(t, path, ociArchRef.file, input)
+ assert.Equal(t, image.image, ociArchRef.image, input)
+ }
+ }
+
+ _, err := fn(tmpDir + ":invalid'image!value@")
+ assert.Error(t, err)
+}
+
+func TestNewReference(t *testing.T) {
+ const (
+ imageValue = "imageValue"
+ noImageValue = ""
+ )
+
+ tmpDir := t.TempDir()
+
+ ref, err := NewReference(tmpDir, imageValue)
+ require.NoError(t, err)
+ ociArchRef, ok := ref.(ociArchiveReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir, ociArchRef.file)
+ assert.Equal(t, imageValue, ociArchRef.image)
+
+ ref, err = NewReference(tmpDir, noImageValue)
+ require.NoError(t, err)
+ ociArchRef, ok = ref.(ociArchiveReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir, ociArchRef.file)
+ assert.Equal(t, noImageValue, ociArchRef.image)
+
+ _, err = NewReference(tmpDir+"/thisparentdoesnotexist/something", imageValue)
+ assert.Error(t, err)
+
+ _, err = NewReference(tmpDir, "invalid'image!value@")
+ assert.Error(t, err)
+
+ _, err = NewReference(tmpDir+"/has:colon", imageValue)
+ assert.Error(t, err)
+}
+
+// refToTempOCI creates a temporary directory and returns a reference to it.
+func refToTempOCI(t *testing.T) (types.ImageReference, string) {
+ tmpDir := t.TempDir()
+ m := `{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7143,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ },
+ "annotations": {
+ "org.opencontainers.image.ref.name": "imageValue"
+ }
+ }
+ ]
+ }
+`
+ err := os.WriteFile(filepath.Join(tmpDir, "index.json"), []byte(m), 0644)
+ require.NoError(t, err)
+ ref, err := NewReference(tmpDir, "imageValue")
+ require.NoError(t, err)
+ return ref, tmpDir
+}
+
+// refToTempOCIArchive creates a temporary directory, tars the contents of that directory
+// into a temporary tar file, and returns a reference to that tar file
+func refToTempOCIArchive(t *testing.T) (ref types.ImageReference, tmpTarFile string) {
+ tmpDir := t.TempDir()
+ m := `{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7143,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ },
+ "annotations": {
+ "org.opencontainers.image.ref.name": "imageValue"
+ }
+ }
+ ]
+ }
+`
+ err := os.WriteFile(filepath.Join(tmpDir, "index.json"), []byte(m), 0644)
+ require.NoError(t, err)
+ tarFile, err := os.CreateTemp("", "oci-transport-test.tar")
+ require.NoError(t, err)
+ err = tarDirectory(tmpDir, tarFile.Name())
+ require.NoError(t, err)
+ ref, err = NewReference(tarFile.Name(), "")
+ require.NoError(t, err)
+ return ref, tarFile.Name()
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for _, c := range []struct{ input, result string }{
+ {"/dir1:notlatest:notlatest", "/dir1:notlatest:notlatest"}, // Explicit image
+ {"/dir3:", "/dir3:"}, // No image
+ } {
+ ref, err := ParseReference(tmpDir + c.input)
+ require.NoError(t, err, c.input)
+ stringRef := ref.StringWithinTransport()
+ assert.Equal(t, tmpDir+c.result, stringRef, c.input)
+ // Do one more round to verify that the output can be parsed, to an equal value.
+ ref2, err := Transport.ParseReference(stringRef)
+ require.NoError(t, err, c.input)
+ stringRef2 := ref2.StringWithinTransport()
+ assert.Equal(t, stringRef, stringRef2, c.input)
+ }
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ assert.Nil(t, ref.DockerReference())
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+
+ assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity())
+	// A non-canonical path. Test just one; the various other cases are
+	// tested in explicitfilepath.ResolvePathToFullyExplicit.
+ ref, err := NewReference(tmpDir+"/.", "image2")
+ require.NoError(t, err)
+ assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity())
+
+ // "/" as a corner case.
+ ref, err = NewReference("/", "image3")
+ require.NoError(t, err)
+ assert.Equal(t, "/", ref.PolicyConfigurationIdentity())
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+ // We don't really know enough to make a full equality test here.
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns)
+ assert.True(t, len(ns) >= 2)
+ assert.Equal(t, tmpDir, ns[0])
+ assert.Equal(t, filepath.Dir(tmpDir), ns[1])
+
+	// Test with a known path which should exist. Test just one non-canonical
+	// path; the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
+	//
+	// It would be nice to test a deeper hierarchy, but it is not obvious what
+	// deeper path is always available in the various distros, AND is not likely
+	// to contain a symbolic link.
+ for _, path := range []string{"/usr/share", "/usr/share/./."} {
+ _, err := os.Lstat(path)
+ require.NoError(t, err)
+ ref, err := NewReference(path, "someimage")
+ require.NoError(t, err)
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns)
+ assert.Equal(t, []string{"/usr/share", "/usr"}, ns)
+ }
+
+ // "/" as a corner case.
+ ref, err := NewReference("/", "image3")
+ require.NoError(t, err)
+ assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces())
+}
+
+func TestReferenceNewImage(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ _, err := ref.NewImage(context.Background(), nil)
+ assert.Error(t, err)
+}
+
+func TestReferenceNewImageSource(t *testing.T) {
+ ref, tmpTarFile := refToTempOCIArchive(t)
+ defer os.RemoveAll(tmpTarFile)
+ src, err := ref.NewImageSource(context.Background(), nil)
+ assert.NoError(t, err)
+ defer src.Close()
+}
+
+func TestReferenceNewImageDestination(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ assert.NoError(t, err)
+ defer dest.Close()
+}
+
+func TestReferenceDeleteImage(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ err := ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err)
+}
diff --git a/oci/internal/oci_util.go b/oci/internal/oci_util.go
new file mode 100644
index 0000000..53827b1
--- /dev/null
+++ b/oci/internal/oci_util.go
@@ -0,0 +1,121 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+ separator = `(?:[-._:@+]|--)`
+ alphanum = `(?:[A-Za-z0-9]+)`
+ component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+ if len(image) == 0 {
+ return nil
+ }
+
+ var err error
+ if !refRegexp.MatchString(image) {
+ err = fmt.Errorf("Invalid image %s", image)
+ }
+ return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
+func SplitPathAndImage(reference string) (string, string) {
+ if runtime.GOOS == "windows" {
+ return splitPathAndImageWindows(reference)
+ }
+ return splitPathAndImageNonWindows(reference)
+}
+
+func splitPathAndImageWindows(reference string) (string, string) {
+ groups := windowsRefRegexp.FindStringSubmatch(reference)
+ // nil group means no match
+ if groups == nil {
+ return reference, ""
+ }
+
+	// we expect three elements: the full match, the capture group for the path, and
+	// the capture group for the image
+ if len(groups) != 3 {
+ return reference, ""
+ }
+ return groups[1], groups[2]
+}
+
+func splitPathAndImageNonWindows(reference string) (string, string) {
+ path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
+ return path, image
+}
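+
+// As an illustrative sketch (example values only, not taken from the tests):
+// on non-Windows platforms the reference is cut at the first colon, so
+//
+//	SplitPathAndImage("/var/lib/oci:busybox:latest") // -> "/var/lib/oci", "busybox:latest"
+//	SplitPathAndImage("/var/lib/oci")                // -> "/var/lib/oci", ""
+//
+// while on Windows the drive-letter colon is kept as part of the path and the
+// next colon, if any, separates the image.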
+
+// ValidateOCIPath takes the OCI path and validates it.
+func ValidateOCIPath(path string) error {
+ if runtime.GOOS == "windows" {
+ // On Windows we must allow for a ':' as part of the path
+ if strings.Count(path, ":") > 1 {
+ return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+ }
+ } else {
+ if strings.Contains(path, ":") {
+ return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path)
+ }
+ }
+ return nil
+}
+
+// ValidateScope validates a policy configuration scope for an OCI transport.
+func ValidateScope(scope string) error {
+ var err error
+ if runtime.GOOS == "windows" {
+ err = validateScopeWindows(scope)
+ } else {
+ err = validateScopeNonWindows(scope)
+ }
+ if err != nil {
+ return err
+ }
+
+ cleaned := filepath.Clean(scope)
+ if cleaned != scope {
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ }
+
+ return nil
+}
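+
+// For example (illustrative paths only): ValidateScope("/var/lib/oci") is accepted
+// on non-Windows platforms, ValidateScope("/var/lib/oci/") is rejected by the
+// filepath.Clean canonicality check, and ValidateScope("relative") is rejected
+// because it is not an absolute path.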
+
+func validateScopeWindows(scope string) error {
+ matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
+ if !matched {
+ return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
+ }
+
+ return nil
+}
+
+func validateScopeNonWindows(scope string) error {
+ if !strings.HasPrefix(scope, "/") {
+ return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
+ }
+
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+
+ return nil
+}
diff --git a/oci/internal/oci_util_test.go b/oci/internal/oci_util_test.go
new file mode 100644
index 0000000..2438acf
--- /dev/null
+++ b/oci/internal/oci_util_test.go
@@ -0,0 +1,63 @@
+package internal
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type testDataSplitReference struct {
+ ref string
+ dir string
+ image string
+}
+
+type testDataScopeValidation struct {
+ scope string
+ errMessage string
+}
+
+func TestSplitReferenceIntoDirAndImageWindows(t *testing.T) {
+ tests := []testDataSplitReference{
+ {`C:\foo\bar:busybox:latest`, `C:\foo\bar`, "busybox:latest"},
+ {`C:\foo\bar:busybox`, `C:\foo\bar`, "busybox"},
+ {`C:\foo\bar`, `C:\foo\bar`, ""},
+ }
+ for _, test := range tests {
+ dir, image := splitPathAndImageWindows(test.ref)
+ assert.Equal(t, test.dir, dir, "Unexpected OCI directory")
+ assert.Equal(t, test.image, image, "Unexpected image")
+ }
+}
+
+func TestSplitReferenceIntoDirAndImageNonWindows(t *testing.T) {
+ tests := []testDataSplitReference{
+ {"/foo/bar:busybox:latest", "/foo/bar", "busybox:latest"},
+ {"/foo/bar:busybox", "/foo/bar", "busybox"},
+ {"/foo/bar", "/foo/bar", ""},
+ }
+ for _, test := range tests {
+ dir, image := splitPathAndImageNonWindows(test.ref)
+ assert.Equal(t, test.dir, dir, "Unexpected OCI directory")
+ assert.Equal(t, test.image, image, "Unexpected image")
+ }
+}
+
+func TestValidateScopeWindows(t *testing.T) {
+ tests := []testDataScopeValidation{
+ {`C:\foo`, ""},
+ {`D:\`, ""},
+ {"C:", "Invalid scope 'C:'. Must be an absolute path"},
+ {"E", "Invalid scope 'E'. Must be an absolute path"},
+ {"", "Invalid scope ''. Must be an absolute path"},
+ }
+ for _, test := range tests {
+ err := validateScopeWindows(test.scope)
+ if test.errMessage == "" {
+ assert.NoError(t, err)
+ } else {
+ assert.EqualError(t, err, test.errMessage, fmt.Sprintf("No error for scope '%s'", test.scope))
+ }
+ }
+}
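+
+// TestValidateScopeNonWindows is a minimal illustrative counterpart to
+// TestValidateScopeWindows (a sketch, not part of the original suite); it
+// exercises validateScopeNonWindows against the error strings defined in
+// oci_util.go.
+func TestValidateScopeNonWindows(t *testing.T) {
+	tests := []testDataScopeValidation{
+		{"/foo", ""},
+		{"/foo/bar", ""},
+		{"relativepath", "Invalid scope relativepath: must be an absolute path"},
+		{"", "Invalid scope : must be an absolute path"},
+		{"/", `Invalid scope "/": Use the generic default scope ""`},
+	}
+	for _, test := range tests {
+		err := validateScopeNonWindows(test.scope)
+		if test.errMessage == "" {
+			assert.NoError(t, err)
+		} else {
+			assert.EqualError(t, err, test.errMessage, fmt.Sprintf("Unexpected error for scope '%s'", test.scope))
+		}
+	}
+}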
diff --git a/oci/layout/fixtures/accepted_certs/cacert.crt b/oci/layout/fixtures/accepted_certs/cacert.crt
new file mode 100644
index 0000000..ef24c57
--- /dev/null
+++ b/oci/layout/fixtures/accepted_certs/cacert.crt
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICDTCCAW+gAwIBAgITALhjLVYYA/83IbFqkDcaeQwEQTAKBggqhkjOPQQDBDAS
+MRAwDgYDVQQKDAdBY21lIENvMB4XDTIwMDkzMDEyMzkzNloXDTMwMDkyODEyMzkz
+NlowEjEQMA4GA1UECgwHQWNtZSBDbzCBmzAQBgcqhkjOPQIBBgUrgQQAIwOBhgAE
+AI3pxckijV44L3ffAlLOqB4oA/HpP7S5gTpWrIUU+2SxFJU/bcTKDLPk1cEC87vW
++UCYIXAyYGlyMAGSm0GxAFHnAIIrQzx9m3yiHbUyIPvRMW4BoDKsLaf5+GIZMm9n
+Oq2qnjvHr9ag2J3IzxEqQ8KZ95ivmHYrh3VsnfisI7c3opiro2EwXzAOBgNVHQ8B
+Af8EBAMCAqQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0RBBYwFIIJbG9jYWxob3N0gQdhQGEuY29tMAoGCCqGSM49
+BAMEA4GLADCBhwJBONccd6jEGC+KO+gvc6xRqCCyn4jT8Dod5u1AxnvAUSQNFxEp
+YE8LzdaqyRQbB8cbubtN2FOuBXVu/enOQp/9AJMCQgEgwEDarjW47JDEjplFF8to
+LsrEtr2NLL5PLIgBpdVAnpQLumI78x/PvlmABwH6/mw8ZRNFRuUJJ5JajmE5zjbZ
+4Q==
+-----END CERTIFICATE-----
diff --git a/oci/layout/fixtures/accepted_certs/cert.cert b/oci/layout/fixtures/accepted_certs/cert.cert
new file mode 100644
index 0000000..ef24c57
--- /dev/null
+++ b/oci/layout/fixtures/accepted_certs/cert.cert
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICDTCCAW+gAwIBAgITALhjLVYYA/83IbFqkDcaeQwEQTAKBggqhkjOPQQDBDAS
+MRAwDgYDVQQKDAdBY21lIENvMB4XDTIwMDkzMDEyMzkzNloXDTMwMDkyODEyMzkz
+NlowEjEQMA4GA1UECgwHQWNtZSBDbzCBmzAQBgcqhkjOPQIBBgUrgQQAIwOBhgAE
+AI3pxckijV44L3ffAlLOqB4oA/HpP7S5gTpWrIUU+2SxFJU/bcTKDLPk1cEC87vW
++UCYIXAyYGlyMAGSm0GxAFHnAIIrQzx9m3yiHbUyIPvRMW4BoDKsLaf5+GIZMm9n
+Oq2qnjvHr9ag2J3IzxEqQ8KZ95ivmHYrh3VsnfisI7c3opiro2EwXzAOBgNVHQ8B
+Af8EBAMCAqQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0RBBYwFIIJbG9jYWxob3N0gQdhQGEuY29tMAoGCCqGSM49
+BAMEA4GLADCBhwJBONccd6jEGC+KO+gvc6xRqCCyn4jT8Dod5u1AxnvAUSQNFxEp
+YE8LzdaqyRQbB8cbubtN2FOuBXVu/enOQp/9AJMCQgEgwEDarjW47JDEjplFF8to
+LsrEtr2NLL5PLIgBpdVAnpQLumI78x/PvlmABwH6/mw8ZRNFRuUJJ5JajmE5zjbZ
+4Q==
+-----END CERTIFICATE-----
diff --git a/oci/layout/fixtures/accepted_certs/cert.key b/oci/layout/fixtures/accepted_certs/cert.key
new file mode 100644
index 0000000..b221f74
--- /dev/null
+++ b/oci/layout/fixtures/accepted_certs/cert.key
@@ -0,0 +1,7 @@
+-----BEGIN EC PRIVATE KEY-----
+MIHcAgEBBEIAMDtdVU5PeUWCo1Ndvr+1X+Hry4I7+NdTqxLlU0ZBudm2ov0iJdZj
+O2PdSW6pRHJl9gYL+D/QjcEIwQBK4vsHS3SgBwYFK4EEACOhgYkDgYYABACN6cXJ
+Io1eOC933wJSzqgeKAPx6T+0uYE6VqyFFPtksRSVP23Eygyz5NXBAvO71vlAmCFw
+MmBpcjABkptBsQBR5wCCK0M8fZt8oh21MiD70TFuAaAyrC2n+fhiGTJvZzqtqp47
+x6/WoNidyM8RKkPCmfeYr5h2K4d1bJ34rCO3N6KYqw==
+-----END EC PRIVATE KEY-----
diff --git a/oci/layout/fixtures/accepted_certs/gencert.sh b/oci/layout/fixtures/accepted_certs/gencert.sh
new file mode 100755
index 0000000..b29a517
--- /dev/null
+++ b/oci/layout/fixtures/accepted_certs/gencert.sh
@@ -0,0 +1,23 @@
+#!/bin/bash -e
+config=$(mktemp -t)
+if test -z "$config" ; then
+ echo error creating temporary file for configuration
+ exit 1
+fi
+trap 'rm -f "$config"' EXIT
+cat > "$config" << EOF
+[req]
+prompt=no
+distinguished_name=dn
+x509_extensions=extensions
+[extensions]
+keyUsage=critical,digitalSignature,keyEncipherment,keyCertSign
+extendedKeyUsage=serverAuth,clientAuth
+basicConstraints=critical,CA:TRUE
+subjectAltName=DNS:localhost,email:a@a.com
+[dn]
+O=Acme Co
+EOF
+serial=$(dd if=/dev/random bs=1 count=16 status=none | hexdump -e '"%x1"')
+openssl req -new -set_serial 0x"$serial" -x509 -sha512 -days 3650 -key cert.key -config "$config" -out cert.cert
+cp cert.cert cacert.crt
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022
new file mode 100644
index 0000000..add1797
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022
@@ -0,0 +1 @@
+insert binary content here #26559
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef
new file mode 100644
index 0000000..b3a7a96
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8",
+ "size": 585
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922",
+ "size": 33
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be
new file mode 100644
index 0000000..8b7c8e3
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a",
+ "size": 583
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022",
+ "size": 34
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861
new file mode 100644
index 0000000..19c1c12
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861
@@ -0,0 +1 @@
+insert binary content here #9811
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805
new file mode 100644
index 0000000..aba2333
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde",
+ "size": 585
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe",
+ "size": 33
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402
new file mode 100644
index 0000000..f21c274
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402
@@ -0,0 +1 @@
+{"created":"2023-08-07T19:38:34.915445772Z","architecture":"386","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:53bfdd548f8566a059cd188348b202a50fb9d39ce80eb5b8f0c670dfa9bc6569"]},"history":[{"created":"2023-08-07T19:38:34.803529816Z","created_by":"/bin/sh -c #(nop) ADD file:c06b4f6991638e506d4d0a4d70c4a78ba30b971767802af4c6b837cdf59d4303 in / "},{"created":"2023-08-07T19:38:34.915445772Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f
new file mode 100644
index 0000000..85617bd
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f
@@ -0,0 +1,24 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be",
+ "size": 525,
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0",
+ "size": 525,
+ "platform": {
+ "architecture": "386",
+ "os": "linux"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8
new file mode 100644
index 0000000..ebe323d
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8
@@ -0,0 +1 @@
+{"created":"2023-08-07T19:20:20.894140623Z","architecture":"amd64","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:4693057ce2364720d39e57e85a5b8e0bd9ac3573716237736d6470ec5b7b7230"]},"history":[{"created":"2023-08-07T19:20:20.71894984Z","created_by":"/bin/sh -c #(nop) ADD file:32ff5e7a78b890996ee4681cc0a26185d3e9acdb4eb1e2aaccb2411f922fed6b in / "},{"created":"2023-08-07T19:20:20.894140623Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5
new file mode 100644
index 0000000..ccf025c
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8",
+ "size": 584
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861",
+ "size": 33
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe
new file mode 100644
index 0000000..a0cd5aa
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe
@@ -0,0 +1 @@
+insert binary content here #7959
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1
new file mode 100644
index 0000000..aeecdfa
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1
@@ -0,0 +1,24 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5",
+ "size": 525,
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3",
+ "size": 525,
+ "platform": {
+ "architecture": "386",
+ "os": "linux"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922
new file mode 100644
index 0000000..f26e504
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922
@@ -0,0 +1 @@
+insert binary content here #1234
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f
new file mode 100644
index 0000000..e1d45d3
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f
@@ -0,0 +1 @@
+{"created":"2023-08-07T19:38:27.007952531Z","architecture":"386","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:f05b0759429ba12d5fda46c196f253cc1cab8f56cd874e9e7be674fc1b8337de"]},"history":[{"created":"2023-08-07T19:38:26.69689892Z","created_by":"/bin/sh -c #(nop) ADD file:4b33c52e11b19fde30197c62ead0b77bde28d34edaa08346a5302cd892d3cebe in / "},{"created":"2023-08-07T19:38:27.007952531Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0
new file mode 100644
index 0000000..c2d027a
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402",
+ "size": 583
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242",
+ "size": 33
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde
new file mode 100644
index 0000000..1ff4ad5
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde
@@ -0,0 +1 @@
+{"created":"2023-08-07T19:20:26.426857961Z","architecture":"amd64","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:36b50b131297b8860da51b2d2b24bb4c08dfbdf2789b08e3cc0f187c98637a19"]},"history":[{"created":"2023-08-07T19:20:26.326707843Z","created_by":"/bin/sh -c #(nop) ADD file:6dd87346b8be240b21b4f4d9296253bf0d28b6579aa52d2118872e3936963b6b in / "},{"created":"2023-08-07T19:20:26.426857961Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a
new file mode 100644
index 0000000..832c118
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a
@@ -0,0 +1 @@
+insert binary content here #28017
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242
new file mode 100644
index 0000000..a18eab8
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242
@@ -0,0 +1 @@
+insert binary content here #4794
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3
new file mode 100644
index 0000000..fb85ad2
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f",
+ "size": 582
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a",
+ "size": 34
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a
new file mode 100644
index 0000000..016b01b
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a
@@ -0,0 +1 @@
+{"created":"2023-08-07T19:20:31.99661329Z","architecture":"amd64","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:0e182002b05f2ab123995821ef14f1cda765a0c31f7a6d260221558f6466535e"]},"history":[{"created":"2023-08-07T19:20:31.893185238Z","created_by":"/bin/sh -c #(nop) ADD file:76d829bbce3dd420a8419919b0916c0fda917011d1e6752ca5b9e53d5ca890a6 in / "},{"created":"2023-08-07T19:20:31.99661329Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/index.json b/oci/layout/fixtures/delete_image_multiple_images/index.json
new file mode 100644
index 0000000..d781143
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/index.json
@@ -0,0 +1,61 @@
+{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "digest": "sha256:a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1",
+ "size": 759,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "latest"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "digest": "sha256:a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1",
+ "size": 759,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "3.18.3"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5",
+ "size": 525,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "3"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5",
+ "size": 525,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "3.18"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805",
+ "size": 525,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "3.17.5"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "digest": "sha256:861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f",
+ "size": 759,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "3.16.7"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef",
+ "size": 525,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "1.0.0"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_multiple_images/info.txt b/oci/layout/fixtures/delete_image_multiple_images/info.txt
new file mode 100644
index 0000000..4ec2b14
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/info.txt
@@ -0,0 +1,61 @@
+This is a tree representation of the fixture to help with writing the tests:
+
+7 references in the index, 10 descriptors and 19 blobs in the blob directory
+
+index.json
+│
+├── 3.17.5
+│ └── manifest: 5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805
+│ config: df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde
+│ layers: 986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe
+│
+├── 3.18
+│ └── manifest: 93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5
+│ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8
+│ layers: 557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861
+│
+├── 3
+│ └── manifest: 93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5
+│ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8
+│ layers: 557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861
+│
+├── 1.0.0
+│ └── manifest: 0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef
+│ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8
+│ layers: a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922
+│
+├── latest
+│ └── index: a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1
+│ ├── linux/am64
+│ │ └── manifest: 93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5
+│ │ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8
+│ │ layers: 557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861
+│ │
+│ └── linux/386
+│ └── manifest: f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3
+│ config: aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f
+│ layers: e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a
+│
+├── 3.18.3
+│ └── index: a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1
+│ ├── linux/am64
+│ │ └── manifest: 93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5
+│ │ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8
+│ │ layers: 557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861
+│ │
+│ └── linux/386
+│ └── manifest: f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3
+│ config: aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f
+│ layers: e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a
+│
+├── 3.16.7
+│ └── index: 861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f
+│ ├── linux/am64
+│ │ └── manifest: 39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be
+│ │ config: f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a
+│ │ layers: 02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022
+│ │
+│ └── linux/386
+│ └── manifest: be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0
+│ config: 7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402
+│ layers: e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242
diff --git a/oci/layout/fixtures/delete_image_multiple_images/oci-layout b/oci/layout/fixtures/delete_image_multiple_images/oci-layout
new file mode 100644
index 0000000..21b1439
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_multiple_images/oci-layout
@@ -0,0 +1 @@
+{"imageLayoutVersion": "1.0.0"} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc
new file mode 100644
index 0000000..e7e64ba
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc
@@ -0,0 +1 @@
+insert binary content here #9671
diff --git a/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423
new file mode 100644
index 0000000..f0f0620
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423
@@ -0,0 +1,30 @@
+{
+ "created": "2019-08-20T20:19:55.211423266Z",
+ "architecture": "amd64",
+ "os": "linux",
+ "config": {
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": [
+ "/bin/sh"
+ ]
+ },
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0"
+ ]
+ },
+ "history": [
+ {
+ "created": "2019-08-20T20:19:55.062606894Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:fe64057fbb83dccb960efabbf1cd8777920ef279a7fa8dbca0a8801c651bdf7c in / "
+ },
+ {
+ "created": "2019-08-20T20:19:55.211423266Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
+ "empty_layer": true
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18 b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18
new file mode 100644
index 0000000..1ff195d
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423",
+ "size": 585
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc",
+ "size": 33
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_only_one_image/index.json b/oci/layout/fixtures/delete_image_only_one_image/index.json
new file mode 100644
index 0000000..b0a0c98
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_only_one_image/index.json
@@ -0,0 +1,13 @@
+{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18",
+ "size": 476,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "latest"
+ }
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_only_one_image/oci-layout b/oci/layout/fixtures/delete_image_only_one_image/oci-layout
new file mode 100644
index 0000000..21b1439
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_only_one_image/oci-layout
@@ -0,0 +1 @@
+{"imageLayoutVersion": "1.0.0"} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 b/oci/layout/fixtures/delete_image_shared_blobs_dir/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423
new file mode 100644
index 0000000..f0f0620
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423
@@ -0,0 +1,30 @@
+{
+ "created": "2019-08-20T20:19:55.211423266Z",
+ "architecture": "amd64",
+ "os": "linux",
+ "config": {
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": [
+ "/bin/sh"
+ ]
+ },
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0"
+ ]
+ },
+ "history": [
+ {
+ "created": "2019-08-20T20:19:55.062606894Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:fe64057fbb83dccb960efabbf1cd8777920ef279a7fa8dbca0a8801c651bdf7c in / "
+ },
+ {
+ "created": "2019-08-20T20:19:55.211423266Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
+ "empty_layer": true
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/index.json b/oci/layout/fixtures/delete_image_shared_blobs_dir/index.json
new file mode 100644
index 0000000..49925c1
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/index.json
@@ -0,0 +1,13 @@
+{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18",
+ "size": 405,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "latest"
+ }
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/oci-layout b/oci/layout/fixtures/delete_image_shared_blobs_dir/oci-layout
new file mode 100644
index 0000000..21b1439
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/oci-layout
@@ -0,0 +1 @@
+{"imageLayoutVersion": "1.0.0"} \ No newline at end of file
diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc b/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc
new file mode 100644
index 0000000..e7e64ba
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc
@@ -0,0 +1 @@
+insert binary content here #9671
diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18 b/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18
new file mode 100644
index 0000000..1ff195d
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423",
+ "size": 585
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc",
+ "size": 33
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc
new file mode 100644
index 0000000..e7e64ba
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc
@@ -0,0 +1 @@
+insert binary content here #9671
diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b3 b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b3
new file mode 100644
index 0000000..e59a5f8
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b3
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423",
+ "size": 740
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc",
+ "size": 33
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423
new file mode 100644
index 0000000..f0f0620
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423
@@ -0,0 +1,30 @@
+{
+ "created": "2019-08-20T20:19:55.211423266Z",
+ "architecture": "amd64",
+ "os": "linux",
+ "config": {
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": [
+ "/bin/sh"
+ ]
+ },
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0"
+ ]
+ },
+ "history": [
+ {
+ "created": "2019-08-20T20:19:55.062606894Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:fe64057fbb83dccb960efabbf1cd8777920ef279a7fa8dbca0a8801c651bdf7c in / "
+ },
+ {
+ "created": "2019-08-20T20:19:55.211423266Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
+ "empty_layer": true
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41 b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41
new file mode 100644
index 0000000..9578f7d
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41
@@ -0,0 +1,30 @@
+{
+ "created": "2019-08-20T20:20:55.211423266Z",
+ "architecture": "amd64",
+ "os": "linux",
+ "config": {
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": [
+ "/bin/sh"
+ ]
+ },
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0"
+ ]
+ },
+ "history": [
+ {
+ "created": "2019-08-20T20:19:55.062606894Z",
+ "created_by": "/bin/sh -c #(nop) ADD file:fe64057fbb83dccb960efabbf1cd8777920ef279a7fa8dbca0a8801c651bdf7c in / "
+ },
+ {
+ "created": "2019-08-20T20:19:55.211423266Z",
+ "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
+ "empty_layer": true
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed
new file mode 100644
index 0000000..16b7c27
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41",
+ "size": 740
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8",
+ "size": 33
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8 b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8
new file mode 100644
index 0000000..3badd42
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8
@@ -0,0 +1 @@
+insert binary content here 32515
diff --git a/oci/layout/fixtures/delete_image_two_identical_references/index.json b/oci/layout/fixtures/delete_image_two_identical_references/index.json
new file mode 100644
index 0000000..80850d4
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_two_identical_references/index.json
@@ -0,0 +1,21 @@
+{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b3",
+ "size": 476,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "1.0.0"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed",
+ "size": 476,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "1.0.0"
+ }
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/delete_image_two_identical_references/oci-layout b/oci/layout/fixtures/delete_image_two_identical_references/oci-layout
new file mode 100644
index 0000000..21b1439
--- /dev/null
+++ b/oci/layout/fixtures/delete_image_two_identical_references/oci-layout
@@ -0,0 +1 @@
+{"imageLayoutVersion": "1.0.0"} \ No newline at end of file
diff --git a/oci/layout/fixtures/manifest/index.json b/oci/layout/fixtures/manifest/index.json
new file mode 100644
index 0000000..7e77908
--- /dev/null
+++ b/oci/layout/fixtures/manifest/index.json
@@ -0,0 +1,17 @@
+{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:84afb6189c4d69f2d040c5f1dc4e0a16fed9b539ce9cfb4ac2526ae4e0576cc0",
+ "size": 496,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "v0.1.1"
+ },
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/oci/layout/fixtures/name_lookups/index.json b/oci/layout/fixtures/name_lookups/index.json
new file mode 100644
index 0000000..b79d108
--- /dev/null
+++ b/oci/layout/fixtures/name_lookups/index.json
@@ -0,0 +1,38 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "size": 1,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "a"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "size": 2,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "b"
+ }
+ },
+ {
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "digest": "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ "size": 3,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "invalid-mime"
+ }
+ },
+ {
+ "mediaType": "x-completely-unknown",
+ "digest": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+ "size": 4,
+ "annotations": {
+ "org.opencontainers.image.ref.name": "invalid-mime"
+ }
+ }
+ ]
+}
diff --git a/oci/layout/fixtures/rejected_certs/cert.cert b/oci/layout/fixtures/rejected_certs/cert.cert
new file mode 100644
index 0000000..98db6b7
--- /dev/null
+++ b/oci/layout/fixtures/rejected_certs/cert.cert
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICDjCCAW+gAwIBAgITALVv4wUc4xK6AbXE9Xge43LAcTAKBggqhkjOPQQDBDAS
+MRAwDgYDVQQKDAdBY21lIENvMB4XDTIwMDkzMDEyMzk1MloXDTMwMDkyODEyMzk1
+MlowEjEQMA4GA1UECgwHQWNtZSBDbzCBmzAQBgcqhkjOPQIBBgUrgQQAIwOBhgAE
+AUePiRf7kRYLuiitqOB9YkcOz1PHYfZWO/PMuu4mebNzr5NUwoNehqrEHK/FQnod
+cRVW2FiCN6agYXfbJZvNX4PnAZOWHfsgpY6kdUoYufq9DuQVoVROXrmXcgbOkZ34
+IObGtPYZhV0uOatEg01vLkkLt+WwKq/oU+VsP+oD1LxrdTmvo2EwXzAOBgNVHQ8B
+Af8EBAMCAqQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0RBBYwFIIJbG9jYWxob3N0gQdiQGIuY29tMAoGCCqGSM49
+BAMEA4GMADCBiAJCAYrKggJuI6r+SE/2URLnPc33sIQYahLu1EwIhk32PrSAMo/Y
+uJHSoW9NB69/9o7QhjAp7jcdkmi9QMAx0s1ocLd4AkIA6XDWHmRXpQ6g4IEmJFvO
+ayk0+DU4esE9LLD5J0te33sck76+q9gnjr5mdw86TD1+VHqDDAbZ0bLiyxOri2WG
+jVM=
+-----END CERTIFICATE-----
diff --git a/oci/layout/fixtures/rejected_certs/cert.key b/oci/layout/fixtures/rejected_certs/cert.key
new file mode 100644
index 0000000..e68f355
--- /dev/null
+++ b/oci/layout/fixtures/rejected_certs/cert.key
@@ -0,0 +1,7 @@
+-----BEGIN EC PRIVATE KEY-----
+MIHcAgEBBEIB3BPUEOohwxGCV8V2fwIBdZ3S7yWADrbz5w17YITBt0p6j1C0NKRx
+xL9V7Cq+P2OkfQa6rxiD7cM8DjP/6y1//XKgBwYFK4EEACOhgYkDgYYABAFHj4kX
++5EWC7oorajgfWJHDs9Tx2H2VjvzzLruJnmzc6+TVMKDXoaqxByvxUJ6HXEVVthY
+gjemoGF32yWbzV+D5wGTlh37IKWOpHVKGLn6vQ7kFaFUTl65l3IGzpGd+CDmxrT2
+GYVdLjmrRINNby5JC7flsCqv6FPlbD/qA9S8a3U5rw==
+-----END EC PRIVATE KEY-----
diff --git a/oci/layout/fixtures/rejected_certs/gencert.sh b/oci/layout/fixtures/rejected_certs/gencert.sh
new file mode 100755
index 0000000..5d81894
--- /dev/null
+++ b/oci/layout/fixtures/rejected_certs/gencert.sh
@@ -0,0 +1,22 @@
+#!/bin/bash -e
+config=$(mktemp -t)
+if test -z "$config" ; then
+ echo error creating temporary file for configuration
+ exit 1
+fi
+trap 'rm -f "$config"' EXIT
+cat > "$config" << EOF
+[req]
+prompt=no
+distinguished_name=dn
+x509_extensions=extensions
+[extensions]
+keyUsage=critical,digitalSignature,keyEncipherment,keyCertSign
+extendedKeyUsage=serverAuth,clientAuth
+basicConstraints=critical,CA:TRUE
+subjectAltName=DNS:localhost,email:b@b.com
+[dn]
+O=Acme Co
+EOF
+serial=$(dd if=/dev/random bs=1 count=16 status=none | hexdump -e '"%x1"')
+openssl req -new -set_serial 0x"$serial" -x509 -sha512 -days 3650 -key cert.key -config "$config" -out cert.cert
diff --git a/oci/layout/fixtures/two_images_manifest/index.json b/oci/layout/fixtures/two_images_manifest/index.json
new file mode 100644
index 0000000..0dd68af
--- /dev/null
+++ b/oci/layout/fixtures/two_images_manifest/index.json
@@ -0,0 +1,27 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7143,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7682,
+ "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux",
+ "features": [
+ "sse4"
+ ]
+ }
+ }
+ ]
+}
diff --git a/oci/layout/oci_delete.go b/oci/layout/oci_delete.go
new file mode 100644
index 0000000..8dd54f2
--- /dev/null
+++ b/oci/layout/oci_delete.go
@@ -0,0 +1,240 @@
+package layout
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/fs"
+ "os"
+
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+)
+
+// DeleteImage deletes the named image from the directory, if supported.
+func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ sharedBlobsDir := ""
+ if sys != nil && sys.OCISharedBlobDirPath != "" {
+ sharedBlobsDir = sys.OCISharedBlobDirPath
+ }
+
+ descriptor, descriptorIndex, err := ref.getManifestDescriptor()
+ if err != nil {
+ return err
+ }
+
+ var blobsUsedByImage map[digest.Digest]int
+
+ switch descriptor.MediaType {
+ case imgspecv1.MediaTypeImageManifest:
+ blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir)
+ case imgspecv1.MediaTypeImageIndex:
+ blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir)
+ default:
+ return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
+ }
+ if err != nil {
+ return err
+ }
+
+ blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir)
+ if err != nil {
+ return err
+ }
+
+ err = ref.deleteBlobs(blobsToDelete)
+ if err != nil {
+ return err
+ }
+
+ return ref.deleteReferenceFromIndex(descriptorIndex)
+}
+
+func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
+ manifest, err := ref.getManifest(descriptor, sharedBlobsDir)
+ if err != nil {
+ return nil, err
+ }
+ blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest)
+ blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference
+
+ return blobsUsedInManifest, nil
+}
+
+func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
+ blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
+ if err != nil {
+ return nil, err
+ }
+ index, err := parseIndex(blobPath)
+ if err != nil {
+ return nil, err
+ }
+
+ blobsUsedInImageRefIndex := make(map[digest.Digest]int)
+ err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir)
+ if err != nil {
+ return nil, err
+ }
+	blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index to the list of blobs used by this reference
+
+ return blobsUsedInImageRefIndex, nil
+}
+
+// Updates a map of digests with their usage counts, so a blob that is referenced three times will have 3 in the map
+func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
+ for _, descriptor := range index.Manifests {
+ destination[descriptor.Digest]++
+ switch descriptor.MediaType {
+ case imgspecv1.MediaTypeImageManifest:
+ manifest, err := ref.getManifest(&descriptor, sharedBlobsDir)
+ if err != nil {
+ return err
+ }
+ for digest, count := range ref.getBlobsUsedInManifest(manifest) {
+ destination[digest] += count
+ }
+ case imgspecv1.MediaTypeImageIndex:
+ blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
+ if err != nil {
+ return err
+ }
+ index, err := parseIndex(blobPath)
+ if err != nil {
+ return err
+ }
+ err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
+ }
+ }
+
+ return nil
+}
+
+func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int {
+ blobsUsedInManifest := make(map[digest.Digest]int, 0)
+
+ blobsUsedInManifest[manifest.Config.Digest]++
+ for _, layer := range manifest.Layers {
+ blobsUsedInManifest[layer.Digest]++
+ }
+
+ return blobsUsedInManifest
+}
+
+// getBlobsToDelete takes a map of digests and their usage counts in the manifest to be deleted.
+// It compares them against the digest usage in the root index and returns the set of blobs that can be safely deleted.
+func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
+ rootIndex, err := ref.getIndex()
+ if err != nil {
+ return nil, err
+ }
+ blobsUsedInRootIndex := make(map[digest.Digest]int)
+ err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
+ if err != nil {
+ return nil, err
+ }
+
+ blobsToDelete := set.New[digest.Digest]()
+
+ for digest, count := range blobsUsedInRootIndex {
+ if count-blobsUsedByDescriptorToDelete[digest] == 0 {
+ blobsToDelete.Add(digest)
+ }
+ }
+
+ return blobsToDelete, nil
+}
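+
+// For example (hypothetical digests): if the root index accounts for three uses of blob A and five
+// uses of blob B, and the descriptor being deleted accounts for three uses of each, then A is added
+// to the returned set (3-3 == 0) while B is kept (5-3 > 0).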
+
+// This transport never generates layouts where blobs for an image are both in the local blobs directory
+// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set.
+//
+// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what
+// the other layouts sharing that directory are, and we might not even have permission to read them),
+// so we can’t really delete any blobs in that case.
+// Checking the _local_ blobs directory, and deleting blobs from there, doesn't hurt in case the layout
+// was created using some other tool or without OCISharedBlobDirPath set, so we silently
+// check for local blobs (but make no noise if the blobs are actually in the shared directory).
+//
+// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set
+func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error {
+ for _, digest := range blobsToDelete.Values() {
+ blobPath, err := ref.blobPath(digest, "") // Only delete in the local directory; see the comment above
+ if err != nil {
+ return err
+ }
+ err = deleteBlob(blobPath)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func deleteBlob(blobPath string) error {
+ logrus.Debugf("Deleting blob at %q", blobPath)
+
+ err := os.Remove(blobPath)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ return nil
+}
+
+func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error {
+ index, err := ref.getIndex()
+ if err != nil {
+ return err
+ }
+
+ index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1)
+
+ return saveJSON(ref.indexPath(), index)
+}
+
+func saveJSON(path string, content any) error {
+ // If the file already exists, get its mode to preserve it
+ var mode fs.FileMode
+ existingfi, err := os.Stat(path)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ } else { // File does not exist, use default mode
+ mode = 0644
+ }
+ } else {
+ mode = existingfi.Mode()
+ }
+
+ file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ return json.NewEncoder(file).Encode(content)
+}
+
+func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) {
+ manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
+ if err != nil {
+ return nil, err
+ }
+
+ manifest, err := parseJSON[imgspecv1.Manifest](manifestPath)
+ if err != nil {
+ return nil, err
+ }
+
+ return manifest, nil
+}
diff --git a/oci/layout/oci_delete_test.go b/oci/layout/oci_delete_test.go
new file mode 100644
index 0000000..7e06456
--- /dev/null
+++ b/oci/layout/oci_delete_test.go
@@ -0,0 +1,298 @@
+package layout
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ cp "github.com/otiai10/copy"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReferenceDeleteImage_onlyOneImage(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_only_one_image")
+
+ ref, err := NewReference(tmpDir, "latest")
+ require.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ require.NoError(t, err)
+
+ // Check that all blobs were deleted
+ blobsDir := filepath.Join(tmpDir, "blobs")
+ files, err := os.ReadDir(filepath.Join(blobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Empty(t, files)
+
+ // Check that the index is empty as there is only one image in the fixture
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ index, err := ociRef.getIndex()
+ require.NoError(t, err)
+ require.Equal(t, 0, len(index.Manifests))
+}
+
+func TestReferenceDeleteImage_onlyOneImage_emptyImageName(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_only_one_image")
+
+ ref, err := NewReference(tmpDir, "")
+ require.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ require.NoError(t, err)
+
+ // Check that all blobs were deleted
+ blobsDir := filepath.Join(tmpDir, "blobs")
+ files, err := os.ReadDir(filepath.Join(blobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Empty(t, files)
+
+ // Check that the index is empty as there is only one image in the fixture
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ index, err := ociRef.getIndex()
+ require.NoError(t, err)
+ require.Equal(t, 0, len(index.Manifests))
+}
+
+func TestReferenceDeleteImage_sharedBlobDir(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_shared_blobs_dir")
+
+ ref, err := NewReference(tmpDir, "latest")
+ require.NoError(t, err)
+
+ sys := &types.SystemContext{OCISharedBlobDirPath: filepath.Join(tmpDir, "shared_blobs")}
+ err = ref.DeleteImage(context.Background(), sys)
+ require.NoError(t, err)
+
+ // Check that the only blob in the local directory was deleted
+ blobsDir := filepath.Join(tmpDir, "blobs")
+ files, err := os.ReadDir(filepath.Join(blobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Empty(t, files)
+
+ // Check that the blobs in the shared blob directory are still present
+ sharedBlobsDir := filepath.Join(tmpDir, "shared_blobs")
+ files, err = os.ReadDir(filepath.Join(sharedBlobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Equal(t, 2, len(files))
+
+ // Check that the index is empty as there is only one image in the fixture
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ index, err := ociRef.getIndex()
+ require.NoError(t, err)
+ require.Equal(t, 0, len(index.Manifests))
+}
+
+func TestReferenceDeleteImage_multipleImages(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_multiple_images")
+
+ ref, err := NewReference(tmpDir, "3.17.5")
+ require.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ require.NoError(t, err)
+
+ // Check that the relevant blobs were deleted/preserved
+ blobsDir := filepath.Join(tmpDir, "blobs")
+ files, err := os.ReadDir(filepath.Join(blobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Equal(t, 16, len(files))
+ assertBlobDoesNotExist(t, blobsDir, "sha256:5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe")
+
+ // Check the index
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ // .. Check that the index has been reduced to the correct size
+ index, err := ociRef.getIndex()
+ require.NoError(t, err)
+ require.Equal(t, 6, len(index.Manifests))
+ // .. Check that the image is not in the index anymore
+ for _, descriptor := range index.Manifests {
+ switch descriptor.Annotations[imgspecv1.AnnotationRefName] {
+ case "3.17.5":
+ assert.Fail(t, "image still present in the index after deletion")
+ default:
+ continue
+ }
+ }
+}
+
+func TestReferenceDeleteImage_multipleImages_blobsUsedByOtherImages(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_multiple_images")
+
+ ref, err := NewReference(tmpDir, "1.0.0")
+ require.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ require.NoError(t, err)
+
+ // Check that the relevant blobs were deleted/preserved
+ blobsDir := filepath.Join(tmpDir, "blobs")
+ files, err := os.ReadDir(filepath.Join(blobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Equal(t, 17, len(files))
+ assertBlobExists(t, blobsDir, "sha256:df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922")
+
+ // Check the index
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ // .. Check that the index has been reduced to the correct size
+ index, err := ociRef.getIndex()
+ require.NoError(t, err)
+ require.Equal(t, 6, len(index.Manifests))
+ // .. Check that the image is not in the index anymore
+ for _, descriptor := range index.Manifests {
+ switch descriptor.Annotations[imgspecv1.AnnotationRefName] {
+ case "1.0.0":
+ assert.Fail(t, "image still present in the index after deletion")
+ default:
+ continue
+ }
+ }
+}
+
+func TestReferenceDeleteImage_multipleImages_imageDoesNotExist(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_multiple_images")
+
+ ref, err := NewReference(tmpDir, "does-not-exist")
+ assert.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err)
+}
+
+func TestReferenceDeleteImage_multipleImages_emptyImageName(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_multiple_images")
+
+ ref, err := NewReference(tmpDir, "")
+ require.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ require.Error(t, err)
+}
+
+func TestReferenceDeleteImage_multipleImages_nestedIndexImage(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_multiple_images")
+
+ ref, err := NewReference(tmpDir, "3.16.7")
+ require.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ require.NoError(t, err)
+
+ // Check that the relevant blobs were deleted/preserved
+ blobsDir := filepath.Join(tmpDir, "blobs")
+ files, err := os.ReadDir(filepath.Join(blobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Equal(t, 12, len(files))
+ assertBlobDoesNotExist(t, blobsDir, "sha256:861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402")
+ assertBlobDoesNotExist(t, blobsDir, "sha256:e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242")
+
+ // Check the index
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ // .. Check that the index has been reduced to the correct size
+ index, err := ociRef.getIndex()
+ require.NoError(t, err)
+ require.Equal(t, 6, len(index.Manifests))
+ // .. Check that the image is not in the index anymore
+ for _, descriptor := range index.Manifests {
+ switch descriptor.Annotations[imgspecv1.AnnotationRefName] {
+ case "3.16.7":
+ assert.Fail(t, "image still present in the index after deletion")
+ default:
+ continue
+ }
+ }
+}
+
+func TestReferenceDeleteImage_multipleImages_nestedIndexImage_refWithSameContent(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_multiple_images")
+
+ ref, err := NewReference(tmpDir, "3.18.3")
+ require.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ require.NoError(t, err)
+
+ // Check that the relevant blobs were deleted/preserved
+ blobsDir := filepath.Join(tmpDir, "blobs")
+ files, err := os.ReadDir(filepath.Join(blobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Equal(t, 19, len(files))
+
+ // Check the index
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ // .. Check that the index has been reduced to the correct size
+ index, err := ociRef.getIndex()
+ require.NoError(t, err)
+ require.Equal(t, 6, len(index.Manifests))
+}
+
+func TestReferenceDeleteImage_multipleImages_twoIdenticalReferences(t *testing.T) {
+ tmpDir := loadFixture(t, "delete_image_two_identical_references")
+
+ ref, err := NewReference(tmpDir, "1.0.0")
+ require.NoError(t, err)
+
+ err = ref.DeleteImage(context.Background(), nil)
+ require.NoError(t, err)
+
+ // Check that the relevant blobs were deleted/preserved - in this case only the first reference should be deleted
+ blobsDir := filepath.Join(tmpDir, "blobs")
+ files, err := os.ReadDir(filepath.Join(blobsDir, "sha256"))
+ require.NoError(t, err)
+ require.Equal(t, 3, len(files))
+ assertBlobExists(t, blobsDir, "sha256:ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed")
+ assertBlobExists(t, blobsDir, "sha256:ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41")
+ assertBlobExists(t, blobsDir, "sha256:fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8")
+
+ // Check the index
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ // .. Check that the index has been reduced to the correct size
+ index, err := ociRef.getIndex()
+ require.NoError(t, err)
+ require.Equal(t, 1, len(index.Manifests))
+}
+
+func loadFixture(t *testing.T, fixtureName string) string {
+ tmpDir := t.TempDir()
+ err := cp.Copy(fmt.Sprintf("fixtures/%v/", fixtureName), tmpDir)
+ require.NoError(t, err)
+ return tmpDir
+}
+
+func assertBlobExists(t *testing.T, blobsDir string, blobDigest string) {
+ digest, err := digest.Parse(blobDigest)
+ require.NoError(t, err)
+ blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Hex())
+ _, err = os.Stat(blobPath)
+ require.NoError(t, err)
+}
+
+func assertBlobDoesNotExist(t *testing.T, blobsDir string, blobDigest string) {
+ digest, err := digest.Parse(blobDigest)
+ require.NoError(t, err)
+ blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Hex())
+ _, err = os.Stat(blobPath)
+ require.True(t, os.IsNotExist(err))
+}
diff --git a/oci/layout/oci_dest.go b/oci/layout/oci_dest.go
new file mode 100644
index 0000000..100d167
--- /dev/null
+++ b/oci/layout/oci_dest.go
@@ -0,0 +1,328 @@
+package layout
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspec "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/slices"
+)
+
+type ociImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.NoSignaturesInitialize
+
+ ref ociReference
+ index imgspecv1.Index
+ sharedBlobDir string
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing directory.
+func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
+ var index *imgspecv1.Index
+ if indexExists(ref) {
+ var err error
+ index, err = ref.getIndex()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ index = &imgspecv1.Index{
+ Versioned: imgspec.Versioned{
+ SchemaVersion: 2,
+ },
+ Annotations: make(map[string]string),
+ }
+ }
+
+ desiredLayerCompression := types.Compress
+ if sys != nil && sys.OCIAcceptUncompressedLayers {
+ desiredLayerCompression = types.PreserveOriginal
+ }
+
+ d := &ociImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeImageIndex,
+ },
+ DesiredLayerCompression: desiredLayerCompression,
+ AcceptsForeignLayerURLs: true,
+ MustMatchRuntimeOS: false,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: true,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+ NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"),
+
+ ref: ref,
+ index: *index,
+ }
+ d.Compat = impl.AddCompat(d)
+ if sys != nil {
+ d.sharedBlobDir = sys.OCISharedBlobDirPath
+ }
+
+ if err := ensureDirectoryExists(d.ref.dir); err != nil {
+ return nil, err
+ }
+ // Per the OCI image specification, layouts MUST have a "blobs" subdirectory,
+ // but it MAY be empty (e.g. if we never end up calling PutBlob)
+ // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19
+ if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil {
+ return nil, err
+ }
+ return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *ociImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *ociImageDestination) Close() error {
+ return nil
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ succeeded := false
+ explicitClosed := false
+ defer func() {
+ if !explicitClosed {
+ blobFile.Close()
+ }
+ if !succeeded {
+ os.Remove(blobFile.Name())
+ }
+ }()
+
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(blobFile, stream)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ blobDigest := digester.Digest()
+ if inputInfo.Size != -1 && size != inputInfo.Size {
+ return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ }
+ if err := blobFile.Sync(); err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+ // On Windows, the “permissions of newly created files” argument to syscall.Open is
+ // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+ // always fails on Windows.
+ if runtime.GOOS != "windows" {
+ if err := blobFile.Chmod(0644); err != nil {
+ return private.UploadedBlob{}, err
+ }
+ }
+
+ blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ if err := ensureParentDirectoryExists(blobPath); err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ // We need to explicitly close the file, since a rename won't otherwise work on Windows
+ blobFile.Close()
+ explicitClosed = true
+ if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+ return private.UploadedBlob{}, err
+ }
+ succeeded = true
+ return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
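+
+// A rough usage sketch (illustrative only; the blob contents are hypothetical, dest is an open
+// types.ImageDestination and ctx a context.Context): external callers go through the public
+// PutBlob wrapper, as the tests in oci_dest_test.go do:
+//
+//    cache := memory.New() // github.com/containers/image/v5/pkg/blobinfocache/memory
+//    data := []byte("layer contents")
+//    info := types.BlobInfo{Digest: digest.FromBytes(data), Size: int64(len(data))}
+//    _, err := dest.PutBlob(ctx, bytes.NewReader(data), info, cache, false)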
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalBlobMatchesRequiredCompression(options) {
+ return false, private.ReusedBlob{}, nil
+ }
+ if info.Digest == "" {
+ return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
+ }
+ blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ finfo, err := os.Stat(blobPath)
+ if err != nil && os.IsNotExist(err) {
+ return false, private.ReusedBlob{}, nil
+ }
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+
+ return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
+// this should be either an OCI manifest (possibly converted to this format by the caller) or index,
+// neither of which we'll need to modify further.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// and may accept a different manifest type instead, the returned error must be a ManifestTypeRejectedError.
+func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ var digest digest.Digest
+ var err error
+ if instanceDigest != nil {
+ digest = *instanceDigest
+ } else {
+ digest, err = manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ }
+
+ blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir)
+ if err != nil {
+ return err
+ }
+ if err := ensureParentDirectoryExists(blobPath); err != nil {
+ return err
+ }
+ if err := os.WriteFile(blobPath, m, 0644); err != nil {
+ return err
+ }
+
+ if instanceDigest != nil {
+ return nil
+ }
+
+ // If we had platform information, we'd build an imgspecv1.Platform structure here.
+
+ // Start filling out the descriptor for this entry
+ desc := imgspecv1.Descriptor{}
+ desc.Digest = digest
+ desc.Size = int64(len(m))
+ if d.ref.image != "" {
+ desc.Annotations = make(map[string]string)
+ desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image
+ }
+
+ // If we knew the MIME type, we wouldn't have to guess here.
+ desc.MediaType = manifest.GuessMIMEType(m)
+
+ d.addManifest(&desc)
+
+ return nil
+}
+
+func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
+ // If the new entry has a name, remove any conflicting names which we already have.
+ if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" {
+ // The name is being set on a new entry, so remove any older ones that had the same name.
+ // We might be storing an index and all of its component images, and we'll want to attach
+ // the name to the last one, which is the index.
+ for i, manifest := range d.index.Manifests {
+ if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] {
+ delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName)
+ break
+ }
+ }
+ }
+ // If it has the same digest as another entry in the index, we already overwrote the file,
+ // so just pick up the other information.
+ for i, manifest := range d.index.Manifests {
+ if manifest.Digest == desc.Digest && manifest.Annotations[imgspecv1.AnnotationRefName] == "" {
+ // Replace it completely.
+ d.index.Manifests[i] = *desc
+ return
+ }
+ }
+ // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created.
+ d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
+ layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{
+ Version: imgspecv1.ImageLayoutVersion,
+ })
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil {
+ return err
+ }
+ indexJSON, err := json.Marshal(d.index)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+}
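+
+// After a successful Commit, the directory is a self-contained OCI image layout, roughly:
+//
+//    <dir>/oci-layout                 // {"imageLayoutVersion": imgspecv1.ImageLayoutVersion}
+//    <dir>/index.json                 // the accumulated d.index
+//    <dir>/blobs/sha256/<hex-digest>  // manifests, configs and layers written via PutBlob*/PutManifest
+//
+// (Blob locations differ when OCISharedBlobDirPath is set; see blobPath in oci_transport.go.)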
+
+func ensureDirectoryExists(path string) error {
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+ return ensureDirectoryExists(filepath.Dir(path))
+}
+
+// indexExists checks whether the index location specified in the OCI reference exists.
+// The implementation is opinionated, since in case of unexpected errors false is returned
+func indexExists(ref ociReference) bool {
+ _, err := os.Stat(ref.indexPath())
+ if err == nil {
+ return true
+ }
+ if os.IsNotExist(err) {
+ return false
+ }
+ return true
+}
diff --git a/oci/layout/oci_dest_test.go b/oci/layout/oci_dest_test.go
new file mode 100644
index 0000000..33cf851
--- /dev/null
+++ b/oci/layout/oci_dest_test.go
@@ -0,0 +1,180 @@
+package layout
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var _ private.ImageDestination = (*ociImageDestination)(nil)
+
+// readerFromFunc allows implementing Reader by any function, e.g. a closure.
+type readerFromFunc func([]byte) (int, error)
+
+func (fn readerFromFunc) Read(p []byte) (int, error) {
+ return fn(p)
+}
+
+// TestPutBlobDigestFailure simulates behavior on digest verification failure.
+func TestPutBlobDigestFailure(t *testing.T) {
+ const digestErrorString = "Simulated digest error"
+ const blobDigest = "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+
+ ref, _ := refToTempOCI(t)
+ dirRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ blobPath, err := dirRef.blobPath(blobDigest, "")
+ assert.NoError(t, err)
+ cache := memory.New()
+
+ firstRead := true
+ reader := readerFromFunc(func(p []byte) (int, error) {
+ _, err := os.Lstat(blobPath)
+ require.Error(t, err)
+ require.True(t, os.IsNotExist(err))
+ if firstRead {
+ if len(p) > 0 {
+ firstRead = false
+ }
+ for i := 0; i < len(p); i++ {
+ p[i] = 0xAA
+ }
+ return len(p), nil
+ }
+ return 0, errors.New(digestErrorString)
+ })
+
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ defer dest.Close()
+ _, err = dest.PutBlob(context.Background(), reader, types.BlobInfo{Digest: blobDigest, Size: -1}, cache, false)
+ assert.ErrorContains(t, err, digestErrorString)
+ err = dest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ _, err = os.Lstat(blobPath)
+ require.Error(t, err)
+ require.True(t, os.IsNotExist(err))
+}
+
+// TestPutManifestAppendsToExistingManifest tests that new manifests get added to the existing index.
+func TestPutManifestAppendsToExistingManifest(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+
+ // initially we have one manifest
+ index, err := ociRef.getIndex()
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(index.Manifests), "Unexpected number of manifests")
+
+ // create a new test reference
+ ociRef2, err := NewReference(tmpDir, "new-image")
+ assert.NoError(t, err)
+
+ putTestManifest(t, ociRef2.(ociReference), tmpDir)
+
+ index, err = ociRef.getIndex()
+ assert.NoError(t, err)
+ assert.Equal(t, 2, len(index.Manifests), "Unexpected number of manifests")
+}
+
+// TestPutManifestTwice tests that an existing manifest gets updated rather than appended.
+func TestPutManifestTwice(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+
+ putTestConfig(t, ociRef, tmpDir)
+ putTestManifest(t, ociRef, tmpDir)
+ putTestManifest(t, ociRef, tmpDir)
+
+ index, err := ociRef.getIndex()
+ assert.NoError(t, err)
+ assert.Len(t, index.Manifests, 2, "Unexpected number of manifests")
+}
+
+func TestPutTwoDifferentTags(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+
+ putTestConfig(t, ociRef, tmpDir)
+ putTestManifest(t, ociRef, tmpDir)
+
+ // add the same manifest with a different tag; it shouldn't get overwritten
+ ref, err := NewReference(tmpDir, "zomg")
+ assert.NoError(t, err)
+ ociRef, ok = ref.(ociReference)
+ require.True(t, ok)
+ putTestManifest(t, ociRef, tmpDir)
+
+ index, err := ociRef.getIndex()
+ assert.NoError(t, err)
+ assert.Len(t, index.Manifests, 3, "Unexpected number of manifests")
+ assert.Equal(t, "imageValue", index.Manifests[1].Annotations[imgspecv1.AnnotationRefName])
+ assert.Equal(t, "zomg", index.Manifests[2].Annotations[imgspecv1.AnnotationRefName])
+}
+
+func putTestConfig(t *testing.T, ociRef ociReference, tmpDir string) {
+ data, err := os.ReadFile("../../internal/image/fixtures/oci1-config.json")
+ assert.NoError(t, err)
+ imageDest, err := newImageDestination(nil, ociRef)
+ assert.NoError(t, err)
+
+ cache := memory.New()
+
+ _, err = imageDest.PutBlob(context.Background(), bytes.NewReader(data), types.BlobInfo{Size: int64(len(data)), Digest: digest.FromBytes(data)}, cache, true)
+ assert.NoError(t, err)
+
+ err = imageDest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ paths := []string{}
+ err = filepath.WalkDir(tmpDir, func(path string, _ fs.DirEntry, err error) error {
+ paths = append(paths, path)
+ return nil
+ })
+ assert.NoError(t, err)
+
+ digest := digest.FromBytes(data).Encoded()
+ assert.Contains(t, paths, filepath.Join(tmpDir, "blobs", "sha256", digest), "The OCI directory does not contain the new config data")
+}
+
+func putTestManifest(t *testing.T, ociRef ociReference, tmpDir string) {
+ data, err := os.ReadFile("../../internal/image/fixtures/oci1.json")
+ assert.NoError(t, err)
+ imageDest, err := newImageDestination(nil, ociRef)
+ assert.NoError(t, err)
+
+ err = imageDest.PutManifest(context.Background(), data, nil)
+ assert.NoError(t, err)
+
+ err = imageDest.Commit(context.Background(), nil) // nil unparsedToplevel is invalid, we don’t currently use the value
+ assert.NoError(t, err)
+
+ paths := []string{}
+ err = filepath.WalkDir(tmpDir, func(path string, _ fs.DirEntry, err error) error {
+ paths = append(paths, path)
+ return nil
+ })
+ assert.NoError(t, err)
+
+ digest := digest.FromBytes(data).Encoded()
+ assert.Contains(t, paths, filepath.Join(tmpDir, "blobs", "sha256", digest), "The OCI directory does not contain the new manifest data")
+}
diff --git a/oci/layout/oci_src.go b/oci/layout/oci_src.go
new file mode 100644
index 0000000..f5f1deb
--- /dev/null
+++ b/oci/layout/oci_src.go
@@ -0,0 +1,216 @@
+package layout
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/pkg/tlsclientconfig"
+ "github.com/containers/image/v5/types"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
+// but nothing matches the “image” part of the provided reference.
+type ImageNotFoundError struct {
+ ref ociReference
+ // We may make members public, or add methods, in the future.
+}
+
+func (e ImageNotFoundError) Error() string {
+ return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
+}
+
+type ociImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
+ ref ociReference
+ index *imgspecv1.Index
+ descriptor imgspecv1.Descriptor
+ client *http.Client
+ sharedBlobDir string
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) {
+ tr := tlsclientconfig.NewTransport()
+ tr.TLSClientConfig = tlsconfig.ServerDefault()
+
+ if sys != nil && sys.OCICertPath != "" {
+ if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil {
+ return nil, err
+ }
+ tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify
+ }
+
+ client := &http.Client{}
+ client.Transport = tr
+ descriptor, _, err := ref.getManifestDescriptor()
+ if err != nil {
+ return nil, err
+ }
+ index, err := ref.getIndex()
+ if err != nil {
+ return nil, err
+ }
+ s := &ociImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ index: index,
+ descriptor: descriptor,
+ client: client,
+ }
+ if sys != nil {
+ // TODO(jonboulle): check dir existence?
+ s.sharedBlobDir = sys.OCISharedBlobDirPath
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ociImageSource) Close() error {
+ s.client.CloseIdleConnections()
+ return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ var dig digest.Digest
+ var mimeType string
+ var err error
+
+ if instanceDigest == nil {
+ dig = s.descriptor.Digest
+ mimeType = s.descriptor.MediaType
+ } else {
+ dig = *instanceDigest
+ for _, md := range s.index.Manifests {
+ if md.Digest == dig {
+ mimeType = md.MediaType
+ break
+ }
+ }
+ }
+
+ manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir)
+ if err != nil {
+ return nil, "", err
+ }
+
+ m, err := os.ReadFile(manifestPath)
+ if err != nil {
+ return nil, "", err
+ }
+ if mimeType == "" {
+ mimeType = manifest.GuessMIMEType(m)
+ }
+
+ return m, mimeType, nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ r, size, err := s.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, size, nil
+ }
+ }
+ }
+
+ path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, 0, err
+ }
+ fi, err := r.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ return r, fi.Size(), nil
+}
+
+// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
+// It can return a nil reader when none of the URLs is supported by this function. In this case, the caller
+// should fall back to fetching the non-external blob (i.e. pull from the registry).
+func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
+ if len(urls) == 0 {
+ return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+ }
+
+ errWrap := errors.New("failed fetching external blob from all urls")
+ hasSupportedURL := false
+ for _, u := range urls {
+ if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") {
+ continue // unsupported or invalid URL; skip it
+ }
+ hasSupportedURL = true
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
+ if err != nil {
+ errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
+ continue
+ }
+
+ resp, err := s.client.Do(req)
+ if err != nil {
+ errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
+ continue
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ resp.Body.Close()
+ errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap)
+ continue
+ }
+
+ return resp.Body, getBlobSize(resp), nil
+ }
+ if !hasSupportedURL {
+ return nil, 0, nil // fallback to non-external blob
+ }
+
+ return nil, 0, errWrap
+}
+
+func getBlobSize(resp *http.Response) int64 {
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ size = -1
+ }
+ return size
+}
diff --git a/oci/layout/oci_src_test.go b/oci/layout/oci_src_test.go
new file mode 100644
index 0000000..3fe49a2
--- /dev/null
+++ b/oci/layout/oci_src_test.go
@@ -0,0 +1,143 @@
+package layout
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var _ private.ImageSource = (*ociImageSource)(nil)
+
+const RemoteLayerContent = "This is the remote layer content"
+
+var httpServerAddr string
+
+func TestMain(m *testing.M) {
+ httpServer, err := startRemoteLayerServer()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error starting test TLS server: %v", err.Error())
+ os.Exit(1)
+ }
+
+ httpServerAddr = strings.Replace(httpServer.URL, "127.0.0.1", "localhost", 1)
+ code := m.Run()
+ httpServer.Close()
+ os.Exit(code)
+}
+
+func TestGetBlobForRemoteLayers(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, "Hello world")
+ }))
+ defer ts.Close()
+ cache := memory.New()
+
+ imageSource := createImageSource(t, &types.SystemContext{})
+ defer imageSource.Close()
+ layerInfo := types.BlobInfo{
+ Digest: digest.FromBytes([]byte("Hello world")),
+ Size: -1,
+ URLs: []string{
+ "brokenurl",
+ ts.URL,
+ },
+ }
+
+ reader, _, err := imageSource.GetBlob(context.Background(), layerInfo, cache)
+ require.NoError(t, err)
+ defer reader.Close()
+
+ data, err := io.ReadAll(reader)
+ require.NoError(t, err)
+ assert.Contains(t, string(data), "Hello world")
+}
+
+func TestGetBlobForRemoteLayersWithTLS(t *testing.T) {
+ imageSource := createImageSource(t, &types.SystemContext{
+ OCICertPath: "fixtures/accepted_certs",
+ })
+ defer imageSource.Close()
+ cache := memory.New()
+
+ layer, size, err := imageSource.GetBlob(context.Background(), types.BlobInfo{
+ URLs: []string{httpServerAddr},
+ }, cache)
+ require.NoError(t, err)
+
+ layerContent, _ := io.ReadAll(layer)
+ assert.Equal(t, RemoteLayerContent, string(layerContent))
+ assert.Equal(t, int64(len(RemoteLayerContent)), size)
+}
+
+func TestGetBlobForRemoteLayersOnTLSFailure(t *testing.T) {
+ imageSource := createImageSource(t, &types.SystemContext{
+ OCICertPath: "fixtures/rejected_certs",
+ })
+ defer imageSource.Close()
+ cache := memory.New()
+ layer, size, err := imageSource.GetBlob(context.Background(), types.BlobInfo{
+ URLs: []string{httpServerAddr},
+ }, cache)
+
+ require.Error(t, err)
+ assert.Nil(t, layer)
+ assert.Equal(t, int64(0), size)
+}
+
+func remoteLayerContent(w http.ResponseWriter, req *http.Request) {
+ fmt.Fprint(w, RemoteLayerContent)
+}
+
+func startRemoteLayerServer() (*httptest.Server, error) {
+ certBytes, err := os.ReadFile("fixtures/accepted_certs/cert.cert")
+ if err != nil {
+ return nil, err
+ }
+
+ clientCertPool := x509.NewCertPool()
+ if !clientCertPool.AppendCertsFromPEM(certBytes) {
+ return nil, fmt.Errorf("Could not append certificate")
+ }
+
+ cert, err := tls.LoadX509KeyPair("fixtures/accepted_certs/cert.cert", "fixtures/accepted_certs/cert.key")
+ if err != nil {
+ return nil, err
+ }
+
+ tlsConfig := &tls.Config{
+ // Reject any TLS certificate that cannot be validated
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ // Ensure that we only use our "CA" to validate certificates
+ ClientCAs: clientCertPool,
+ Certificates: []tls.Certificate{cert},
+ }
+
+ httpServer := httptest.NewUnstartedServer(http.HandlerFunc(remoteLayerContent))
+ httpServer.TLS = tlsConfig
+
+ httpServer.StartTLS()
+
+ return httpServer, nil
+}
+
+func createImageSource(t *testing.T, sys *types.SystemContext) types.ImageSource {
+ imageRef, err := NewReference("fixtures/manifest", "")
+ require.NoError(t, err)
+ imageSource, err := imageRef.NewImageSource(context.Background(), sys)
+ require.NoError(t, err)
+ return imageSource
+}
diff --git a/oci/layout/oci_transport.go b/oci/layout/oci_transport.go
new file mode 100644
index 0000000..1e26dc5
--- /dev/null
+++ b/oci/layout/oci_transport.go
@@ -0,0 +1,260 @@
+package layout
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/oci/internal"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+var (
+ // Transport is an ImageTransport for OCI directories.
+ Transport = ociTransport{}
+
+ // ErrMoreThanOneImage is an error returned when the manifest includes
+ // more than one image and the user should choose which one to use.
+ ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image")
+)
+
+type ociTransport struct{}
+
+func (t ociTransport) Name() string {
+ return "oci"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
+ return internal.ValidateScope(scope)
+}
+
+// ociReference is an ImageReference for OCI directory paths.
+type ociReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ dir string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+ // If image=="", it means the "only image" in the index.json is used when this reference is a source;
+ // for destinations, the image name annotation "image.ref.name" is not added to the index.json
+ image string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+ dir, image := internal.SplitPathAndImage(reference)
+ return NewReference(dir, image)
+}
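+
+// For example (hypothetical path, non-Windows): ParseReference("/tmp/layout:v1.0.0") is equivalent to
+// NewReference("/tmp/layout", "v1.0.0"), and ParseReference("/tmp/layout") to NewReference("/tmp/layout", "").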
+
+// NewReference returns an OCI reference for a directory and an image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := internal.ValidateOCIPath(dir); err != nil {
+ return nil, err
+ }
+
+ if err = internal.ValidateImageName(image); err != nil {
+ return nil, err
+ }
+
+ return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+}
+
+func (ref ociReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ociReference) StringWithinTransport() string {
+ return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+ // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+ // same image and the two can’t be statically disambiguated. Using at least the repository directory is
+ // less granular but hopefully still useful.
+ return ref.resolvedDir
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedDir
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+ if lastSlash == -1 || path == "/" {
+ break
+ }
+ res = append(res, path)
+ path = path[:lastSlash]
+ }
+ return res
+}
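+
+// For example, for a resolvedDir of "/var/lib/layouts/app" (a hypothetical path), this returns
+// []string{"/var/lib/layouts/app", "/var/lib/layouts", "/var/lib", "/var"}; the implicit "" global
+// default is then checked by the policy-evaluation caller.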
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index, nil is returned together
+// with the error.
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
+ return parseIndex(ref.indexPath())
+}
+
+func parseIndex(path string) (*imgspecv1.Index, error) {
+ return parseJSON[imgspecv1.Index](path)
+}
+
+func parseJSON[T any](path string) (*T, error) {
+ content, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer content.Close()
+
+ obj := new(T)
+ if err := json.NewDecoder(content).Decode(obj); err != nil {
+ return nil, err
+ }
+ return obj, nil
+}
+
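+// getManifestDescriptor returns the descriptor of the image named by ref.image (or of the only image
+// if ref.image == ""), together with its position in index.json (or -1 on failure).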
+func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) {
+ index, err := ref.getIndex()
+ if err != nil {
+ return imgspecv1.Descriptor{}, -1, err
+ }
+
+ if ref.image == "" {
+ // return the manifest if there is only one image in the OCI directory
+ if len(index.Manifests) != 1 {
+ // ask the user to choose an image when there is more than one image in the OCI directory
+ return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+ }
+ return index.Manifests[0], 0, nil
+ } else {
+ // if image specified, look through all manifests for a match
+ var unsupportedMIMETypes []string
+ for i, md := range index.Manifests {
+ if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
+ if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
+ return md, i, nil
+ }
+ unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)
+ }
+ }
+ if len(unsupportedMIMETypes) != 0 {
+ return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
+ }
+ }
+ return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
+}
+
+// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
+// when pulling an image
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+ ociRef, ok := imgRef.(ociReference)
+ if !ok {
+ return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef")
+ }
+ md, _, err := ociRef.getManifestDescriptor()
+ return md, err
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(sys, ref)
+}
+
+// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
+func (ref ociReference) ociLayoutPath() string {
+ return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile)
+}
+
+// indexPath returns a path for the index.json within a directory using OCI conventions.
+func (ref ociReference) indexPath() string {
+ return filepath.Join(ref.dir, imgspecv1.ImageIndexFile)
+}
+
+// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
+func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
+ if err := digest.Validate(); err != nil {
+ return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
+ }
+ var blobDir string
+ if sharedBlobDir != "" {
+ blobDir = sharedBlobDir
+ } else {
+ blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
+ }
+ return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+}
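+
+// For example (hypothetical values): with ref.dir == "/tmp/layout", an empty sharedBlobDir and digest
+// "sha256:0123...", blobPath returns "/tmp/layout/blobs/sha256/0123..."; with sharedBlobDir set to
+// "/var/shared", it returns "/var/shared/sha256/0123...".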
diff --git a/oci/layout/oci_transport_test.go b/oci/layout/oci_transport_test.go
new file mode 100644
index 0000000..8beb52d
--- /dev/null
+++ b/oci/layout/oci_transport_test.go
@@ -0,0 +1,371 @@
+package layout
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ _ "github.com/containers/image/v5/internal/testing/explicitfilepath-tmpdir"
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetManifestDescriptor(t *testing.T) {
+ emptyDir := t.TempDir()
+
+ for _, c := range []struct {
+ dir, image string
+ expectedDescriptor *imgspecv1.Descriptor // nil if a failure is expected. errorIs / errorAs allow more specific checks.
+ expectedIndex int
+ errorIs error
+ errorAs any
+ }{
+ { // Index is missing
+ dir: emptyDir,
+ image: "",
+ expectedDescriptor: nil,
+ },
+ { // A valid reference to the only manifest
+ dir: "fixtures/manifest",
+ image: "",
+ expectedDescriptor: &imgspecv1.Descriptor{
+ MediaType: "application/vnd.oci.image.manifest.v1+json",
+ Digest: "sha256:84afb6189c4d69f2d040c5f1dc4e0a16fed9b539ce9cfb4ac2526ae4e0576cc0",
+ Size: 496,
+ Annotations: map[string]string{"org.opencontainers.image.ref.name": "v0.1.1"},
+ Platform: &imgspecv1.Platform{
+ Architecture: "amd64",
+ OS: "linux",
+ },
+ },
+ expectedIndex: 0,
+ },
+ { // An ambiguous reference to a multi-manifest directory
+ dir: "fixtures/two_images_manifest",
+ image: "",
+ expectedDescriptor: nil,
+ errorIs: ErrMoreThanOneImage,
+ },
+ { // A valid reference in a multi-manifest directory
+ dir: "fixtures/name_lookups",
+ image: "a",
+ expectedDescriptor: &imgspecv1.Descriptor{
+ MediaType: "application/vnd.oci.image.manifest.v1+json",
+ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ Size: 1,
+ Annotations: map[string]string{"org.opencontainers.image.ref.name": "a"},
+ },
+ expectedIndex: 0,
+ },
+ { // A valid reference in a multi-manifest directory
+ dir: "fixtures/name_lookups",
+ image: "b",
+ expectedDescriptor: &imgspecv1.Descriptor{
+ MediaType: "application/vnd.oci.image.manifest.v1+json",
+ Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ Size: 2,
+ Annotations: map[string]string{"org.opencontainers.image.ref.name": "b"},
+ },
+ expectedIndex: 1,
+ },
+ { // No entry found
+ dir: "fixtures/name_lookups",
+ image: "this-does-not-exist",
+ expectedDescriptor: nil,
+ errorAs: &ImageNotFoundError{},
+ },
+ { // Entries with invalid MIME types found
+ dir: "fixtures/name_lookups",
+ image: "invalid-mime",
+ expectedDescriptor: nil,
+ },
+ } {
+ ref, err := NewReference(c.dir, c.image)
+ require.NoError(t, err)
+
+ res, i, err := ref.(ociReference).getManifestDescriptor()
+ if c.expectedDescriptor != nil {
+ require.NoError(t, err)
+ assert.Equal(t, c.expectedIndex, i)
+ assert.Equal(t, *c.expectedDescriptor, res)
+ } else {
+ require.Error(t, err)
+ if c.errorIs != nil {
+ assert.ErrorIs(t, err, c.errorIs)
+ }
+ if c.errorAs != nil {
+ assert.ErrorAs(t, err, &c.errorAs)
+ }
+ }
+ }
+}
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "oci", Transport.Name())
+}
+
+func TestTransportParseReference(t *testing.T) {
+ testParseReference(t, Transport.ParseReference)
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ for _, scope := range []string{
+ "/etc",
+ "/this/does/not/exist",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+
+ for _, scope := range []string{
+ "relative/path",
+ "/",
+ "/double//slashes",
+ "/has/./dot",
+ "/has/dot/../dot",
+ "/trailing/slash/",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.Error(t, err, scope)
+ }
+}
+
+func TestParseReference(t *testing.T) {
+ testParseReference(t, ParseReference)
+}
+
+// testParseReference is a test shared for Transport.ParseReference and ParseReference.
+func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
+ tmpDir := t.TempDir()
+
+ for _, path := range []string{
+ "/",
+ "/etc",
+ tmpDir,
+ "relativepath",
+ tmpDir + "/thisdoesnotexist",
+ } {
+ for _, image := range []struct{ suffix, image string }{
+ {":notlatest:image", "notlatest:image"},
+ {":latestimage", "latestimage"},
+ {":", ""},
+ {"", ""},
+ } {
+ input := path + image.suffix
+ ref, err := fn(input)
+ require.NoError(t, err, input)
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ assert.Equal(t, path, ociRef.dir, input)
+ assert.Equal(t, image.image, ociRef.image, input)
+ }
+ }
+
+ _, err := fn(tmpDir + ":invalid'image!value@")
+ assert.Error(t, err)
+}
+
+func TestNewReference(t *testing.T) {
+ const (
+ imageValue = "imageValue"
+ noImageValue = ""
+ )
+
+ tmpDir := t.TempDir()
+
+ ref, err := NewReference(tmpDir, imageValue)
+ require.NoError(t, err)
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir, ociRef.dir)
+ assert.Equal(t, imageValue, ociRef.image)
+
+ ref, err = NewReference(tmpDir, noImageValue)
+ require.NoError(t, err)
+ ociRef, ok = ref.(ociReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir, ociRef.dir)
+ assert.Equal(t, noImageValue, ociRef.image)
+
+ _, err = NewReference(tmpDir+"/thisparentdoesnotexist/something", imageValue)
+ assert.Error(t, err)
+
+ _, err = NewReference(tmpDir, "invalid'image!value@")
+ assert.Error(t, err)
+
+ _, err = NewReference(tmpDir+"/has:colon", imageValue)
+ assert.Error(t, err)
+}
+
+// refToTempOCI creates a temporary directory and returns a reference to it.
+func refToTempOCI(t *testing.T) (types.ImageReference, string) {
+ tmpDir := t.TempDir()
+ m := `{
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "size": 7143,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
+ "platform": {
+ "architecture": "ppc64le",
+ "os": "linux"
+ },
+ "annotations": {
+ "org.opencontainers.image.ref.name": "imageValue"
+ }
+ }
+ ]
+ }
+`
+ err := os.WriteFile(filepath.Join(tmpDir, "index.json"), []byte(m), 0644)
+ require.NoError(t, err)
+ ref, err := NewReference(tmpDir, "imageValue")
+ require.NoError(t, err)
+ return ref, tmpDir
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for _, c := range []struct{ input, result string }{
+ {"/dir1:notlatest:notlatest", "/dir1:notlatest:notlatest"}, // Explicit image
+ {"/dir3:", "/dir3:"}, // No image
+ } {
+ ref, err := ParseReference(tmpDir + c.input)
+ require.NoError(t, err, c.input)
+ stringRef := ref.StringWithinTransport()
+ assert.Equal(t, tmpDir+c.result, stringRef, c.input)
+ // Do one more round to verify that the output can be parsed, to an equal value.
+ ref2, err := Transport.ParseReference(stringRef)
+ require.NoError(t, err, c.input)
+ stringRef2 := ref2.StringWithinTransport()
+ assert.Equal(t, stringRef, stringRef2, c.input)
+ }
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ assert.Nil(t, ref.DockerReference())
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+
+ assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity())
+ // A non-canonical path. Test just one, the various other cases are
+ // tested in explicitfilepath.ResolvePathToFullyExplicit.
+ ref, err := NewReference(tmpDir+"/.", "image2")
+ require.NoError(t, err)
+ assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity())
+
+ // "/" as a corner case.
+ ref, err = NewReference("/", "image3")
+ require.NoError(t, err)
+ assert.Equal(t, "/", ref.PolicyConfigurationIdentity())
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+ // We don't really know enough to make a full equality test here.
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns)
+ assert.True(t, len(ns) >= 2)
+ assert.Equal(t, tmpDir, ns[0])
+ assert.Equal(t, filepath.Dir(tmpDir), ns[1])
+
+ // Test with a known path which should exist. Test just one non-canonical
+ // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
+ //
+ // It would be nice to test a deeper hierarchy, but it is not obvious what
+ // deeper path is always available in the various distros, AND is not likely
+ // to contain a symbolic link.
+ for _, path := range []string{"/usr/share", "/usr/share/./."} {
+ _, err := os.Lstat(path)
+ require.NoError(t, err)
+ ref, err := NewReference(path, "someimage")
+ require.NoError(t, err)
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns)
+ assert.Equal(t, []string{"/usr/share", "/usr"}, ns)
+ }
+
+ // "/" as a corner case.
+ ref, err := NewReference("/", "image3")
+ require.NoError(t, err)
+ assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces())
+}
+
+func TestReferenceNewImage(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ _, err := ref.NewImage(context.Background(), nil)
+ assert.Error(t, err)
+}
+
+func TestReferenceNewImageSource(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ src, err := ref.NewImageSource(context.Background(), nil)
+ assert.NoError(t, err)
+ defer src.Close()
+}
+
+func TestReferenceNewImageDestination(t *testing.T) {
+ ref, _ := refToTempOCI(t)
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ assert.NoError(t, err)
+ defer dest.Close()
+}
+
+func TestReferenceOCILayoutPath(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir+"/oci-layout", ociRef.ociLayoutPath())
+}
+
+func TestReferenceIndexPath(t *testing.T) {
+ ref, tmpDir := refToTempOCI(t)
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ assert.Equal(t, tmpDir+"/index.json", ociRef.indexPath())
+}
+
+func TestReferenceBlobPath(t *testing.T) {
+ const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+
+ ref, tmpDir := refToTempOCI(t)
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ bp, err := ociRef.blobPath("sha256:"+hex, "")
+ assert.NoError(t, err)
+ assert.Equal(t, tmpDir+"/blobs/sha256/"+hex, bp)
+}
+
+func TestReferenceSharedBlobPathShared(t *testing.T) {
+ const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+
+ ref, _ := refToTempOCI(t)
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ bp, err := ociRef.blobPath("sha256:"+hex, "/external/path")
+ assert.NoError(t, err)
+ assert.Equal(t, "/external/path/sha256/"+hex, bp)
+}
+
+func TestReferenceBlobPathInvalid(t *testing.T) {
+ const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+
+ ref, _ := refToTempOCI(t)
+ ociRef, ok := ref.(ociReference)
+ require.True(t, ok)
+ _, err := ociRef.blobPath(hex, "")
+ assert.ErrorContains(t, err, "unexpected digest reference "+hex)
+}
diff --git a/oci/oci.go b/oci/oci.go
new file mode 100644
index 0000000..03607d3
--- /dev/null
+++ b/oci/oci.go
@@ -0,0 +1 @@
+package oci
diff --git a/openshift/openshift-copies.go b/openshift/openshift-copies.go
new file mode 100644
index 0000000..c6498f6
--- /dev/null
+++ b/openshift/openshift-copies.go
@@ -0,0 +1,1200 @@
+package openshift
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/netip"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+
+ "dario.cat/mergo"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+ "gopkg.in/yaml.v3"
+)
+
+// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
+// restTLSClientConfig contains settings to enable transport layer security
+type restTLSClientConfig struct {
+ // Server requires TLS client certificate authentication
+ CertFile string
+ // Server requires TLS client certificate authentication
+ KeyFile string
+ // Trusted root certificates for server
+ CAFile string
+
+ // CertData holds PEM-encoded bytes (typically read from a client certificate file).
+ // CertData takes precedence over CertFile
+ CertData []byte
+ // KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+ // KeyData takes precedence over KeyFile
+ KeyData []byte
+ // CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+ // CAData takes precedence over CAFile
+ CAData []byte
+}
+
+// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config.
+// Config holds the common attributes that can be passed to a Kubernetes client on
+// initialization.
+type restConfig struct {
+ // Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
+ // If a URL is given then the (optional) Path of that URL represents a prefix that must
+ // be appended to all request URIs used to access the apiserver. This allows a frontend
+ // proxy to easily relocate all of the apiserver endpoints.
+ Host string
+
+ // Server requires Basic authentication
+ Username string
+ Password string
+
+ // Server requires Bearer authentication. This client will not attempt to use
+ // refresh tokens for an OAuth2 flow.
+ // TODO: demonstrate an OAuth2 compatible client.
+ BearerToken string
+
+ // TLSClientConfig contains settings to enable transport layer security
+ TLSClientConfig restTLSClientConfig
+
+ // Server should be accessed without verifying the TLS
+ // certificate. For testing only.
+ Insecure bool
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig.
+// ClientConfig is used to make it easy to get an api server client
+type clientConfig interface {
+ // ClientConfig returns a complete client config
+ ClientConfig() (*restConfig, error)
+}
+
+// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig.
+func defaultClientConfig() clientConfig {
+ loadingRules := newOpenShiftClientConfigLoadingRules()
+ // REMOVED: Allowing command-line overriding of loadingRules
+ // REMOVED: clientcmd.ConfigOverrides
+
+ clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules)
+
+ return clientConfig
+}
+
+var recommendedHomeFile = path.Join(homedir.Get(), ".kube/config")
+
+// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules.
+// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift.
+// 1. --config value
+// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file
+func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules {
+ chain := []string{}
+
+ envVarFile := os.Getenv("KUBECONFIG")
+ if len(envVarFile) != 0 {
+ chain = append(chain, filepath.SplitList(envVarFile)...)
+ } else {
+ chain = append(chain, recommendedHomeFile)
+ }
+
+ return &clientConfigLoadingRules{
+ Precedence: chain,
+ // REMOVED: Migration support; run (oc login) to trigger migration
+ }
+}
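+
+// For example (hypothetical paths), KUBECONFIG="/a/config:/b/config" yields
+// Precedence == []string{"/a/config", "/b/config"}, while an unset KUBECONFIG
+// yields Precedence == []string{"$HOME/.kube/config"}.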
+
+// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.
+// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules
+// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
+// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before
+// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid
+// passing extraneous information down a call stack
+type deferredLoadingClientConfig struct {
+ loadingRules *clientConfigLoadingRules
+
+ clientConfig clientConfig
+}
+
+// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig.
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loading rules
+func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
+ return &deferredLoadingClientConfig{loadingRules: loadingRules}
+}
+
+func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
+ if config.clientConfig == nil {
+ // REMOVED: Support for concurrent use in multiple threads.
+ mergedConfig, err := config.loadingRules.Load()
+ if err != nil {
+ return nil, err
+ }
+
+ // REMOVED: Interactive fallback support.
+ mergedClientConfig := newNonInteractiveClientConfig(*mergedConfig)
+
+ config.clientConfig = mergedClientConfig
+ }
+
+ return config.clientConfig, nil
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
+ mergedClientConfig, err := config.createClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ mergedConfig, err := mergedClientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ // REMOVED: In-cluster service account configuration use.
+
+ return mergedConfig, nil
+}
+
+var (
+ // DefaultCluster is the cluster config used when no other config is specified
+ // TODO: eventually apiserver should start on 443 and be secure by default
+ defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
+
+ // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
+ envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
+)
+
+// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type directClientConfig struct {
+ config clientcmdConfig
+}
+
+// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
+ return &directClientConfig{config}
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *directClientConfig) ClientConfig() (*restConfig, error) {
+ if err := config.ConfirmUsable(); err != nil {
+ return nil, err
+ }
+
+ configAuthInfo := config.getAuthInfo()
+ configClusterInfo := config.getCluster()
+
+ clientConfig := &restConfig{}
+ clientConfig.Host = configClusterInfo.Server
+ if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+ u.RawQuery = ""
+ u.Fragment = ""
+ clientConfig.Host = u.String()
+ }
+
+ // only try to read the auth information if we are secure
+ if isConfigTransportTLS(*clientConfig) {
+ var err error
+ // REMOVED: Support for interactive fallback.
+ userAuthPartialConfig := getUserIdentificationPartialConfig(configAuthInfo)
+ if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
+ return nil, err
+ }
+
+ serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configClusterInfo)
+ if err != nil {
+ return nil, err
+ }
+ if err = mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig); err != nil {
+ return nil, err
+ }
+ }
+
+ return clientConfig, nil
+}
+
+// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately
+// we want this order of precedence for the server identification
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdCluster) (*restConfig, error) {
+ mergedConfig := &restConfig{}
+
+ // configClusterInfo holds the information identifying the server, as provided by .kubeconfig
+ configClientConfig := &restConfig{}
+ configClientConfig.TLSClientConfig.CAFile = configClusterInfo.CertificateAuthority
+ configClientConfig.TLSClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+ configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+ if err := mergo.MergeWithOverwrite(mergedConfig, configClientConfig); err != nil {
+ return nil, err
+ }
+
+ return mergedConfig, nil
+}
+
+// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately
+// we want this order of precedence for user identification
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restConfig {
+ mergedConfig := &restConfig{}
+
+ // blindly overwrite existing values based on precedence
+ if len(configAuthInfo.Token) > 0 {
+ mergedConfig.BearerToken = configAuthInfo.Token
+ }
+ if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+ mergedConfig.TLSClientConfig.CertFile = configAuthInfo.ClientCertificate
+ mergedConfig.TLSClientConfig.CertData = configAuthInfo.ClientCertificateData
+ mergedConfig.TLSClientConfig.KeyFile = configAuthInfo.ClientKey
+ mergedConfig.TLSClientConfig.KeyData = configAuthInfo.ClientKeyData
+ }
+ if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+ mergedConfig.Username = configAuthInfo.Username
+ mergedConfig.Password = configAuthInfo.Password
+ }
+
+ // REMOVED: prompting for missing information.
+ return mergedConfig
+}
+
+// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *directClientConfig) ConfirmUsable() error {
+ var validationErrors []error
+ validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
+ validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
+ // when direct client config is specified, and our only error is that no server is defined, we should
+ // return a standard "no config" error
+ if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster {
+ return newErrConfigurationInvalid([]error{errEmptyConfig})
+ }
+ return newErrConfigurationInvalid(validationErrors)
+}
+
+// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName.
+func (config *directClientConfig) getContextName() string {
+ // REMOVED: overrides support
+ return config.config.CurrentContext
+}
+
+// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName.
+func (config *directClientConfig) getAuthInfoName() string {
+ // REMOVED: overrides support
+ return config.getContext().AuthInfo
+}
+
+// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName.
+func (config *directClientConfig) getClusterName() string {
+ // REMOVED: overrides support
+ return config.getContext().Cluster
+}
+
+// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
+func (config *directClientConfig) getContext() clientcmdContext {
+ contexts := config.config.Contexts
+ contextName := config.getContextName()
+
+ var mergedContext clientcmdContext
+ if configContext, exists := contexts[contextName]; exists {
+ if err := mergo.MergeWithOverwrite(&mergedContext, configContext); err != nil {
+ logrus.Debugf("Can't merge configContext: %v", err)
+ }
+ }
+ // REMOVED: overrides support
+
+ return mergedContext
+}
+
+var (
+ errEmptyConfig = errors.New("no configuration has been provided")
+ // message is for consistency with old behavior
+ errEmptyCluster = errors.New("cluster has no server defined")
+)
+
+// helper for checking certificate/key/CA
+func validateFileIsReadable(name string) error {
+ answer, err := os.Open(name)
+ defer func() {
+ if err := answer.Close(); err != nil {
+ logrus.Debugf("Error closing %v: %v", name, err)
+ }
+ }()
+ return err
+}
+
+// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo.
+// validateClusterInfo looks for conflicts and errors in the cluster info
+func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error {
+ var validationErrors []error
+
+ if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) {
+ return []error{errEmptyCluster}
+ }
+
+ if len(clusterInfo.Server) == 0 {
+ if len(clusterName) == 0 {
+ validationErrors = append(validationErrors, errors.New("default cluster has no server defined"))
+ } else {
+ validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
+ }
+ }
+ // Make sure CA data and CA file aren't both specified
+ if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
+ }
+ if len(clusterInfo.CertificateAuthority) != 0 {
+ err := validateFileIsReadable(clusterInfo.CertificateAuthority)
+ if err != nil {
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+ }
+ }
+
+ return validationErrors
+}
+
+// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo.
+// validateAuthInfo looks for conflicts and errors in the auth info
+func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
+ var validationErrors []error
+
+ usingAuthPath := false
+ methods := make([]string, 0, 3)
+ if len(authInfo.Token) != 0 {
+ methods = append(methods, "token")
+ }
+ if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
+ methods = append(methods, "basicAuth")
+ }
+
+ if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
+ // Make sure cert data and file aren't both specified
+ if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
+ }
+ // Make sure key data and file aren't both specified
+ if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+ }
+ // Make sure a key is specified
+ if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
+ validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
+ }
+
+ if len(authInfo.ClientCertificate) != 0 {
+ err := validateFileIsReadable(authInfo.ClientCertificate)
+ if err != nil {
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+ }
+ }
+ if len(authInfo.ClientKey) != 0 {
+ err := validateFileIsReadable(authInfo.ClientKey)
+ if err != nil {
+ validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+ }
+ }
+ }
+
+ // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
+ if (len(methods) > 1) && (!usingAuthPath) {
+ validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+ }
+
+ return validationErrors
+}
+
+// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo.
+func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
+ authInfos := config.config.AuthInfos
+ authInfoName := config.getAuthInfoName()
+
+ var mergedAuthInfo clientcmdAuthInfo
+ if configAuthInfo, exists := authInfos[authInfoName]; exists {
+ if err := mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo); err != nil {
+ logrus.Debugf("Can't merge configAuthInfo: %v", err)
+ }
+ }
+ // REMOVED: overrides support
+
+ return mergedAuthInfo
+}
+
+// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster.
+func (config *directClientConfig) getCluster() clientcmdCluster {
+ clusterInfos := config.config.Clusters
+ clusterInfoName := config.getClusterName()
+
+ var mergedClusterInfo clientcmdCluster
+ if err := mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster); err != nil {
+ logrus.Debugf("Can't merge defaultCluster: %v", err)
+ }
+ if err := mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster); err != nil {
+ logrus.Debugf("Can't merge envVarCluster: %v", err)
+ }
+ if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+ if err := mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo); err != nil {
+ logrus.Debugf("Can't merge configClusterInfo: %v", err)
+ }
+ }
+ // REMOVED: overrides support
+
+ return mergedClusterInfo
+}
+
+// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
+// This helper implements the error and Errors interfaces. Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregateErr []error
+
+// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+// It checks whether any element of the input error list is nil, to avoid a
+// nil pointer panic when Error() is called.
+func newAggregate(errlist []error) error {
+ if len(errlist) == 0 {
+ return nil
+ }
+ // In case the input error list contains nil entries
+ var errs []error
+ for _, e := range errlist {
+ if e != nil {
+ errs = append(errs, e)
+ }
+ }
+ if len(errs) == 0 {
+ return nil
+ }
+ return aggregateErr(errs)
+}
+
+// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
+// Error is part of the error interface.
+func (agg aggregateErr) Error() string {
+ if len(agg) == 0 {
+ // This should never happen, really.
+ return ""
+ }
+ if len(agg) == 1 {
+ return agg[0].Error()
+ }
+ result := fmt.Sprintf("[%s", agg[0].Error())
+ for i := 1; i < len(agg); i++ {
+ result += fmt.Sprintf(", %s", agg[i].Error())
+ }
+ result += "]"
+ return result
+}
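+
+// Illustrative behavior of the two helpers above (assumed from the code, not
+// an upstream guarantee):
+//
+//	newAggregate(nil)                              // nil
+//	newAggregate([]error{nil})                     // nil
+//	newAggregate([]error{errors.New("a")}).Error() // "a"
+//	newAggregate([]error{errors.New("a"), errors.New("b")}).Error() // "[a, b]"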
+
+// REMOVED: aggregateErr.Errors
+
+// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+var _ error = errConfigurationInvalid{}
+
+// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
+
+// newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
+func newErrConfigurationInvalid(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ default:
+ return errConfigurationInvalid(errs)
+ }
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+ return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
+}
+
+// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
+// ClientConfigLoadingRules is an ExplicitPath and a string slice of specific locations that are used for merging together a Config
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files), otherwise the HomeDirectoryPath
+// ExplicitPath is special: if a user specifically requests that a certain file be used, an error is reported if this file is not present
+type clientConfigLoadingRules struct {
+ Precedence []string
+}
+
+// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules.
+//
+// - if the ExplicitPath, return the unmerged explicit file
+// - Otherwise, return a merged config based on the Precedence slice
+//
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins, and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
+ errlist := []error{}
+
+ kubeConfigFiles := []string{}
+
+ // REMOVED: explicit path support
+ kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+
+ kubeconfigs := []*clientcmdConfig{}
+ // read and cache the config files so that we only look at them once
+ for _, filename := range kubeConfigFiles {
+ if len(filename) == 0 {
+ // no work to do
+ continue
+ }
+
+ config, err := loadFromFile(filename)
+ if os.IsNotExist(err) {
+ // skip missing files
+ continue
+ }
+ if err != nil {
+ errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err))
+ continue
+ }
+
+ kubeconfigs = append(kubeconfigs, config)
+ }
+
+ // first merge all of our maps
+ mapConfig := clientcmdNewConfig()
+ for _, kubeconfig := range kubeconfigs {
+ if err := mergo.MergeWithOverwrite(mapConfig, kubeconfig); err != nil {
+ return nil, err
+ }
+ }
+
+ // merge all of the struct values in the reverse order so that priority is given correctly
+ // errors are not added to the list the second time
+ nonMapConfig := clientcmdNewConfig()
+ for i := len(kubeconfigs) - 1; i >= 0; i-- {
+ kubeconfig := kubeconfigs[i]
+ if err := mergo.MergeWithOverwrite(nonMapConfig, kubeconfig); err != nil {
+ return nil, err
+ }
+ }
+
+ // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
+ // get the values we expect.
+ config := clientcmdNewConfig()
+ if err := mergo.MergeWithOverwrite(config, mapConfig); err != nil {
+ return nil, err
+ }
+ if err := mergo.MergeWithOverwrite(config, nonMapConfig); err != nil {
+ return nil, err
+ }
+
+ // REMOVED: Possibility to skip this.
+ if err := resolveLocalPaths(config); err != nil {
+ errlist = append(errlist, err)
+ }
+
+ return config, newAggregate(errlist)
+}
+
+// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
+// LoadFromFile takes a filename and deserializes the contents into Config object
+func loadFromFile(filename string) (*clientcmdConfig, error) {
+ kubeconfigBytes, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ config, err := load(kubeconfigBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // set LocationOfOrigin on every Cluster, User, and Context
+ for key, obj := range config.AuthInfos {
+ obj.LocationOfOrigin = filename
+ config.AuthInfos[key] = obj
+ }
+ for key, obj := range config.Clusters {
+ obj.LocationOfOrigin = filename
+ config.Clusters[key] = obj
+ }
+ for key, obj := range config.Contexts {
+ obj.LocationOfOrigin = filename
+ config.Contexts[key] = obj
+ }
+
+ if config.AuthInfos == nil {
+ config.AuthInfos = map[string]*clientcmdAuthInfo{}
+ }
+ if config.Clusters == nil {
+ config.Clusters = map[string]*clientcmdCluster{}
+ }
+ if config.Contexts == nil {
+ config.Contexts = map[string]*clientcmdContext{}
+ }
+
+ return config, nil
+}
+
+// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load
+// Load takes a byte slice and deserializes the contents into Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func load(data []byte) (*clientcmdConfig, error) {
+ config := clientcmdNewConfig()
+ // if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+ if len(data) == 0 {
+ return config, nil
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ if err := yaml.Unmarshal(data, config); err != nil {
+ return nil, err
+ }
+ return config, nil
+}
+
+// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
+// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin
+// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without
+// modification of its contents.
+func resolveLocalPaths(config *clientcmdConfig) error {
+ for _, cluster := range config.Clusters {
+ if len(cluster.LocationOfOrigin) == 0 {
+ continue
+ }
+ base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+ if err != nil {
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %w", cluster.LocationOfOrigin, err)
+ }
+
+ if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
+ return err
+ }
+ }
+ for _, authInfo := range config.AuthInfos {
+ if len(authInfo.LocationOfOrigin) == 0 {
+ continue
+ }
+ base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+ if err != nil {
+ return fmt.Errorf("Could not determine the absolute path of config file %s: %w", authInfo.LocationOfOrigin, err)
+ }
+
+ if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences.
+func getClusterFileReferences(cluster *clientcmdCluster) []*string {
+ return []*string{&cluster.CertificateAuthority}
+}
+
+// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences.
+func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string {
+ return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
+}
+
+// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths.
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
+func resolvePaths(refs []*string, base string) error {
+ for _, ref := range refs {
+ // Don't resolve empty paths
+ if len(*ref) > 0 {
+ // Don't resolve absolute paths
+ if !filepath.IsAbs(*ref) {
+ *ref = filepath.Join(base, *ref)
+ }
+ }
+ }
+ return nil
+}
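+
+// Sketch (hypothetical values): with base "/home/user/.kube", a relative
+// certificate-authority of "ca.crt" becomes "/home/user/.kube/ca.crt", while
+// an absolute path such as "/etc/pki/ca.crt" is left unchanged.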
+
+// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
+func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
+ // REMOVED: Configurable GroupVersion, Codec
+ // REMOVED: Configurable versionedAPIPath
+ baseURL, err := defaultServerURLFor(config)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ transport, err := transportFor(config)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var httpClient *http.Client
+ if transport != http.DefaultTransport {
+ httpClient = &http.Client{Transport: transport}
+ }
+
+ // REMOVED: Configurable QPS, Burst, ContentConfig
+ // REMOVED: Actually returning a RESTClient object.
+ return baseURL, httpClient, nil
+}
+
+// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
+// to use with a Client at a given API version following the standard conventions for a
+// Kubernetes API.
+func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
+ if host == "" {
+ return nil, errors.New("host must be a URL or a host:port pair")
+ }
+ base := host
+ hostURL, err := url.Parse(base)
+ if err != nil {
+ return nil, err
+ }
+ if hostURL.Scheme == "" {
+ scheme := "http://"
+ if defaultTLS {
+ scheme = "https://"
+ }
+ hostURL, err = url.Parse(scheme + base)
+ if err != nil {
+ return nil, err
+ }
+ if hostURL.Path != "" && hostURL.Path != "/" {
+ return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base)
+ }
+ }
+
+ // REMOVED: versionedAPIPath computation.
+ return hostURL, nil
+}
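+
+// Sketch of the intended behavior (values are illustrative):
+//
+//	defaultServerURL("localhost", false)           // http://localhost
+//	defaultServerURL("localhost", true)            // https://localhost
+//	defaultServerURL("https://example.com", false) // https://example.com (existing scheme preserved)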
+
+// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
+// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// requires Host and Version to be set prior to being called.
+func defaultServerURLFor(config *restConfig) (*url.URL, error) {
+ // TODO: move the default to secure when the apiserver supports TLS by default
+ // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
+ hasCA := len(config.TLSClientConfig.CAFile) != 0 || len(config.TLSClientConfig.CAData) != 0
+ hasCert := len(config.TLSClientConfig.CertFile) != 0 || len(config.TLSClientConfig.CertData) != 0
+ defaultTLS := hasCA || hasCert || config.Insecure
+ host := config.Host
+ if host == "" {
+ host = "localhost"
+ }
+
+ // REMOVED: Configurable APIPath, GroupVersion
+ return defaultServerURL(host, defaultTLS)
+}
+
+// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
+// TransportFor returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config. Will return the
+// default http.DefaultTransport if no special case behavior is needed.
+func transportFor(config *restConfig) (http.RoundTripper, error) {
+ // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support
+ return transportNew(config)
+}
+
+// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
+// IsConfigTransportTLS returns true if and only if the provided
+// config will result in a protected connection to the server when it
+// is passed to restclient.RESTClientFor(). Use to determine when to
+// send credentials over the wire.
+//
+// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
+// still possible.
+func isConfigTransportTLS(config restConfig) bool {
+ baseURL, err := defaultServerURLFor(&config)
+ if err != nil {
+ return false
+ }
+ return baseURL.Scheme == "https"
+}
+
+// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New.
+// New returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config.
+func transportNew(config *restConfig) (http.RoundTripper, error) {
+ // REMOVED: custom config.Transport support.
+ // Set transport level security
+
+ var (
+ rt http.RoundTripper
+ err error
+ )
+
+ rt, err = tlsCacheGet(config)
+ if err != nil {
+ return nil, err
+ }
+
+ // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
+ if len(config.Username) != 0 && len(config.BearerToken) != 0 {
+ return nil, errors.New("username/password or bearer token may be set, but not both")
+ }
+
+ return rt, nil
+}
+
+// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR.
+// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
+// no matching CIDRs are found
+func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
+ // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it
+ noProxyEnv := os.Getenv("NO_PROXY")
+ noProxyRules := strings.Split(noProxyEnv, ",")
+
+ cidrs := []netip.Prefix{}
+ for _, noProxyRule := range noProxyRules {
+ prefix, err := netip.ParsePrefix(noProxyRule)
+ if err == nil {
+ cidrs = append(cidrs, prefix)
+ }
+ }
+
+ if len(cidrs) == 0 {
+ return delegate
+ }
+
+ return func(req *http.Request) (*url.URL, error) {
+ host := req.URL.Host
+ // for some urls, the Host is already the host, not the host:port
+ if _, err := netip.ParseAddr(host); err != nil {
+ var err error
+ host, _, err = net.SplitHostPort(req.URL.Host)
+ if err != nil {
+ return delegate(req)
+ }
+ }
+
+ ip, err := netip.ParseAddr(host)
+ if err != nil {
+ return delegate(req)
+ }
+
+ if slices.ContainsFunc(cidrs, func(cidr netip.Prefix) bool {
+ return cidr.Contains(ip)
+ }) {
+ return nil, nil
+ }
+
+ return delegate(req)
+ }
+}
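+
+// Sketch (assumed values): with NO_PROXY="10.0.0.0/8,example.com" only
+// "10.0.0.0/8" parses as a CIDR, so a request to https://10.1.2.3:8443 bypasses
+// the proxy (the wrapper returns nil), while all other requests are delegated
+// to the wrapped proxier.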
+
+// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get.
+func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
+ // REMOVED: any actual caching
+
+ // Get the TLS options for this client config
+ tlsConfig, err := tlsConfigFor(config)
+ if err != nil {
+ return nil, err
+ }
+ // The options didn't require a custom TLS config
+ if tlsConfig == nil {
+ return http.DefaultTransport, nil
+ }
+
+ // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here.
+ t := &http.Transport{
+ // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
+ // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
+ Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment),
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: tlsConfig,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ }
+ // Allow clients to disable http2 if needed.
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
+ t.ForceAttemptHTTP2 = true
+ }
+ return t, nil
+}
+
+// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor.
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func tlsConfigFor(c *restConfig) (*tls.Config, error) {
+ if !(c.HasCA() || c.HasCertAuth() || c.Insecure) {
+ return nil, nil
+ }
+ if c.HasCA() && c.Insecure {
+ return nil, errors.New("specifying a root certificates file with the insecure flag is not allowed")
+ }
+ if err := loadTLSFiles(c); err != nil {
+ return nil, err
+ }
+
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: c.Insecure,
+ }
+
+ if c.HasCA() {
+ tlsConfig.RootCAs = rootCertPool(c.TLSClientConfig.CAData)
+ }
+
+ if c.HasCertAuth() {
+ cert, err := tls.X509KeyPair(c.TLSClientConfig.CertData, c.TLSClientConfig.KeyData)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+
+ return tlsConfig, nil
+}
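+
+// Sketch of the resulting configurations (assumed from the code above): a
+// restConfig with only Insecure=true yields &tls.Config{InsecureSkipVerify: true};
+// a config with neither a CA nor client certificates and Insecure=false yields
+// nil, in which case tlsCacheGet falls back to http.DefaultTransport.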
+
+// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles.
+// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func loadTLSFiles(c *restConfig) error {
+ var err error
+ c.TLSClientConfig.CAData, err = dataFromSliceOrFile(c.TLSClientConfig.CAData, c.TLSClientConfig.CAFile)
+ if err != nil {
+ return err
+ }
+
+ c.TLSClientConfig.CertData, err = dataFromSliceOrFile(c.TLSClientConfig.CertData, c.TLSClientConfig.CertFile)
+ if err != nil {
+ return err
+ }
+
+ c.TLSClientConfig.KeyData, err = dataFromSliceOrFile(c.TLSClientConfig.KeyData, c.TLSClientConfig.KeyFile)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile.
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+ if len(data) > 0 {
+ return data, nil
+ }
+ if len(file) > 0 {
+ fileData, err := os.ReadFile(file)
+ if err != nil {
+ return []byte{}, err
+ }
+ return fileData, nil
+ }
+ return nil, nil
+}
+
+// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool.
+// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
+// When caData is not empty, it will be the ONLY information used in the CertPool.
+func rootCertPool(caData []byte) *x509.CertPool {
+ // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
+ // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
+ // It doesn't allow trusting either/or, but hopefully that won't be an issue
+ if len(caData) == 0 {
+ return nil
+ }
+
+ // if we have caData, use it
+ certPool := x509.NewCertPool()
+ certPool.AppendCertsFromPEM(caData)
+ return certPool
+}
+
+// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA.
+// HasCA returns whether the configuration has a certificate authority or not.
+func (c *restConfig) HasCA() bool {
+ return len(c.TLSClientConfig.CAData) > 0 || len(c.TLSClientConfig.CAFile) > 0
+}
+
+// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *restConfig) HasCertAuth() bool {
+ return len(c.TLSClientConfig.CertData) != 0 || len(c.TLSClientConfig.CertFile) != 0
+}
+
+// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
+// Config holds the information needed to build connections to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type clientcmdConfig struct {
+ // Clusters is a map of referenceable names to cluster configs
+ Clusters clustersMap `yaml:"clusters"`
+ // AuthInfos is a map of referenceable names to user configs
+ AuthInfos authInfosMap `yaml:"users"`
+ // Contexts is a map of referenceable names to context configs
+ Contexts contextsMap `yaml:"contexts"`
+ // CurrentContext is the name of the context that you would like to use by default
+ CurrentContext string `yaml:"current-context"`
+}
+
+type clustersMap map[string]*clientcmdCluster
+
+func (m *clustersMap) UnmarshalYAML(value *yaml.Node) error {
+ var a []v1NamedCluster
+ if err := value.Decode(&a); err != nil {
+ return err
+ }
+ for _, e := range a {
+ cluster := e.Cluster // Allocates a new instance in each iteration
+ (*m)[e.Name] = &cluster
+ }
+ return nil
+}
+
+type authInfosMap map[string]*clientcmdAuthInfo
+
+func (m *authInfosMap) UnmarshalYAML(value *yaml.Node) error {
+ var a []v1NamedAuthInfo
+ if err := value.Decode(&a); err != nil {
+ return err
+ }
+ for _, e := range a {
+ authInfo := e.AuthInfo // Allocates a new instance in each iteration
+ (*m)[e.Name] = &authInfo
+ }
+ return nil
+}
+
+type contextsMap map[string]*clientcmdContext
+
+func (m *contextsMap) UnmarshalYAML(value *yaml.Node) error {
+ var a []v1NamedContext
+ if err := value.Decode(&a); err != nil {
+ return err
+ }
+ for _, e := range a {
+ context := e.Context // Allocates a new instance in each iteration
+ (*m)[e.Name] = &context
+ }
+ return nil
+}
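+
+// Sketch of the kubeconfig shape these unmarshalers expect (all values are
+// placeholders):
+//
+//	clusters:
+//	- name: my-cluster
+//	  cluster:
+//	    server: https://127.0.0.1:8443
+//	users:
+//	- name: my-user
+//	  user:
+//	    token: abc123
+//	contexts:
+//	- name: my-context
+//	  context:
+//	    cluster: my-cluster
+//	    user: my-user
+//	current-context: my-context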
+
+// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func clientcmdNewConfig() *clientcmdConfig {
+ return &clientcmdConfig{
+ Clusters: make(map[string]*clientcmdCluster),
+ AuthInfos: make(map[string]*clientcmdAuthInfo),
+ Contexts: make(map[string]*clientcmdContext),
+ }
+}
+
+// yamlBinaryAsBase64String is a []byte that can be stored in yaml as a !!str, not a !!binary
+type yamlBinaryAsBase64String []byte
+
+func (bin *yamlBinaryAsBase64String) UnmarshalText(text []byte) error {
+ res := make([]byte, base64.StdEncoding.DecodedLen(len(text)))
+ n, err := base64.StdEncoding.Decode(res, text)
+ if err != nil {
+ return err
+ }
+ *bin = res[:n]
+ return nil
+}
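+
+// Illustrative round trip (assumed): the YAML string "aGVsbG8=" decodes into
+// the bytes "hello" when unmarshalled into a yamlBinaryAsBase64String field.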
+
+// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
+// Cluster contains information about how to communicate with a kubernetes cluster
+type clientcmdCluster struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // Server is the address of the kubernetes cluster (https://hostname:port).
+ Server string `yaml:"server"`
+ // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+ InsecureSkipTLSVerify bool `yaml:"insecure-skip-tls-verify,omitempty"`
+ // CertificateAuthority is the path to a cert file for the certificate authority.
+ CertificateAuthority string `yaml:"certificate-authority,omitempty"`
+ // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+ CertificateAuthorityData yamlBinaryAsBase64String `yaml:"certificate-authority-data,omitempty"`
+}
+
+// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type clientcmdAuthInfo struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // ClientCertificate is the path to a client cert file for TLS.
+ ClientCertificate string `yaml:"client-certificate,omitempty"`
+ // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+ ClientCertificateData yamlBinaryAsBase64String `yaml:"client-certificate-data,omitempty"`
+ // ClientKey is the path to a client key file for TLS.
+ ClientKey string `yaml:"client-key,omitempty"`
+ // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+ ClientKeyData yamlBinaryAsBase64String `yaml:"client-key-data,omitempty"`
+ // Token is the bearer token for authentication to the kubernetes cluster.
+ Token string `yaml:"token,omitempty"`
+ // Username is the username for basic authentication to the kubernetes cluster.
+ Username string `yaml:"username,omitempty"`
+ // Password is the password for basic authentication to the kubernetes cluster.
+ Password string `yaml:"password,omitempty"`
+}
+
+// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type clientcmdContext struct {
+ // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+ LocationOfOrigin string
+ // Cluster is the name of the cluster for this context
+ Cluster string `yaml:"cluster"`
+ // AuthInfo is the name of the authInfo for this context
+ AuthInfo string `yaml:"user"`
+ // Namespace is the default namespace to use on unspecified requests
+ Namespace string `yaml:"namespace,omitempty"`
+}
+
+// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
+// NamedCluster relates nicknames to cluster information
+type v1NamedCluster struct {
+ // Name is the nickname for this Cluster
+ Name string `yaml:"name"`
+ // Cluster holds the cluster information
+ Cluster clientcmdCluster `yaml:"cluster"`
+}
+
+// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
+// NamedContext relates nicknames to context information
+type v1NamedContext struct {
+ // Name is the nickname for this Context
+ Name string `yaml:"name"`
+ // Context holds the context information
+ Context clientcmdContext `yaml:"context"`
+}
+
+// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
+// NamedAuthInfo relates nicknames to auth information
+type v1NamedAuthInfo struct {
+ // Name is the nickname for this AuthInfo
+ Name string `yaml:"name"`
+ // AuthInfo holds the auth information
+ AuthInfo clientcmdAuthInfo `yaml:"user"`
+}
diff --git a/openshift/openshift-copies_test.go b/openshift/openshift-copies_test.go
new file mode 100644
index 0000000..1fdc37d
--- /dev/null
+++ b/openshift/openshift-copies_test.go
@@ -0,0 +1,115 @@
+package openshift
+
+import (
+ "encoding"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v3"
+)
+
+const fixtureKubeConfigPath = "testdata/admin.kubeconfig"
+
+var (
+ _ yaml.Unmarshaler = (*clustersMap)(nil)
+ _ yaml.Unmarshaler = (*authInfosMap)(nil)
+ _ yaml.Unmarshaler = (*contextsMap)(nil)
+ _ encoding.TextUnmarshaler = (*yamlBinaryAsBase64String)(nil)
+)
+
+// These are only smoke tests based on the skopeo integration test cluster. Error handling, non-trivial configuration merging,
+// and any other situations are not currently covered.
+
+// Set up KUBECONFIG to point at the fixture.
+// Callers MUST NOT call testing.T.Parallel().
+func setupKubeConfigForSerialTest(t *testing.T) {
+ t.Setenv("KUBECONFIG", fixtureKubeConfigPath)
+}
+
+func TestClientConfigLoadingRules(t *testing.T) {
+ setupKubeConfigForSerialTest(t)
+
+ rules := newOpenShiftClientConfigLoadingRules()
+ res, err := rules.Load()
+ require.NoError(t, err)
+ expected := clientcmdConfig{
+ Clusters: clustersMap{
+ "172-17-0-2:8443": &clientcmdCluster{
+ LocationOfOrigin: fixtureKubeConfigPath,
+ Server: "https://172.17.0.2:8443",
+ CertificateAuthorityData: []byte("Cluster CA"),
+ },
+ },
+ AuthInfos: authInfosMap{
+ "system:admin/172-17-0-2:8443": &clientcmdAuthInfo{
+ LocationOfOrigin: fixtureKubeConfigPath,
+ ClientCertificateData: []byte("Client cert"),
+ ClientKeyData: []byte("Client key"),
+ },
+ },
+ Contexts: contextsMap{
+ "default/172-17-0-2:8443/system:admin": &clientcmdContext{
+ LocationOfOrigin: fixtureKubeConfigPath,
+ Cluster: "172-17-0-2:8443",
+ AuthInfo: "system:admin/172-17-0-2:8443",
+ Namespace: "default",
+ },
+ },
+ CurrentContext: "default/172-17-0-2:8443/system:admin",
+ }
+ assert.Equal(t, &expected, res)
+}
+
+func TestDirectClientConfig(t *testing.T) {
+ setupKubeConfigForSerialTest(t)
+
+ rules := newOpenShiftClientConfigLoadingRules()
+ config, err := rules.Load()
+ require.NoError(t, err)
+
+ direct := newNonInteractiveClientConfig(*config)
+ res, err := direct.ClientConfig()
+ require.NoError(t, err)
+ assert.Equal(t, &restConfig{
+ Host: "https://172.17.0.2:8443",
+ TLSClientConfig: restTLSClientConfig{
+ CertData: []byte("Client cert"),
+ KeyData: []byte("Client key"),
+ CAData: []byte("Cluster CA"),
+ },
+ }, res)
+}
+
+func TestDeferredLoadingClientConfig(t *testing.T) {
+ setupKubeConfigForSerialTest(t)
+
+ rules := newOpenShiftClientConfigLoadingRules()
+ deferred := newNonInteractiveDeferredLoadingClientConfig(rules)
+ res, err := deferred.ClientConfig()
+ require.NoError(t, err)
+ assert.Equal(t, &restConfig{
+ Host: "https://172.17.0.2:8443",
+ TLSClientConfig: restTLSClientConfig{
+ CertData: []byte("Client cert"),
+ KeyData: []byte("Client key"),
+ CAData: []byte("Cluster CA"),
+ },
+ }, res)
+}
+
+func TestDefaultClientConfig(t *testing.T) {
+ setupKubeConfigForSerialTest(t)
+
+ config := defaultClientConfig()
+ res, err := config.ClientConfig()
+ require.NoError(t, err)
+ assert.Equal(t, &restConfig{
+ Host: "https://172.17.0.2:8443",
+ TLSClientConfig: restTLSClientConfig{
+ CertData: []byte("Client cert"),
+ KeyData: []byte("Client key"),
+ CAData: []byte("Cluster CA"),
+ },
+ }, res)
+}
diff --git a/openshift/openshift.go b/openshift/openshift.go
new file mode 100644
index 0000000..2c69afb
--- /dev/null
+++ b/openshift/openshift.go
@@ -0,0 +1,226 @@
+package openshift
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/iolimits"
+ "github.com/containers/image/v5/version"
+ "github.com/sirupsen/logrus"
+)
+
+// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
+type openshiftClient struct {
+ ref openshiftReference
+ baseURL *url.URL
+ // Values from Kubernetes configuration
+ httpClient *http.Client
+ bearerToken string // "" if not used
+ username string // "" if not used
+ password string // if username != ""
+}
+
+// newOpenshiftClient creates a new openshiftClient for the specified reference.
+func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
+ // We have already done this parsing in ParseReference, but threw away the
+ // httpClient. So, parse again.
+ // (We could also rework/split restClientFor to "get base URL" to be done
+ // in ParseReference, and "get httpClient" to be done here. But until/unless
+ // we support non-default clusters, this is good enough.)
+
+ // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client.
+ cmdConfig := defaultClientConfig()
+ logrus.Debugf("cmdConfig: %#v", cmdConfig)
+ restConfig, err := cmdConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.)
+ logrus.Debugf("restConfig: %#v", restConfig)
+ baseURL, httpClient, err := restClientFor(restConfig)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("URL: %#v", *baseURL)
+
+ if httpClient == nil {
+ httpClient = http.DefaultClient
+ }
+
+ return &openshiftClient{
+ ref: ref,
+ baseURL: baseURL,
+ httpClient: httpClient,
+ bearerToken: restConfig.BearerToken,
+ username: restConfig.Username,
+ password: restConfig.Password,
+ }, nil
+}
+
+func (c *openshiftClient) close() {
+ c.httpClient.CloseIdleConnections()
+}
+
+// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) {
+ requestURL := *c.baseURL
+ requestURL.Path = path
+ var requestBodyReader io.Reader
+ if requestBody != nil {
+ logrus.Debugf("Will send body: %s", requestBody)
+ requestBodyReader = bytes.NewReader(requestBody)
+ }
+ req, err := http.NewRequestWithContext(ctx, method, requestURL.String(), requestBodyReader)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(c.bearerToken) != 0 {
+ req.Header.Set("Authorization", "Bearer "+c.bearerToken)
+ } else if len(c.username) != 0 {
+ req.SetBasicAuth(c.username, c.password)
+ }
+ req.Header.Set("Accept", "application/json, */*")
+ req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version))
+ if requestBody != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+
+ logrus.Debugf("%s %s", method, requestURL.Redacted())
+ res, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("Got body: %s", body)
+ // FIXME: Just throwing this useful information away only to try to guess later...
+ logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type"))
+
+ var status status
+ statusValid := false
+ if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 {
+ statusValid = true
+ }
+
+ switch {
+ case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient.
+ if statusValid && status.Status != "Success" {
+ return nil, errors.New(status.Message)
+ }
+ case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent:
+ // OK.
+ default:
+ if statusValid {
+ return nil, errors.New(status.Message)
+ }
+ return nil, fmt.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
+ }
+
+ return body, nil
+}
+
+// getImage loads the specified image object.
+func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) {
+ // FIXME: validate components per validation.IsValidPathSegmentName?
+ path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
+ body, err := c.doRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return nil, err
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ var isi imageStreamImage
+ if err := json.Unmarshal(body, &isi); err != nil {
+ return nil, err
+ }
+ return &isi.Image, nil
+}
+
+// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
+// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
+func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
+ _, repo, gotRepo := strings.Cut(ref, "/")
+ if !gotRepo {
+ return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
+ }
+ return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
+}
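As a hedged illustration of that rewrite (the host and path below are placeholders, not values taken from this patch), the same string manipulation can be written standalone, using the strings and fmt imports already present above; externalDomain stands in for reference.Domain(c.ref.dockerReference).

// exampleConvertDockerImageReference is illustrative only; it mirrors the
// split-and-reprefix logic of convertDockerImageReference without needing an openshiftClient.
func exampleConvertDockerImageReference(externalDomain, internalRef string) (string, error) {
	// e.g. externalDomain = "registry.example.com:8443", internalRef = "172.30.30.30:5000/ns/stream"
	_, repo, gotRepo := strings.Cut(internalRef, "/")
	if !gotRepo {
		return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", internalRef)
	}
	return externalDomain + "/" + repo, nil // "registry.example.com:8443/ns/stream"
}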
+
+// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
+type imageStream struct {
+ Status imageStreamStatus `json:"status,omitempty"`
+}
+type imageStreamStatus struct {
+ DockerImageRepository string `json:"dockerImageRepository"`
+ Tags []namedTagEventList `json:"tags,omitempty"`
+}
+type namedTagEventList struct {
+ Tag string `json:"tag"`
+ Items []tagEvent `json:"items"`
+}
+type tagEvent struct {
+ DockerImageReference string `json:"dockerImageReference"`
+ Image string `json:"image"`
+}
+type imageStreamImage struct {
+ Image image `json:"image"`
+}
+type image struct {
+ objectMeta `json:"metadata,omitempty"`
+ DockerImageReference string `json:"dockerImageReference,omitempty"`
+ // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"`
+ DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"`
+ DockerImageManifest string `json:"dockerImageManifest,omitempty"`
+ // DockerImageLayers []ImageLayer `json:"dockerImageLayers"`
+ Signatures []imageSignature `json:"signatures,omitempty"`
+}
+
+const imageSignatureTypeAtomic string = "atomic"
+
+type imageSignature struct {
+ typeMeta `json:",inline"`
+ objectMeta `json:"metadata,omitempty"`
+ Type string `json:"type"`
+ Content []byte `json:"content"`
+ // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+ // ImageIdentity string `json:"imageIdentity,omitempty"`
+ // SignedClaims map[string]string `json:"signedClaims,omitempty"`
+ // Created *unversioned.Time `json:"created,omitempty"`
+ // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"`
+ // IssuedTo SignatureSubject `json:"issuedTo,omitempty"`
+}
+type typeMeta struct {
+ Kind string `json:"kind,omitempty"`
+ APIVersion string `json:"apiVersion,omitempty"`
+}
+type objectMeta struct {
+ Name string `json:"name,omitempty"`
+ GenerateName string `json:"generateName,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ SelfLink string `json:"selfLink,omitempty"`
+ ResourceVersion string `json:"resourceVersion,omitempty"`
+ Generation int64 `json:"generation,omitempty"`
+ DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
+ Labels map[string]string `json:"labels,omitempty"`
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status
+type status struct {
+ Status string `json:"status,omitempty"`
+ Message string `json:"message,omitempty"`
+ // Reason StatusReason `json:"reason,omitempty"`
+ // Details *StatusDetails `json:"details,omitempty"`
+ Code int32 `json:"code,omitempty"`
+}
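For orientation, a hedged sketch of the kind of payload this struct is meant to capture when doRequest inspects an error response; the body below is illustrative, not a recorded server response, and the snippet relies on the encoding/json import above.

// exampleParseStatus is illustrative only; it decodes a Kubernetes-style Status
// document into the subset of fields declared above.
func exampleParseStatus() (status, error) {
	body := []byte(`{"kind":"Status","apiVersion":"v1","status":"Failure","message":"image stream not found","code":404}`)
	var s status
	if err := json.Unmarshal(body, &s); err != nil {
		return status{}, err
	}
	return s, nil // s.Status == "Failure", s.Message == "image stream not found", s.Code == 404
}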
diff --git a/openshift/openshift_dest.go b/openshift/openshift_dest.go
new file mode 100644
index 0000000..50a5339
--- /dev/null
+++ b/openshift/openshift_dest.go
@@ -0,0 +1,248 @@
+package openshift
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "golang.org/x/exp/slices"
+)
+
+type openshiftImageDestination struct {
+ impl.Compat
+ stubs.AlwaysSupportsSignatures
+
+ client *openshiftClient
+ docker private.ImageDestination // The docker/distribution API endpoint
+ // State
+ imageStreamImageName string // "" if not yet known
+}
+
+// newImageDestination creates a new ImageDestination for the specified reference.
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (private.ImageDestination, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
+ // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
+ // the manifest digest at this point.
+ dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
+ dockerRef, err := docker.ParseReference(dockerRefString)
+ if err != nil {
+ return nil, err
+ }
+ docker, err := dockerRef.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, err
+ }
+
+ d := &openshiftImageDestination{
+ client: client,
+ docker: imagedestination.FromPublic(docker),
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *openshiftImageDestination) Reference() types.ImageReference {
+ return d.client.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *openshiftImageDestination) Close() error {
+ err := d.docker.Close()
+ d.client.close()
+ return err
+}
+
+func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
+ return d.docker.SupportedManifestMIMETypes()
+}
+
+func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
+ return types.Compress
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// uploaded to the image destination, true otherwise.
+func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.docker.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
+ return d.docker.SupportsPutBlobPartial()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
+ return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ return d.docker.TryReusingBlobWithOptions(ctx, info, options)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// while possibly accepting a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest == nil {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.imageStreamImageName = manifestDigest.String()
+ }
+ return d.docker.PutManifest(ctx, m, instanceDigest)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ var imageStreamImageName string
+ if instanceDigest == nil {
+ if d.imageStreamImageName == "" {
+ return errors.New("Internal error: Unknown manifest digest, can't add signatures")
+ }
+ imageStreamImageName = d.imageStreamImageName
+ } else {
+ imageStreamImageName = instanceDigest.String()
+ }
+
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures.
+
+ if len(signatures) == 0 {
+ return nil // No need to even read the old state.
+ }
+
+ image, err := d.client.getImage(ctx, imageStreamImageName)
+ if err != nil {
+ return err
+ }
+ existingSigNames := set.New[string]()
+ for _, sig := range image.Signatures {
+ existingSigNames.Add(sig.objectMeta.Name)
+ }
+
+ for _, newSigWithFormat := range signatures {
+ newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
+ if !ok {
+ return signature.UnsupportedFormatError(newSigWithFormat)
+ }
+ newSig := newSigSimple.UntrustedSignature()
+
+ if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool {
+ return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
+ }) {
+ continue
+ }
+
+ // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil || n != 16 {
+ return fmt.Errorf("generating random signature len %d: %w", n, err)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
+ if !existingSigNames.Contains(signatureName) {
+ break
+ }
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ sig := imageSignature{
+ typeMeta: typeMeta{
+ Kind: "ImageSignature",
+ APIVersion: "v1",
+ },
+ objectMeta: objectMeta{Name: signatureName},
+ Type: imageSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+ _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return d.docker.Commit(ctx, unparsedToplevel)
+}
diff --git a/openshift/openshift_dest_test.go b/openshift/openshift_dest_test.go
new file mode 100644
index 0000000..795aa71
--- /dev/null
+++ b/openshift/openshift_dest_test.go
@@ -0,0 +1,5 @@
+package openshift
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageDestination = (*openshiftImageDestination)(nil)
diff --git a/openshift/openshift_src.go b/openshift/openshift_src.go
new file mode 100644
index 0000000..0ac0127
--- /dev/null
+++ b/openshift/openshift_src.go
@@ -0,0 +1,174 @@
+package openshift
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+type openshiftImageSource struct {
+ impl.Compat
+ impl.DoesNotAffectLayerInfosForCopy
+ // This is slightly suboptimal. We could forward GetBlobAt(), but we need to call ensureImageIsResolved in SupportsGetBlobAt(),
+ // and that method doesn’t provide a context for timing out. That could actually be fixed (SupportsGetBlobAt is private and we
+ // can change it), but this is a deprecated transport anyway, so for now we just punt.
+ stubs.NoGetBlobAtInitialize
+
+ client *openshiftClient
+ // Values specific to this image
+ sys *types.SystemContext
+ // State
+ docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet
+ imageStreamImageName string // Resolved image identifier, or "" if not known yet
+}
+
+// newImageSource creates a new ImageSource for the specified reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(sys *types.SystemContext, ref openshiftReference) (private.ImageSource, error) {
+ client, err := newOpenshiftClient(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &openshiftImageSource{
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ client: client,
+ sys: sys,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *openshiftImageSource) Reference() types.ImageReference {
+ return s.client.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *openshiftImageSource) Close() error {
+ var err error
+ if s.docker != nil {
+ err = s.docker.Close()
+ s.docker = nil
+ }
+
+ s.client.close()
+
+ return err
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, "", err
+ }
+ return s.docker.GetManifest(ctx, instanceDigest)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, 0, err
+ }
+ return s.docker.GetBlob(ctx, info, cache)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ var imageStreamImageName string
+ if instanceDigest == nil {
+ if err := s.ensureImageIsResolved(ctx); err != nil {
+ return nil, err
+ }
+ imageStreamImageName = s.imageStreamImageName
+ } else {
+ imageStreamImageName = instanceDigest.String()
+ }
+ image, err := s.client.getImage(ctx, imageStreamImageName)
+ if err != nil {
+ return nil, err
+ }
+ var sigs []signature.Signature
+ for _, sig := range image.Signatures {
+ if sig.Type == imageSignatureTypeAtomic {
+ sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
+ }
+ }
+ return sigs, nil
+}
+
+// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
+func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
+ if s.docker != nil {
+ return nil
+ }
+
+ // FIXME: validate components per validation.IsValidPathSegmentName?
+ path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
+ body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return err
+ }
+ // Note: This does absolutely no kind/version checking or conversions.
+ var is imageStream
+ if err := json.Unmarshal(body, &is); err != nil {
+ return err
+ }
+ var te *tagEvent
+ for _, tag := range is.Status.Tags {
+ if tag.Tag != s.client.ref.dockerReference.Tag() {
+ continue
+ }
+ if len(tag.Items) > 0 {
+ te = &tag.Items[0]
+ break
+ }
+ }
+ if te == nil {
+ return errors.New("No matching tag found")
+ }
+ logrus.Debugf("tag event %#v", te)
+ dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("Resolved reference %#v", dockerRefString)
+ dockerRef, err := docker.ParseReference("//" + dockerRefString)
+ if err != nil {
+ return err
+ }
+ d, err := dockerRef.NewImageSource(ctx, s.sys)
+ if err != nil {
+ return err
+ }
+ s.docker = d
+ s.imageStreamImageName = te.Image
+ return nil
+}
diff --git a/openshift/openshift_src_test.go b/openshift/openshift_src_test.go
new file mode 100644
index 0000000..0f88892
--- /dev/null
+++ b/openshift/openshift_src_test.go
@@ -0,0 +1,5 @@
+package openshift
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageSource = (*openshiftImageSource)(nil)
diff --git a/openshift/openshift_transport.go b/openshift/openshift_transport.go
new file mode 100644
index 0000000..0ba435d
--- /dev/null
+++ b/openshift/openshift_transport.go
@@ -0,0 +1,153 @@
+package openshift
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ genericImage "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/regexp"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OpenShift registry-hosted images.
+var Transport = openshiftTransport{}
+
+type openshiftTransport struct{}
+
+func (t openshiftTransport) Name() string {
+ return "atomic"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// Note that imageNameRegexp matches namespace/stream:tag, while this
+// matches HOSTNAME/namespace/stream:tag or parent prefixes.
+// Keep this in sync with imageNameRegexp!
+var scopeRegexp = regexp.Delayed("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
+ if scopeRegexp.FindStringIndex(scope) == nil {
+ return fmt.Errorf("Invalid scope name %s", scope)
+ }
+ return nil
+}
+
+// openshiftReference is an ImageReference for OpenShift images.
+type openshiftReference struct {
+ dockerReference reference.NamedTagged
+ namespace string // Computed from dockerReference in advance.
+ stream string // Computed from dockerReference in advance.
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
+func ParseReference(ref string) (types.ImageReference, error) {
+ r, err := reference.ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse image reference %q: %w", ref, err)
+ }
+ tagged, ok := r.(reference.NamedTagged)
+ if !ok {
+ return nil, fmt.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
+ }
+ return NewReference(tagged)
+}
+
+// NewReference returns an OpenShift reference for a reference.NamedTagged
+func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
+ r := strings.SplitN(reference.Path(dockerRef), "/", 3)
+ if len(r) != 2 {
+ return nil, fmt.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
+ reference.FamiliarString(dockerRef))
+ }
+ return openshiftReference{
+ namespace: r[0],
+ stream: r[1],
+ dockerReference: dockerRef,
+ }, nil
+}
+
+func (ref openshiftReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref openshiftReference) StringWithinTransport() string {
+ return reference.FamiliarString(ref.dockerReference)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref openshiftReference) DockerReference() reference.Named {
+ return ref.dockerReference
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref openshiftReference) PolicyConfigurationIdentity() string {
+ res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference)
+ if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference.
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+ }
+ return res
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
+ return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference)
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return genericImage.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.New("Deleting images not implemented for atomic: images")
+}
diff --git a/openshift/openshift_transport_test.go b/openshift/openshift_transport_test.go
new file mode 100644
index 0000000..af8dfd8
--- /dev/null
+++ b/openshift/openshift_transport_test.go
@@ -0,0 +1,130 @@
+package openshift
+
+import (
+ "context"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ sha256digest = "@sha256:" + sha256digestHex
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "atomic", Transport.Name())
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ for _, scope := range []string{
+ "registry.example.com/ns/stream" + sha256digest,
+ "registry.example.com/ns/stream:notlatest",
+ "registry.example.com/ns/stream",
+ "registry.example.com/ns",
+ "registry.example.com",
+ "*.example.com",
+ "*.com",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+
+ for _, scope := range []string{
+ "registry.example.com/too/deep/hierarchy",
+ "registry.example.com/ns/stream:tag1:tag2",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.Error(t, err, scope)
+ }
+}
+
+func TestNewReference(t *testing.T) {
+ // too many ns
+ r, err := reference.ParseNormalizedNamed("registry.example.com/ns1/ns2/ns3/stream:tag")
+ require.NoError(t, err)
+ tagged, ok := r.(reference.NamedTagged)
+ require.True(t, ok)
+ _, err = NewReference(tagged)
+ assert.Error(t, err)
+
+ r, err = reference.ParseNormalizedNamed("registry.example.com/ns/stream:tag")
+ require.NoError(t, err)
+ tagged, ok = r.(reference.NamedTagged)
+ require.True(t, ok)
+ _, err = NewReference(tagged)
+ assert.NoError(t, err)
+}
+
+func TestParseReference(t *testing.T) {
+ // Success
+ ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
+ require.NoError(t, err)
+ osRef, ok := ref.(openshiftReference)
+ require.True(t, ok)
+ assert.Equal(t, "ns", osRef.namespace)
+ assert.Equal(t, "stream", osRef.stream)
+ assert.Equal(t, "notlatest", osRef.dockerReference.Tag())
+ assert.Equal(t, "registry.example.com:8443", reference.Domain(osRef.dockerReference))
+
+ // Components creating an invalid Docker Reference name
+ _, err = ParseReference("registry.example.com/ns/UPPERCASEISINVALID:notlatest")
+ assert.Error(t, err)
+
+ _, err = ParseReference("registry.example.com/ns/stream:invalid!tag@value=")
+ assert.Error(t, err)
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
+ require.NoError(t, err)
+ dockerRef := ref.DockerReference()
+ require.NotNil(t, dockerRef)
+ assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", dockerRef.String())
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
+ require.NoError(t, err)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
+ require.NoError(t, err)
+ assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", ref.StringWithinTransport())
+ // We should do one more round to verify that the output can be parsed, to an equal value,
+ // but that is untested because it depends on per-user configuration.
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
+ ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
+ require.NoError(t, err)
+ assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", ref.PolicyConfigurationIdentity())
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
+ ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
+ require.NoError(t, err)
+ assert.Equal(t, []string{
+ "registry.example.com:8443/ns/stream",
+ "registry.example.com:8443/ns",
+ "registry.example.com:8443",
+ "*.example.com",
+ "*.com",
+ }, ref.PolicyConfigurationNamespaces())
+}
+
+// openshiftReference.NewImage, openshiftReference.NewImageSource, openshiftReference.NewImageDestination untested because they depend
+// on per-user configuration when initializing httpClient.
+
+func TestReferenceDeleteImage(t *testing.T) {
+ ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
+ require.NoError(t, err)
+ err = ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err)
+}
diff --git a/openshift/testdata/admin.kubeconfig b/openshift/testdata/admin.kubeconfig
new file mode 100644
index 0000000..8f23477
--- /dev/null
+++ b/openshift/testdata/admin.kubeconfig
@@ -0,0 +1,20 @@
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: Q2x1c3RlciBDQQ==
+ server: https://172.17.0.2:8443
+ name: 172-17-0-2:8443
+contexts:
+- context:
+ cluster: 172-17-0-2:8443
+ namespace: default
+ user: system:admin/172-17-0-2:8443
+ name: default/172-17-0-2:8443/system:admin
+current-context: default/172-17-0-2:8443/system:admin
+kind: Config
+preferences: {}
+users:
+- name: system:admin/172-17-0-2:8443
+ user:
+ client-certificate-data: Q2xpZW50IGNlcnQ=
+ client-key-data: Q2xpZW50IGtleQ==
diff --git a/ostree/ostree_dest.go b/ostree/ostree_dest.go
new file mode 100644
index 0000000..d00a0cd
--- /dev/null
+++ b/ostree/ostree_dest.go
@@ -0,0 +1,517 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/klauspost/pgzip"
+ "github.com/opencontainers/go-digest"
+ selinux "github.com/opencontainers/selinux/go-selinux"
+ "github.com/ostreedev/ostree-go/pkg/otbuiltin"
+ "github.com/vbatts/tar-split/tar/asm"
+ "github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+// #include <selinux/selinux.h>
+// #include <selinux/label.h>
+import "C"
+
+type blobToImport struct {
+ Size int64
+ Digest digest.Digest
+ BlobPath string
+}
+
+type descriptor struct {
+ Size int64 `json:"size"`
+ Digest digest.Digest `json:"digest"`
+}
+
+type fsLayersSchema1 struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+type manifestSchema struct {
+ LayersDescriptors []descriptor `json:"layers"`
+ FSLayers []fsLayersSchema1 `json:"fsLayers"`
+}
+
+type ostreeImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoPutBlobPartialInitialize
+ stubs.AlwaysSupportsSignatures
+
+ ref ostreeReference
+ manifest string
+ schema manifestSchema
+ tmpDirPath string
+ blobs map[string]*blobToImport
+ digest digest.Digest
+ signaturesLen int
+ repo *C.struct_OstreeRepo
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing ostree.
+func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) {
+ tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
+ if err := ensureDirectoryExists(tmpDirPath); err != nil {
+ return nil, err
+ }
+ d := &ostreeImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{manifest.DockerV2Schema2MediaType},
+ DesiredLayerCompression: types.PreserveOriginal,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: true,
+ IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
+ HasThreadSafePutBlob: false,
+ }),
+ NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+
+ ref: ref,
+ manifest: "",
+ schema: manifestSchema{},
+ tmpDirPath: tmpDirPath,
+ blobs: map[string]*blobToImport{},
+ digest: "",
+ signaturesLen: 0,
+ repo: nil,
+ }
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *ostreeImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *ostreeImageDestination) Close() error {
+ if d.repo != nil {
+ C.g_object_unref(C.gpointer(d.repo))
+ }
+ return os.RemoveAll(d.tmpDirPath)
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ blobPath := filepath.Join(tmpDir, "content")
+ blobFile, err := os.Create(blobPath)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ defer blobFile.Close()
+
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(blobFile, stream)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ blobDigest := digester.Digest()
+ if inputInfo.Size != -1 && size != inputInfo.Size {
+ return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ }
+ if err := blobFile.Sync(); err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ hash := blobDigest.Hex()
+ d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
+ return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
+
+func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ return err
+ }
+
+ for _, entry := range entries {
+ fullpath := filepath.Join(dir, entry.Name())
+ if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+ if err := os.Remove(fullpath); err != nil {
+ return err
+ }
+ continue
+ }
+
+ info, err := entry.Info()
+ if err != nil {
+ return err
+ }
+ if selinuxHnd != nil {
+ relPath, err := filepath.Rel(root, fullpath)
+ if err != nil {
+ return err
+ }
+ // Handle /exports/hostfs as a special case. Files under this directory are copied to the host,
+ // so we benefit from keeping the same SELinux label they would have on the host, which lets us
+ // use hard links instead of copying the files.
+ relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/"))
+
+ relPathC := C.CString(relPath)
+ defer C.free(unsafe.Pointer(relPathC))
+ var context *C.char
+
+ res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
+ if int(res) < 0 && err != syscall.ENOENT {
+ return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err)
+ }
+ if int(res) == 0 {
+ defer C.freecon(context)
+ fullpathC := C.CString(fullpath)
+ defer C.free(unsafe.Pointer(fullpathC))
+ res, err = C.lsetfilecon_raw(fullpathC, context)
+ if int(res) < 0 {
+ return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err)
+ }
+ }
+ }
+
+ if entry.IsDir() {
+ if usermode {
+ if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
+ return err
+ }
+ }
+ err = fixFiles(selinuxHnd, root, fullpath, usermode)
+ if err != nil {
+ return err
+ }
+ } else if usermode && (entry.Type().IsRegular()) {
+ if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error {
+ opts := otbuiltin.NewCommitOptions()
+ opts.AddMetadataString = metadata
+ opts.Timestamp = time.Now()
+ // OCI layers have no parent OSTree commit
+ opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
+ _, err := repo.Commit(root, branch, opts)
+ return err
+}
+
+func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
+ mfz := pgzip.NewWriter(output)
+ defer mfz.Close()
+ metaPacker := storage.NewJSONPacker(mfz)
+
+ stream, err := os.OpenFile(file, os.O_RDONLY, 0)
+ if err != nil {
+ return "", -1, err
+ }
+ defer stream.Close()
+
+ gzReader, err := archive.DecompressStream(stream)
+ if err != nil {
+ return "", -1, err
+ }
+ defer gzReader.Close()
+
+ its, err := asm.NewInputTarStream(gzReader, metaPacker, nil)
+ if err != nil {
+ return "", -1, err
+ }
+
+ digester := digest.Canonical.Digester()
+
+ written, err := io.Copy(digester.Hash(), its)
+ if err != nil {
+ return "", -1, err
+ }
+
+ return digester.Digest(), written, nil
+}
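The tarsplit.output metadata committed below is what later makes it possible to reproduce the layer's tar stream from the checked-out files. A minimal sketch of that reverse direction follows, assuming the tar-split asm/storage and pgzip packages imported above; the helper name and on-disk layout are illustrative, not part of this patch.

// reassembleLayerTar is a hedged sketch: given the directory a layer was unpacked into and
// the (pgzip-compressed) tar-split metadata produced by generateTarSplitMetadata, it returns
// a reader for the reconstructed uncompressed tar stream.
func reassembleLayerTar(root string, tarSplitGz io.Reader) (io.ReadCloser, error) {
	metadata, err := pgzip.NewReader(tarSplitGz) // the metadata was written through a pgzip.Writer
	if err != nil {
		return nil, err
	}
	unpacker := storage.NewJSONUnpacker(metadata)
	fileGetter := storage.NewPathFileGetter(root)
	return asm.NewOutputTarStream(fileGetter, unpacker), nil
}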
+
+func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
+ // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+
+ ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+ destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+ if err := ensureDirectoryExists(destinationPath); err != nil {
+ return err
+ }
+ defer func() {
+ os.Remove(blob.BlobPath)
+ os.RemoveAll(destinationPath)
+ }()
+
+ var tarSplitOutput bytes.Buffer
+ uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
+ if err != nil {
+ return err
+ }
+
+ if os.Getuid() == 0 {
+ if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
+ return err
+ }
+ if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
+ return err
+ }
+ } else {
+ os.MkdirAll(destinationPath, 0755)
+ if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
+ return err
+ }
+
+ if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
+ return err
+ }
+ }
+ return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
+ fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
+ fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
+ fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
+
+}
+
+func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
+ ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+ destinationPath := filepath.Dir(blob.BlobPath)
+
+ return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalBlobMatchesRequiredCompression(options) {
+ return false, private.ReusedBlob{}, nil
+ }
+ if d.repo == nil {
+ repo, err := openRepo(d.ref.repo)
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ d.repo = repo
+ }
+ branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+
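+	// A blob can be reused only if its branch already exists and carries the
+	// metadata recorded by importBlob; probe each key and report the blob as not
+	// reusable if any of them is missing.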
+ found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
+ if err != nil || !found {
+ return found, private.ReusedBlob{}, err
+ }
+
+ found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
+ if err != nil || !found {
+ return found, private.ReusedBlob{}, err
+ }
+
+ found, data, err = readMetadata(d.repo, branch, "docker.size")
+ if err != nil || !found {
+ return found, private.ReusedBlob{}, err
+ }
+
+ size, err := strconv.ParseInt(data, 10, 64)
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+
+ return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.New(`Manifest lists are not supported by "ostree:"`)
+ }
+
+ d.manifest = string(manifestBlob)
+
+ if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
+ return err
+ }
+
+ manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
+ if err := ensureParentDirectoryExists(manifestPath); err != nil {
+ return err
+ }
+
+ digest, err := manifest.Digest(manifestBlob)
+ if err != nil {
+ return err
+ }
+ d.digest = digest
+
+ return os.WriteFile(manifestPath, manifestBlob, 0644)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ if instanceDigest != nil {
+ return errors.New(`Manifest lists are not supported by "ostree:"`)
+ }
+
+ path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
+ if err := ensureParentDirectoryExists(path); err != nil {
+ return err
+ }
+
+ for i, sig := range signatures {
+ signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
+ blob, err := signature.Blob(sig)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(signaturePath, blob, 0644); err != nil {
+ return err
+ }
+ }
+ d.signaturesLen = len(signatures)
+ return nil
+}
+
+func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ repo, err := otbuiltin.OpenRepo(d.ref.repo)
+ if err != nil {
+ return err
+ }
+
+ _, err = repo.PrepareTransaction()
+ if err != nil {
+ return err
+ }
+
+ var selinuxHnd *C.struct_selabel_handle
+
+ if os.Getuid() == 0 && selinux.GetEnabled() {
+ selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
+ if selinuxHnd == nil {
+ return fmt.Errorf("cannot open the SELinux DB: %w", err)
+ }
+
+ defer C.selabel_close(selinuxHnd)
+ }
+
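+	// checkLayer imports a still-pending layer blob (one present in d.blobs) into
+	// its own ociimage/<digest> branch and removes it from d.blobs, so that whatever
+	// remains afterwards is committed as a non-layer (config) blob below.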
+ checkLayer := func(hash string) error {
+ blob := d.blobs[hash]
+ // if the blob is not present in d.blobs then it is already stored in OSTree,
+ // and we don't need to import it.
+ if blob == nil {
+ return nil
+ }
+ err := d.importBlob(selinuxHnd, repo, blob)
+ if err != nil {
+ return err
+ }
+
+ delete(d.blobs, hash)
+ return nil
+ }
+ for _, layer := range d.schema.LayersDescriptors {
+ hash := layer.Digest.Hex()
+ if err = checkLayer(hash); err != nil {
+ return err
+ }
+ }
+ for _, layer := range d.schema.FSLayers {
+ hash := layer.BlobSum.Hex()
+ if err = checkLayer(hash); err != nil {
+ return err
+ }
+ }
+
+ // Import the other blobs that are not layers
+ for _, blob := range d.blobs {
+ err := d.importConfig(repo, blob)
+ if err != nil {
+ return err
+ }
+ }
+
+ manifestPath := filepath.Join(d.tmpDirPath, "manifest")
+
+ metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
+ fmt.Sprintf("signatures=%d", d.signaturesLen),
+ fmt.Sprintf("docker.digest=%s", string(d.digest))}
+ if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
+ return err
+ }
+
+ _, err = repo.CommitTransaction()
+ return err
+}
+
+func ensureDirectoryExists(path string) error {
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ensureParentDirectoryExists(path string) error {
+ return ensureDirectoryExists(filepath.Dir(path))
+}
diff --git a/ostree/ostree_dest_test.go b/ostree/ostree_dest_test.go
new file mode 100644
index 0000000..ccbf3c5
--- /dev/null
+++ b/ostree/ostree_dest_test.go
@@ -0,0 +1,10 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+ "github.com/containers/image/v5/internal/private"
+)
+
+var _ private.ImageDestination = (*ostreeImageDestination)(nil)
diff --git a/ostree/ostree_src.go b/ostree/ostree_src.go
new file mode 100644
index 0000000..9983acc
--- /dev/null
+++ b/ostree/ostree_src.go
@@ -0,0 +1,450 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/klauspost/pgzip"
+ digest "github.com/opencontainers/go-digest"
+ glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+ "github.com/vbatts/tar-split/tar/asm"
+ "github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
+type ostreeImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoGetBlobAtInitialize
+
+ ref ostreeReference
+ tmpDir string
+ repo *C.struct_OstreeRepo
+	// compressed maps the uncompressed digest of each layer to the digest of the corresponding compressed blob.
+ compressed map[digest.Digest]digest.Digest
+}
+
+// newImageSource returns an ImageSource for reading from an existing OSTree repository.
+func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) {
+ s := &ostreeImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ tmpDir: tmpDir,
+ compressed: nil,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ostreeImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ostreeImageSource) Close() error {
+ if s.repo != nil {
+ C.g_object_unref(C.gpointer(s.repo))
+ }
+ return nil
+}
+
+func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
+ var metadataKey string
+ if isCompressed {
+ metadataKey = "docker.uncompressed_size"
+ } else {
+ metadataKey = "docker.size"
+ }
+ b := fmt.Sprintf("ociimage/%s", blob)
+ found, data, err := readMetadata(s.repo, b, metadataKey)
+ if err != nil || !found {
+ return 0, err
+ }
+ return strconv.ParseInt(data, 10, 64)
+}
+
+func (s *ostreeImageSource) getLenSignatures() (int64, error) {
+ b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+ found, data, err := readMetadata(s.repo, b, "signatures")
+ if err != nil {
+ return -1, err
+ }
+ if !found {
+ // if 'signatures' is not present, just return 0 signatures.
+ return 0, nil
+ }
+ return strconv.ParseInt(data, 10, 64)
+}
+
+func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
+ b := fmt.Sprintf("ociimage/%s", blob)
+ found, out, err := readMetadata(s.repo, b, "tarsplit.output")
+ if err != nil || !found {
+ return nil, err
+ }
+ return base64.StdEncoding.DecodeString(out)
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no non-default instances.
+func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`)
+ }
+ if s.repo == nil {
+ repo, err := openRepo(s.ref.repo)
+ if err != nil {
+ return nil, "", err
+ }
+ s.repo = repo
+ }
+
+ b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+ found, out, err := readMetadata(s.repo, b, "docker.manifest")
+ if err != nil {
+ return nil, "", err
+ }
+ if !found {
+ return nil, "", errors.New("manifest not found")
+ }
+ m := []byte(out)
+ return m, manifest.GuessMIMEType(m), nil
+}
+
+func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ return nil, "", errors.New("manifest lists are not supported by this transport")
+}
+
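+// openRepo opens the OSTree repository at path; the caller is responsible for
+// releasing the returned repo with g_object_unref.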
+func openRepo(path string) (*C.struct_OstreeRepo, error) {
+ var cerr *C.GError
+ cpath := C.CString(path)
+ defer C.free(unsafe.Pointer(cpath))
+ pathc := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(pathc))
+ repo := C.ostree_repo_new(pathc)
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
+ if !r {
+ C.g_object_unref(C.gpointer(repo))
+ return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+ return repo, nil
+}
+
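+// ostreePathFileGetter reads file contents from within an OSTree commit; it is
+// used as the file getter when reassembling layer tarballs via tar-split.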
+type ostreePathFileGetter struct {
+ repo *C.struct_OstreeRepo
+ parentRoot *C.GFile
+}
+
+type ostreeReader struct {
+ stream *C.GFileInputStream
+}
+
+func (o ostreeReader) Close() error {
+ C.g_object_unref(C.gpointer(o.stream))
+ return nil
+}
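+
+// Read reads one GBytes chunk (up to cap(p) bytes) from the underlying
+// GInputStream and copies it into p, returning io.EOF once the stream is
+// exhausted.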
+func (o ostreeReader) Read(p []byte) (int, error) {
+ var cerr *C.GError
+ instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
+ stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
+
+ b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
+ if b == nil {
+ return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+ defer C.g_bytes_unref(b)
+
+ count := int(C.g_bytes_get_size(b))
+ if count == 0 {
+ return 0, io.EOF
+ }
+ data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
+ copy(p, data)
+ return count, nil
+}
+
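+// readMetadata resolves the named ref (commit) in repo and looks up key in that
+// commit's metadata; it returns found == false, with no error, if either the
+// ref or the key does not exist.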
+func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
+ var cerr *C.GError
+ var ref *C.char
+	// Evaluate ref at return time: a plain `defer C.free(unsafe.Pointer(ref))` would free the nil value ref holds here.
+	defer func() { C.free(unsafe.Pointer(ref)) }()
+
+ cCommit := C.CString(commit)
+ defer C.free(unsafe.Pointer(cCommit))
+
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
+ return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+
+ if ref == nil {
+ return false, "", nil
+ }
+
+ var variant *C.GVariant
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
+ return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+ defer C.g_variant_unref(variant)
+ if variant != nil {
+ cKey := C.CString(key)
+ defer C.free(unsafe.Pointer(cKey))
+
+ metadata := C.g_variant_get_child_value(variant, 0)
+ defer C.g_variant_unref(metadata)
+
+ data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
+ if data != nil {
+ defer C.g_variant_unref(data)
+ ptr := (*C.char)(C.g_variant_get_string(data, nil))
+ val := C.GoString(ptr)
+ return true, val, nil
+ }
+ }
+ return false, "", nil
+}
+
+func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
+ var cerr *C.GError
+ var parentRoot *C.GFile
+ cCommit := C.CString(commit)
+ defer C.free(unsafe.Pointer(cCommit))
+ if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
+ return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+
+ C.g_object_ref(C.gpointer(repo))
+
+ return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
+}
+
+func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
+ var file *C.GFile
+ if strings.HasPrefix(filename, "./") {
+ filename = filename[2:]
+ }
+ cfilename := C.CString(filename)
+ defer C.free(unsafe.Pointer(cfilename))
+
+ file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
+
+ var cerr *C.GError
+ stream := C.g_file_read(file, nil, &cerr)
+ if stream == nil {
+ return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+
+ return &ostreeReader{stream: stream}, nil
+}
+
+func (o ostreePathFileGetter) Close() {
+ C.g_object_unref(C.gpointer(o.repo))
+ C.g_object_unref(C.gpointer(o.parentRoot))
+}
+
+func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
+ getter, err := newOSTreePathFileGetter(s.repo, commit)
+ if err != nil {
+ return nil, err
+ }
+ defer getter.Close()
+
+ return getter.Get(path)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+
+ blob := info.Digest.Hex()
+
+	// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
+ if s.compressed == nil {
+ _, err := s.LayerInfosForCopy(ctx, nil)
+ if err != nil {
+ return nil, -1, err
+ }
+
+ }
+ compressedBlob, isCompressed := s.compressed[info.Digest]
+ if isCompressed {
+ blob = compressedBlob.Hex()
+ }
+ branch := fmt.Sprintf("ociimage/%s", blob)
+
+ if s.repo == nil {
+ repo, err := openRepo(s.ref.repo)
+ if err != nil {
+ return nil, 0, err
+ }
+ s.repo = repo
+ }
+
+ layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ tarsplit, err := s.getTarSplitData(blob)
+ if err != nil {
+ return nil, 0, err
+ }
+
+	// If tarsplit is nil we are looking at the manifest; return the file stored at /content directly.
+ if tarsplit == nil {
+ file, err := s.readSingleFile(branch, "/content")
+ if err != nil {
+ return nil, 0, err
+ }
+ return file, layerSize, nil
+ }
+
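+	// The layer was stored as a plain file tree; replay the tar-split metadata,
+	// fetching file contents from the OSTree branch, to reassemble the original
+	// uncompressed tar stream.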
+ mf := bytes.NewReader(tarsplit)
+ mfz, err := pgzip.NewReader(mf)
+ if err != nil {
+ return nil, 0, err
+ }
+ metaUnpacker := storage.NewJSONUnpacker(mfz)
+
+ getter, err := newOSTreePathFileGetter(s.repo, branch)
+ if err != nil {
+ mfz.Close()
+ return nil, 0, err
+ }
+
+ ots := asm.NewOutputTarStream(getter, metaUnpacker)
+
+ rc := ioutils.NewReadCloserWrapper(ots, func() error {
+ getter.Close()
+ mfz.Close()
+ return ots.Close()
+ })
+ return rc, layerSize, nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ if instanceDigest != nil {
+ return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+ }
+ lenSignatures, err := s.getLenSignatures()
+ if err != nil {
+ return nil, err
+ }
+ branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+
+ if s.repo == nil {
+ repo, err := openRepo(s.ref.repo)
+ if err != nil {
+ return nil, err
+ }
+ s.repo = repo
+ }
+
+ signatures := []signature.Signature{}
+ for i := int64(1); i <= lenSignatures; i++ {
+ path := fmt.Sprintf("/signature-%d", i)
+ sigReader, err := s.readSingleFile(branch, path)
+ if err != nil {
+ return nil, err
+ }
+ defer sigReader.Close()
+
+ sigBlob, err := io.ReadAll(sigReader)
+ if err != nil {
+ return nil, err
+ }
+ sig, err := signature.FromBlob(sigBlob)
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature %q: %w", path, err)
+ }
+ signatures = append(signatures, sig)
+ }
+ return signatures, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ if instanceDigest != nil {
+ return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+ }
+
+ updatedBlobInfos := []types.BlobInfo{}
+ manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+
+	man, err := manifest.FromBlob(manifestBlob, manifestType)
+	if err != nil {
+		return nil, err
+	}
+
+ s.compressed = make(map[digest.Digest]digest.Digest)
+
+ layerBlobs := man.LayerInfos()
+
+ for _, layerBlob := range layerBlobs {
+ branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+ found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
+ if err != nil || !found {
+ return nil, err
+ }
+
+ found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
+ if err != nil || !found {
+ return nil, err
+ }
+
+ uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ uncompressedDigest := digest.Digest(uncompressedDigestStr)
+ blobInfo := types.BlobInfo{
+ Digest: uncompressedDigest,
+ Size: uncompressedSize,
+ MediaType: layerBlob.MediaType,
+ }
+ s.compressed[uncompressedDigest] = layerBlob.Digest
+ updatedBlobInfos = append(updatedBlobInfos, blobInfo)
+ }
+ return updatedBlobInfos, nil
+}
diff --git a/ostree/ostree_src_test.go b/ostree/ostree_src_test.go
new file mode 100644
index 0000000..60c35e6
--- /dev/null
+++ b/ostree/ostree_src_test.go
@@ -0,0 +1,8 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageSource = (*ostreeImageSource)(nil)
diff --git a/ostree/ostree_transport.go b/ostree/ostree_transport.go
new file mode 100644
index 0000000..d83f85b
--- /dev/null
+++ b/ostree/ostree_transport.go
@@ -0,0 +1,242 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/regexp"
+)
+
+const defaultOSTreeRepo = "/ostree/repo"
+
+// Transport is an ImageTransport for ostree paths.
+var Transport = ostreeTransport{}
+
+type ostreeTransport struct{}
+
+func (t ostreeTransport) Name() string {
+ return "ostree"
+}
+
+func init() {
+ transports.Register(Transport)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
+ sep := strings.Index(scope, ":")
+ if sep < 0 {
+ return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
+ }
+ repo := scope[:sep]
+
+ if !strings.HasPrefix(repo, "/") {
+ return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
+ }
+ cleaned := filepath.Clean(repo)
+ if cleaned != repo {
+ return fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+ }
+
+ // FIXME? In the namespaces within a repo,
+ // we could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// ostreeReference is an ImageReference for ostree paths.
+type ostreeReference struct {
+ image string
+ branchName string
+ repo string
+}
+
+type ostreeImageCloser struct {
+ types.ImageCloser
+ size int64
+}
+
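+// ParseReference parses a reference of the form "image[@/absolute/repo/path]";
+// when the repo part is omitted, defaultOSTreeRepo is used.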
+func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
+ var repo = ""
+ image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
+ if !gotRepoPart {
+ repo = defaultOSTreeRepo
+ } else {
+ repo = "/" + repoPart
+ }
+
+ return NewReference(image, repo)
+}
+
+// NewReference returns an OSTree reference for a specified repo and image.
+func NewReference(image string, repo string) (types.ImageReference, error) {
+ // image is not _really_ in a containers/image/docker/reference format;
+ // as far as the libOSTree ociimage/* namespace is concerned, it is more or
+ // less an arbitrary string with an implied tag.
+ // Parse the image using reference.ParseNormalizedNamed so that we can
+	// check whether the image has a tag specified and add ":latest" if needed.
+ ostreeImage, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return nil, err
+ }
+
+ if reference.IsNameOnly(ostreeImage) {
+ image = image + ":latest"
+ }
+
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
+ if err != nil {
+		// With os.IsNotExist(err), the parent directory of repo does not exist either;
+ // that should ordinarily not happen, but it would be a bit weird to reject
+ // references which do not specify a repo just because the implicit defaultOSTreeRepo
+ // does not exist.
+ if os.IsNotExist(err) && repo == defaultOSTreeRepo {
+ resolved = repo
+ } else {
+ return nil, err
+ }
+ }
+ // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
+ // from being ambiguous with values of PolicyConfigurationIdentity.
+ if strings.Contains(resolved, ":") {
+ return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
+ }
+
+ return ostreeReference{
+ image: image,
+ branchName: encodeOStreeRef(image),
+ repo: resolved,
+ }, nil
+}
+
+func (ref ostreeReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ostreeReference) StringWithinTransport() string {
+ return fmt.Sprintf("%s@%s", ref.image, ref.repo)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ostreeReference) DockerReference() reference.Named {
+ return nil
+}
+
+func (ref ostreeReference) PolicyConfigurationIdentity() string {
+ return fmt.Sprintf("%s:%s", ref.repo, ref.image)
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
+ repo, _, gotTag := strings.Cut(ref.image, ":")
+ if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
+ panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
+ }
+ name := repo
+ res := []string{}
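+	// Emit the most specific namespace first, then keep stripping the trailing
+	// path component of the image name until none is left.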
+ for {
+ res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
+
+ lastSlash := strings.LastIndex(name, "/")
+ if lastSlash == -1 {
+ break
+ }
+ name = name[:lastSlash]
+ }
+ return res
+}
+
+func (s *ostreeImageCloser) Size() (int64, error) {
+ return s.size, nil
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ var tmpDir string
+ if sys == nil || sys.OSTreeTmpDirPath == "" {
+ tmpDir = os.TempDir()
+ } else {
+ tmpDir = sys.OSTreeTmpDirPath
+ }
+ return newImageSource(tmpDir, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ var tmpDir string
+ if sys == nil || sys.OSTreeTmpDirPath == "" {
+ tmpDir = os.TempDir()
+ } else {
+ tmpDir = sys.OSTreeTmpDirPath
+ }
+ return newImageDestination(ref, tmpDir)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.New("Deleting images not implemented for ostree: images")
+}
+
+var ostreeRefRegexp = regexp.Delayed(`^[A-Za-z0-9.-]$`)
+
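+// encodeOStreeRef escapes an image name into a valid OSTree ref: any character
+// outside [A-Za-z0-9.-] is replaced by a _XX hexadecimal escape, e.g.
+// "busybox:latest" becomes "busybox_3Alatest".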
+func encodeOStreeRef(in string) string {
+ var buffer bytes.Buffer
+ for i := range in {
+ sub := in[i : i+1]
+ if ostreeRefRegexp.MatchString(sub) {
+ buffer.WriteString(sub)
+ } else {
+ buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
+ }
+
+ }
+ return buffer.String()
+}
+
+// manifestPath returns a path for the manifest within an OSTree repository, using our conventions.
+func (ref ostreeReference) manifestPath() string {
+ return filepath.Join("manifest", "manifest.json")
+}
+
+// signaturePath returns a path for a signature within an OSTree repository, using our conventions.
+func (ref ostreeReference) signaturePath(index int) string {
+ return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
+}
diff --git a/ostree/ostree_transport_test.go b/ostree/ostree_transport_test.go
new file mode 100644
index 0000000..ae639ed
--- /dev/null
+++ b/ostree/ostree_transport_test.go
@@ -0,0 +1,305 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ _ "github.com/containers/image/v5/internal/testing/explicitfilepath-tmpdir"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ sha256digest = "@sha256:" + sha256digestHex
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "ostree", Transport.Name())
+}
+
+// A helper to replace $TMP in a repo path with a real temporary directory
+func withTmpDir(repo string, tmpDir string) string {
+ return strings.ReplaceAll(repo, "$TMP", tmpDir)
+}
+
+// A common list of repo suffixes to test for the various ImageReference methods.
+var repoSuffixes = []struct{ repoSuffix, resolvedRepo string }{
+ {"", "/ostree/repo"},
+ {"@/ostree/repo", "/ostree/repo"}, // /ostree/repo is accepted even if neither /ostree/repo nor /ostree exists, as a special case.
+ {"@$TMP/at@sign@repo", "$TMP/at@sign@repo"},
+ // Rejected as ambiguous: /repo:with:colons could either be an (/repo, with:colons) policy configuration identity, or a (/repo:with, colons) policy configuration namespace.
+ {"@$TMP/repo:with:colons", ""},
+}
+
+// A common list of cases for image name parsing and normalization
+var imageNameTestcases = []struct{ input, normalized, branchName string }{
+ {"busybox:notlatest", "busybox:notlatest", "busybox_3Anotlatest"}, // Explicit tag
+ {"busybox", "busybox:latest", "busybox_3Alatest"}, // Default tag
+ {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "docker.io_2Flibrary_2Fbusybox_3Alatest"}, // A hierarchical name
+ {"127.0.0.1:5000/busybox:latest", "127.0.0.1:5000/busybox:latest", "127.0.0.1_3A5000_2Fbusybox_3Alatest"}, // Port usage
+ {"busybox" + sha256digest, "busybox" + sha256digest, "busybox_40sha256_3A0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
+ {"UPPERCASEISINVALID", "", ""}, // Invalid input
+ {"busybox:invalid+tag", "", ""}, // Invalid tag value
+ {"busybox:tag:with:colons", "", ""}, // Multiple colons - treated as a tag which contains a colon, which is invalid
+ {"", "", ""}, // Empty input is rejected (invalid repository.Named)
+}
+
+func TestTransportParseReference(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for _, c := range imageNameTestcases {
+ for _, suffix := range repoSuffixes {
+ fullInput := c.input + withTmpDir(suffix.repoSuffix, tmpDir)
+ ref, err := Transport.ParseReference(fullInput)
+ if c.normalized == "" || suffix.resolvedRepo == "" {
+ assert.Error(t, err, fullInput)
+ } else {
+ require.NoError(t, err, fullInput)
+ ostreeRef, ok := ref.(ostreeReference)
+ require.True(t, ok, fullInput)
+ assert.Equal(t, c.normalized, ostreeRef.image, fullInput)
+ assert.Equal(t, c.branchName, ostreeRef.branchName, fullInput)
+ assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, fullInput)
+ }
+ }
+ }
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ for _, scope := range []string{
+ "/etc:docker.io/library/busybox:notlatest", // This also demonstrates that two colons are interpreted as repo:name:tag.
+ "/etc:docker.io/library/busybox",
+ "/etc:docker.io/library",
+ "/etc:docker.io",
+ "/etc:repo",
+ "/this/does/not/exist:notlatest",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+
+ for _, scope := range []string{
+ "/colon missing as a path-reference delimiter",
+ "relative/path:busybox",
+ "/double//slashes:busybox",
+ "/has/./dot:busybox",
+ "/has/dot/../dot:busybox",
+ "/trailing/slash/:busybox",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.Error(t, err, scope)
+ }
+}
+
+func TestNewReference(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for _, c := range imageNameTestcases {
+ for _, suffix := range repoSuffixes {
+ if suffix.repoSuffix == "" {
+ continue
+ }
+ caseName := c.input + suffix.repoSuffix
+ ref, err := NewReference(c.input, withTmpDir(strings.TrimPrefix(suffix.repoSuffix, "@"), tmpDir))
+ if c.normalized == "" || suffix.resolvedRepo == "" {
+ assert.Error(t, err, caseName)
+ } else {
+ require.NoError(t, err, caseName)
+ ostreeRef, ok := ref.(ostreeReference)
+ require.True(t, ok, caseName)
+ assert.Equal(t, c.normalized, ostreeRef.image, caseName)
+ assert.Equal(t, c.branchName, ostreeRef.branchName, caseName)
+ assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, caseName)
+ }
+ }
+ }
+
+ for _, path := range []string{
+ "/",
+ "/etc",
+ tmpDir,
+ "relativepath",
+ tmpDir + "/thisdoesnotexist",
+ } {
+ _, err := NewReference("busybox", path)
+ require.NoError(t, err, path)
+ }
+
+ _, err := NewReference("busybox", tmpDir+"/thisparentdoesnotexist/something")
+ assert.Error(t, err)
+}
+
+// A common list of reference formats to test for the various ImageReference methods.
+var validReferenceTestCases = []struct{ input, stringWithinTransport, policyConfigurationIdentity string }{
+ {"busybox", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // Everything implied
+ {"busybox:latest@/ostree/repo", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // All implied values explicitly specified
+ {"example.com/ns/foo:bar@$TMP/non-DEFAULT", "example.com/ns/foo:bar@$TMP/non-DEFAULT", "$TMP/non-DEFAULT:example.com/ns/foo:bar"}, // All values explicitly specified, a hierarchical name
+ // A non-canonical path. Testing just one, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
+ {"busybox@$TMP/.", "busybox:latest@$TMP", "$TMP:busybox:latest"},
+ // "/" as a corner case
+ {"busybox@/", "busybox:latest@/", "/:busybox:latest"},
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, err := Transport.ParseReference("busybox")
+ require.NoError(t, err)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for _, c := range validReferenceTestCases {
+ ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
+ require.NoError(t, err, c.input)
+ stringRef := ref.StringWithinTransport()
+ assert.Equal(t, withTmpDir(c.stringWithinTransport, tmpDir), stringRef, c.input)
+ // Do one more round to verify that the output can be parsed, to an equal value.
+ ref2, err := Transport.ParseReference(stringRef)
+ require.NoError(t, err, c.input)
+ stringRef2 := ref2.StringWithinTransport()
+ assert.Equal(t, stringRef, stringRef2, c.input)
+ }
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for _, c := range validReferenceTestCases {
+ ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
+ require.NoError(t, err, c.input)
+ dockerRef := ref.DockerReference()
+ assert.Nil(t, dockerRef, c.input)
+ }
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ for _, c := range validReferenceTestCases {
+ ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
+ require.NoError(t, err, c.input)
+ assert.Equal(t, withTmpDir(c.policyConfigurationIdentity, tmpDir), ref.PolicyConfigurationIdentity(), c.input)
+ }
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ tmpDir := t.TempDir()
+
+	// Test both that PolicyConfigurationIdentity returns the expected value (repo:name:tag),
+	// and that PolicyConfigurationNamespaces returns the expected repo-prefixed namespaces, i.e. that the two functions are
+	// consistent.
+ for inputName, expectedNS := range map[string][]string{
+ "example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com"},
+ "example.com/repo": {"example.com/repo", "example.com"},
+ "localhost/ns/repo": {"localhost/ns/repo", "localhost/ns", "localhost"},
+ "localhost/repo": {"localhost/repo", "localhost"},
+ "ns/repo": {"ns/repo", "ns"},
+ "repo": {"repo"},
+ } {
+ // Test with a known path which should exist. Test just one non-canonical
+ // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
+ for _, repoInput := range []string{tmpDir, tmpDir + "/./."} {
+ fullName := inputName + ":notlatest"
+ ref, err := NewReference(fullName, repoInput)
+ require.NoError(t, err, fullName)
+
+ identity := ref.PolicyConfigurationIdentity()
+ assert.Equal(t, tmpDir+":"+expectedNS[0]+":notlatest", identity, fullName)
+
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns, fullName)
+ require.Len(t, ns, len(expectedNS), fullName)
+ moreSpecific := identity
+ for i := range expectedNS {
+ assert.Equal(t, tmpDir+":"+expectedNS[i], ns[i], fmt.Sprintf("%s item %d", fullName, i))
+ assert.True(t, strings.HasPrefix(moreSpecific, ns[i]))
+ moreSpecific = ns[i]
+ }
+ }
+ }
+}
+
+func TestReferenceNewImage(t *testing.T) {
+ ref, err := Transport.ParseReference("busybox")
+ require.NoError(t, err)
+ _, err = ref.NewImage(context.Background(), nil)
+ assert.Error(t, err)
+}
+
+func TestReferenceNewImageSource(t *testing.T) {
+ ref, err := Transport.ParseReference("busybox")
+ require.NoError(t, err)
+ src, err := ref.NewImageSource(context.Background(), nil)
+ require.NoError(t, err)
+ defer src.Close()
+}
+
+func TestReferenceNewImageDestination(t *testing.T) {
+ otherTmpDir := t.TempDir()
+
+ for _, c := range []struct {
+ sys *types.SystemContext
+ tmpDir string
+ }{
+ {nil, os.TempDir()},
+ {&types.SystemContext{}, os.TempDir()},
+ {&types.SystemContext{OSTreeTmpDirPath: otherTmpDir}, otherTmpDir},
+ } {
+ ref, err := Transport.ParseReference("busybox")
+ require.NoError(t, err)
+ dest, err := ref.NewImageDestination(context.Background(), c.sys)
+ require.NoError(t, err)
+ ostreeDest, ok := dest.(*ostreeImageDestination)
+ require.True(t, ok)
+ assert.Equal(t, c.tmpDir+"/busybox_3Alatest", ostreeDest.tmpDirPath)
+ defer dest.Close()
+ }
+}
+
+func TestReferenceDeleteImage(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ ref, err := Transport.ParseReference(withTmpDir("busybox@$TMP/this-repo-does-not-exist", tmpDir))
+ require.NoError(t, err)
+ err = ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err)
+}
+
+func TestEncodeOSTreeRef(t *testing.T) {
+ // Just a smoke test
+ assert.Equal(t, "busybox_3Alatest", encodeOStreeRef("busybox:latest"))
+}
+
+func TestReferenceManifestPath(t *testing.T) {
+ ref, err := Transport.ParseReference("busybox")
+ require.NoError(t, err)
+ ostreeRef, ok := ref.(ostreeReference)
+ require.True(t, ok)
+ assert.Equal(t, fmt.Sprintf("manifest%cmanifest.json", filepath.Separator), ostreeRef.manifestPath())
+}
+
+func TestReferenceSignaturePath(t *testing.T) {
+ ref, err := Transport.ParseReference("busybox")
+ require.NoError(t, err)
+ ostreeRef, ok := ref.(ostreeReference)
+ require.True(t, ok)
+ for _, c := range []struct {
+ input int
+ suffix string
+ }{
+ {0, "-1"},
+ {42, "-43"},
+ } {
+ assert.Equal(t, fmt.Sprintf("manifest%csignature%s", filepath.Separator, c.suffix), ostreeRef.signaturePath(c.input), fmt.Sprintf("%d", c.input))
+ }
+}
diff --git a/pkg/blobcache/blobcache.go b/pkg/blobcache/blobcache.go
new file mode 100644
index 0000000..2bbf488
--- /dev/null
+++ b/pkg/blobcache/blobcache.go
@@ -0,0 +1,148 @@
+package blobcache
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ compressedNote = ".compressed"
+ decompressedNote = ".decompressed"
+)
+
+// BlobCache is an object which saves copies of blobs that are written to it while passing them
+// through to some real destination, and which can be queried directly in order to read them
+// back.
+//
+// Implements types.ImageReference.
+type BlobCache struct {
+ reference types.ImageReference
+ // WARNING: The contents of this directory may be accessed concurrently,
+ // both within this process and by multiple different processes
+ directory string
+ compress types.LayerCompression
+}
+
+// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
+// written to the destination image created from the resulting reference will also be stored
+// as-is to the specified directory or a temporary directory.
+// The compress argument controls whether or not the cache will try to substitute a compressed
+// or different version of a blob when preparing the list of layers when reading an image.
+func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (*BlobCache, error) {
+ if directory == "" {
+ return nil, fmt.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref))
+ }
+ switch compress {
+ case types.Compress, types.Decompress, types.PreserveOriginal:
+ // valid value, accept it
+ default:
+ return nil, fmt.Errorf("unhandled LayerCompression value %v", compress)
+ }
+ return &BlobCache{
+ reference: ref,
+ directory: directory,
+ compress: compress,
+ }, nil
+}
+
+func (b *BlobCache) Transport() types.ImageTransport {
+ return b.reference.Transport()
+}
+
+func (b *BlobCache) StringWithinTransport() string {
+ return b.reference.StringWithinTransport()
+}
+
+func (b *BlobCache) DockerReference() reference.Named {
+ return b.reference.DockerReference()
+}
+
+func (b *BlobCache) PolicyConfigurationIdentity() string {
+ return b.reference.PolicyConfigurationIdentity()
+}
+
+func (b *BlobCache) PolicyConfigurationNamespaces() []string {
+ return b.reference.PolicyConfigurationNamespaces()
+}
+
+func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return b.reference.DeleteImage(ctx, sys)
+}
+
+// blobPath returns the path appropriate for storing a blob with digest.
+func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) string {
+ baseName := digest.String()
+ if isConfig {
+ baseName += ".config"
+ }
+ return filepath.Join(b.directory, baseName)
+}
+
+// findBlob checks if we have a blob for info in cache (whether a config or not)
+// and if so, returns its path and size, and whether it was stored as a config.
+// It returns ("", -1, false, nil) if the blob is not present in the cache.
+func (b *BlobCache) findBlob(info types.BlobInfo) (string, int64, bool, error) {
+ if info.Digest == "" {
+ return "", -1, false, nil
+ }
+
+ for _, isConfig := range []bool{false, true} {
+ path := b.blobPath(info.Digest, isConfig)
+ fileInfo, err := os.Stat(path)
+ if err == nil && (info.Size == -1 || info.Size == fileInfo.Size()) {
+ return path, fileInfo.Size(), isConfig, nil
+ }
+ if !os.IsNotExist(err) {
+ return "", -1, false, fmt.Errorf("checking size: %w", err)
+ }
+ }
+
+	return "", -1, false, nil
+}
+
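+// HasBlob reports whether a copy of the blob described by blobinfo is present
+// in the cache directory and, if so, its size on disk.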
+func (b *BlobCache) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+ path, size, _, err := b.findBlob(blobinfo)
+ if err != nil {
+ return false, -1, err
+ }
+ if path != "" {
+ return true, size, nil
+ }
+ return false, -1, nil
+}
+
+func (b *BlobCache) Directory() string {
+ return b.directory
+}
+
+func (b *BlobCache) ClearCache() error {
+ f, err := os.Open(b.directory)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ names, err := f.Readdirnames(-1)
+ if err != nil {
+ return fmt.Errorf("error reading directory %q: %w", b.directory, err)
+ }
+ for _, name := range names {
+ pathname := filepath.Join(b.directory, name)
+ if err = os.RemoveAll(pathname); err != nil {
+ return fmt.Errorf("clearing cache for %q: %w", transports.ImageName(b), err)
+ }
+ }
+ return nil
+}
+
+func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, b)
+}
diff --git a/pkg/blobcache/blobcache_test.go b/pkg/blobcache/blobcache_test.go
new file mode 100644
index 0000000..fa20d00
--- /dev/null
+++ b/pkg/blobcache/blobcache_test.go
@@ -0,0 +1,250 @@
+package blobcache
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ cp "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/directory"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/archive"
+ digest "github.com/opencontainers/go-digest"
+ specs "github.com/opencontainers/image-spec/specs-go"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ _ types.ImageReference = &BlobCache{}
+ _ types.ImageSource = &blobCacheSource{}
+ _ private.ImageSource = (*blobCacheSource)(nil)
+ _ types.ImageDestination = &blobCacheDestination{}
+ _ private.ImageDestination = (*blobCacheDestination)(nil)
+)
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if testing.Verbose() {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ os.Exit(m.Run())
+}
+
+// Create a layer containing a single file with the specified name (its contents
+// being the name repeated `repeat` times), compressed using the specified compression type, and
+// return the compressed layer blob along with the digest of its uncompressed form.
+func makeLayer(filename string, repeat int, compression archive.Compression) ([]byte, digest.Digest, error) {
+ var compressed, uncompressed bytes.Buffer
+ layer, err := archive.Generate(filename, strings.Repeat(filename, repeat))
+ if err != nil {
+ return nil, "", err
+ }
+ writer, err := archive.CompressStream(&compressed, compression)
+ if err != nil {
+ return nil, "", err
+ }
+ reader := io.TeeReader(layer, &uncompressed)
+ _, err = io.Copy(writer, reader)
+ writer.Close()
+ if err != nil {
+ return nil, "", err
+ }
+ return compressed.Bytes(), digest.FromBytes(uncompressed.Bytes()), nil
+}
+
+func TestBlobCache(t *testing.T) {
+ cacheDir := t.TempDir()
+
+ systemContext := types.SystemContext{BlobInfoCacheDir: "/dev/null/this/does/not/exist"}
+
+ for _, repeat := range []int{1, 10000} {
+ for _, desiredCompression := range []types.LayerCompression{types.PreserveOriginal, types.Compress, types.Decompress} {
+ for _, layerCompression := range []archive.Compression{archive.Uncompressed, archive.Gzip} {
+ // Create a layer with the specified layerCompression.
+ blobBytes, diffID, err := makeLayer(fmt.Sprintf("layer-content-%d", int(layerCompression)), repeat, layerCompression)
+ if err != nil {
+ t.Fatalf("error making layer: %v", err)
+ }
+ blobInfo := types.BlobInfo{
+ Digest: digest.FromBytes(blobBytes),
+ Size: int64(len(blobBytes)),
+ }
+ // Create a configuration that includes the diffID for the layer and not much else.
+ config := v1.Image{
+ RootFS: v1.RootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{diffID},
+ },
+ }
+ configBytes, err := json.Marshal(&config)
+ if err != nil {
+ t.Fatalf("error encoding image configuration: %v", err)
+ }
+ configInfo := types.BlobInfo{
+ Digest: digest.FromBytes(configBytes),
+ Size: int64(len(configBytes)),
+ }
+ // Create a manifest that uses this configuration and layer.
+ manifest := v1.Manifest{
+ Versioned: specs.Versioned{
+ SchemaVersion: 2,
+ },
+ MediaType: v1.MediaTypeImageManifest,
+ Config: v1.Descriptor{
+ MediaType: v1.MediaTypeImageConfig,
+ Digest: configInfo.Digest,
+ Size: configInfo.Size,
+ },
+ Layers: []v1.Descriptor{{
+ MediaType: v1.MediaTypeImageLayer,
+ Digest: blobInfo.Digest,
+ Size: blobInfo.Size,
+ }},
+ }
+ manifestBytes, err := json.Marshal(&manifest)
+ if err != nil {
+ t.Fatalf("error encoding image manifest: %v", err)
+ }
+ // Write this image to a "dir" destination with blob caching using this directory.
+ srcdir := t.TempDir()
+ srcRef, err := directory.NewReference(srcdir)
+ if err != nil {
+ t.Fatalf("error creating source image name reference for %q: %v", srcdir, err)
+ }
+ cachedSrcRef, err := NewBlobCache(srcRef, cacheDir, desiredCompression)
+ if err != nil {
+ t.Fatalf("failed to wrap reference in cache: %v", err)
+ }
+ destImage, err := cachedSrcRef.NewImageDestination(context.TODO(), nil)
+ if err != nil {
+ t.Fatalf("error opening source image for writing: %v", err)
+ }
+ _, err = destImage.PutBlob(context.TODO(), bytes.NewReader(blobBytes), blobInfo, none.NoCache, false)
+ if err != nil {
+ t.Fatalf("error writing layer blob to source image: %v", err)
+ }
+ _, err = destImage.PutBlob(context.TODO(), bytes.NewReader(configBytes), configInfo, none.NoCache, true)
+ if err != nil {
+ t.Fatalf("error writing config blob to source image: %v", err)
+ }
+ srcImage, err := srcRef.NewImageSource(context.TODO(), &systemContext)
+ if err != nil {
+ t.Fatalf("error opening source image: %v", err)
+ }
+ defer func() {
+ err := srcImage.Close()
+ if err != nil {
+ t.Fatalf("error closing source image: %v", err)
+ }
+ }()
+ err = destImage.PutManifest(context.TODO(), manifestBytes, nil)
+ if err != nil {
+ t.Fatalf("error writing manifest to source image: %v", err)
+ }
+ err = destImage.Commit(context.TODO(), image.UnparsedInstance(srcImage, nil))
+ if err != nil {
+ t.Fatalf("error committing source image: %v", err)
+ }
+ if err = destImage.Close(); err != nil {
+ t.Fatalf("error closing source image: %v", err)
+ }
+ // Check that the cache was populated.
+ cache, err := os.Open(cacheDir)
+ if err != nil {
+ t.Fatalf("error opening cache directory %q: %v", cacheDir, err)
+ }
+ defer cache.Close()
+ cachedNames, err := cache.Readdirnames(-1)
+ if err != nil {
+ t.Fatalf("error reading contents of cache directory %q: %v", cacheDir, err)
+ }
+ // Expect a layer blob, a config blob, and the manifest.
+ expected := 3
+ if layerCompression != archive.Uncompressed {
+ // Expect a compressed blob, an uncompressed blob, notes for each about the other, a config blob, and the manifest.
+ expected = 6
+ }
+ if len(cachedNames) != expected {
+ t.Fatalf("expected %d items in cache directory %q, got %d: %v", expected, cacheDir, len(cachedNames), cachedNames)
+ }
+ // Check that the blobs were all correctly stored.
+ for _, cachedName := range cachedNames {
+ if digest.Digest(cachedName).Validate() == nil {
+ cacheMember := filepath.Join(cacheDir, cachedName)
+ cacheMemberBytes, err := os.ReadFile(cacheMember)
+ if err != nil {
+ t.Fatalf("error reading cache member %q: %v", cacheMember, err)
+ }
+ if digest.FromBytes(cacheMemberBytes).String() != cachedName {
+ t.Fatalf("cache member %q was stored incorrectly!", cacheMember)
+ }
+ }
+ }
+ // Clear out anything in the source directory that probably isn't a manifest, so that we'll
+ // have to depend on the cached copies of some of the blobs.
+ srcNameDir, err := os.Open(srcdir)
+ if err != nil {
+ t.Fatalf("error opening source directory %q: %v", srcdir, err)
+ }
+ defer srcNameDir.Close()
+ srcNames, err := srcNameDir.Readdirnames(-1)
+ if err != nil {
+ t.Fatalf("error reading contents of source directory %q: %v", srcdir, err)
+ }
+ for _, name := range srcNames {
+ if !strings.HasPrefix(name, "manifest") {
+ os.Remove(filepath.Join(srcdir, name))
+ }
+ }
+ // Now that we've deleted some of the contents, try to copy from the source image
+ // to a second image. It should fail because the source is missing some blobs.
+ destdir := t.TempDir()
+ destRef, err := directory.NewReference(destdir)
+ if err != nil {
+ t.Fatalf("error creating destination image reference for %q: %v", destdir, err)
+ }
+ options := cp.Options{
+ SourceCtx: &systemContext,
+ DestinationCtx: &systemContext,
+ }
+ policyContext, err := signature.NewPolicyContext(&signature.Policy{
+ Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()},
+ })
+ if err != nil {
+ t.Fatalf("error creating signature policy context: %v", err)
+ }
+ _, err = cp.Image(context.TODO(), policyContext, destRef, srcRef, &options)
+ if err == nil {
+ t.Fatalf("expected an error copying the image, but got success")
+ } else {
+ if errors.Is(err, fs.ErrNotExist) {
+ t.Logf("ok: got expected does-not-exist error copying the image with blobs missing: %v", err)
+ } else {
+ t.Logf("got an error copying the image with missing blobs, but not sure which error: %v", err)
+ }
+ }
+ _, err = cp.Image(context.TODO(), policyContext, destRef, cachedSrcRef, &options)
+ if err != nil {
+ t.Fatalf("unexpected error copying the image using the cache: %v", err)
+ }
+ if err = cachedSrcRef.ClearCache(); err != nil {
+ t.Fatalf("error clearing cache: %v", err)
+ }
+ }
+ }
+ }
+}
diff --git a/pkg/blobcache/dest.go b/pkg/blobcache/dest.go
new file mode 100644
index 0000000..9bda085
--- /dev/null
+++ b/pkg/blobcache/dest.go
@@ -0,0 +1,295 @@
+package blobcache
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+type blobCacheDestination struct {
+ impl.Compat
+
+ reference *BlobCache
+ destination private.ImageDestination
+}
+
+func (b *BlobCache) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ dest, err := b.reference.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, fmt.Errorf("error creating new image destination %q: %w", transports.ImageName(b.reference), err)
+ }
+ logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(b.reference), b.directory)
+ d := &blobCacheDestination{reference: b, destination: imagedestination.FromPublic(dest)}
+ d.Compat = impl.AddCompat(d)
+ return d, nil
+}
+
+func (d *blobCacheDestination) Reference() types.ImageReference {
+ return d.reference
+}
+
+func (d *blobCacheDestination) Close() error {
+ logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference))
+ return d.destination.Close()
+}
+
+func (d *blobCacheDestination) SupportedManifestMIMETypes() []string {
+ return d.destination.SupportedManifestMIMETypes()
+}
+
+func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error {
+ return d.destination.SupportsSignatures(ctx)
+}
+
+func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression {
+ return d.destination.DesiredLayerCompression()
+}
+
+func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool {
+ return d.destination.AcceptsForeignLayerURLs()
+}
+
+func (d *blobCacheDestination) MustMatchRuntimeOS() bool {
+ return d.destination.MustMatchRuntimeOS()
+}
+
+func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.destination.IgnoresEmbeddedDockerReference()
+}
+
+// Decompress and save the contents of the decompressReader stream into the passed-in temporary
+// file. If we successfully save all of the data, rename the file to match the digest of the data,
+// and make notes about the relationship between the file that holds a copy of the compressed data
+// and this new file.
+func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
+ defer wg.Done()
+ // Decompress from and digest the reading end of that pipe.
+ decompressed, err3 := archive.DecompressStream(decompressReader)
+ digester := digest.Canonical.Digester()
+ if err3 == nil {
+ // Read the decompressed data through the filter over the pipe, blocking until the
+ // writing end is closed.
+ _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
+ } else {
+ // Drain the pipe to keep from stalling the PutBlob() thread.
+ if _, err := io.Copy(io.Discard, decompressReader); err != nil {
+ logrus.Debugf("error draining the pipe: %v", err)
+ }
+ }
+	decompressReader.Close()
+	if decompressed != nil {
+		decompressed.Close()
+	}
+	tempFile.Close()
+ // Determine the name that we should give to the uncompressed copy of the blob.
+ decompressedFilename := d.reference.blobPath(digester.Digest(), isConfig)
+ if err3 == nil {
+ // Rename the temporary file.
+ if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil {
+ logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3)
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ } else {
+ *alternateDigest = digester.Digest()
+ // Note the relationship between the two files.
+ if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3)
+ }
+ if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3)
+ }
+ }
+ } else {
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ }
+}
+
+func (d *blobCacheDestination) HasThreadSafePutBlob() bool {
+ return d.destination.HasThreadSafePutBlob()
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ var tempfile *os.File
+ var err error
+ var n int
+ var alternateDigest digest.Digest
+ var closer io.Closer
+ wg := new(sync.WaitGroup)
+ needToWait := false
+ compression := archive.Uncompressed
+ if inputInfo.Digest != "" {
+ filename := d.reference.blobPath(inputInfo.Digest, options.IsConfig)
+ tempfile, err = os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
+ if err == nil {
+ stream = io.TeeReader(stream, tempfile)
+ defer func() {
+ if err == nil {
+ if err = os.Rename(tempfile.Name(), filename); err != nil {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ err = fmt.Errorf("error renaming new layer for blob %q into place at %q: %w", inputInfo.Digest.String(), filename, err)
+ }
+ } else {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ }
+ tempfile.Close()
+ }()
+ } else {
+ logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", filepath.Dir(filename), inputInfo.Digest.String(), err)
+ }
+ if !options.IsConfig {
+ initial := make([]byte, 8)
+ n, err = stream.Read(initial)
+ if n > 0 {
+ // Build a Reader that will still return the bytes that we just
+ // read, for PutBlob()'s sake.
+ stream = io.MultiReader(bytes.NewReader(initial[:n]), stream)
+ if n >= len(initial) {
+ compression = archive.DetectCompression(initial[:n])
+ }
+ if compression == archive.Gzip {
+ // The stream is compressed, so create a file which we'll
+ // use to store a decompressed copy.
+ decompressedTemp, err2 := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
+ if err2 != nil {
+ logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", filepath.Dir(filename), inputInfo.Digest.String(), err2)
+ } else {
+ // Write a copy of the compressed data to a pipe,
+ // closing the writing end of the pipe after
+ // PutBlob() returns.
+ decompressReader, decompressWriter := io.Pipe()
+ closer = decompressWriter
+ stream = io.TeeReader(stream, decompressWriter)
+ // Let saveStream() close the reading end and handle the temporary file.
+ wg.Add(1)
+ needToWait = true
+ go d.saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, options.IsConfig, &alternateDigest)
+ }
+ }
+ }
+ }
+ }
+ newBlobInfo, err := d.destination.PutBlobWithOptions(ctx, stream, inputInfo, options)
+ if closer != nil {
+ closer.Close()
+ }
+ if needToWait {
+ wg.Wait()
+ }
+ if err != nil {
+ return newBlobInfo, fmt.Errorf("error storing blob to image destination for cache %q: %w", transports.ImageName(d.reference), err)
+ }
+ if alternateDigest.Validate() == nil {
+ logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory)
+ } else {
+ logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory)
+ }
+ return newBlobInfo, nil
+}
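+
+// The caching in PutBlobWithOptions hinges on tee-ing the stream twice: once into a temporary
+// file (the compressed copy) and, for gzip input, once into an io.Pipe that a goroutine drains,
+// decompresses and digests. A minimal, self-contained sketch of that pattern (illustrative only,
+// not part of this package's API; the name writeBoth is hypothetical, and it assumes imports of
+// io, compress/gzip and github.com/opencontainers/go-digest):
+//
+//	func writeBoth(src io.Reader, compressedCopy io.Writer) (digest.Digest, error) {
+//		pr, pw := io.Pipe()
+//		digester := digest.Canonical.Digester()
+//		done := make(chan error, 1)
+//		go func() { // drain and decompress the second copy of the stream
+//			defer pr.Close()
+//			gz, err := gzip.NewReader(pr)
+//			if err != nil {
+//				io.Copy(io.Discard, pr) // keep the writer side from blocking
+//				done <- err
+//				return
+//			}
+//			_, err = io.Copy(digester.Hash(), gz)
+//			done <- err
+//		}()
+//		_, err := io.Copy(io.MultiWriter(compressedCopy, pw), src)
+//		pw.Close() // signals EOF to the goroutine
+//		if gerr := <-done; err == nil {
+//			err = gerr
+//		}
+//		return digester.Digest(), err
+//	}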
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (d *blobCacheDestination) SupportsPutBlobPartial() bool {
+ return d.destination.SupportsPutBlobPartial()
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
+ return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalBlobMatchesRequiredCompression(options) {
+ return false, private.ReusedBlob{}, nil
+ }
+ present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options)
+ if err != nil || present {
+ return present, reusedInfo, err
+ }
+
+ blobPath, _, isConfig, err := d.reference.findBlob(info)
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ if blobPath != "" {
+ f, err := os.Open(blobPath)
+ if err == nil {
+ defer f.Close()
+ uploadedInfo, err := d.destination.PutBlobWithOptions(ctx, f, info, private.PutBlobOptions{
+ Cache: options.Cache,
+ IsConfig: isConfig,
+ EmptyLayer: options.EmptyLayer,
+ LayerIndex: options.LayerIndex,
+ })
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ return true, private.ReusedBlob{Digest: uploadedInfo.Digest, Size: uploadedInfo.Size}, nil
+ }
+ }
+
+ return false, private.ReusedBlob{}, nil
+}
+
+func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error {
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
+ } else {
+ filename := d.reference.blobPath(manifestDigest, false)
+ if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
+ logrus.Warnf("error saving manifest as %q: %v", filename, err)
+ }
+ }
+ return d.destination.PutManifest(ctx, manifestBytes, instanceDigest)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *blobCacheDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ return d.destination.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
+}
+
+func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ return d.destination.Commit(ctx, unparsedToplevel)
+}
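+
+// A rough usage sketch for this destination wrapper (illustrative only; it assumes the
+// NewBlobCache(ref, directory, compress) constructor defined elsewhere in this package, and it
+// elides signature-policy setup and error handling):
+//
+//	srcRef, _ := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
+//	destRef, _ := alltransports.ParseImageName("containers-storage:busybox:latest")
+//	cached, _ := blobcache.NewBlobCache(destRef, "/var/tmp/blob-cache", types.PreserveOriginal)
+//	// copy.Image writes through blobCacheDestination, leaving per-blob files in /var/tmp/blob-cache.
+//	_, _ = copy.Image(ctx, policyContext, cached, srcRef, nil)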
diff --git a/pkg/blobcache/src.go b/pkg/blobcache/src.go
new file mode 100644
index 0000000..2fe108c
--- /dev/null
+++ b/pkg/blobcache/src.go
@@ -0,0 +1,270 @@
+package blobcache
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+type blobCacheSource struct {
+ impl.Compat
+
+ reference *BlobCache
+ source private.ImageSource
+ sys types.SystemContext
+ // this mutex synchronizes the counters below
+ mu sync.Mutex
+ cacheHits int64
+ cacheMisses int64
+ cacheErrors int64
+}
+
+func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ src, err := b.reference.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, fmt.Errorf("error creating new image source %q: %w", transports.ImageName(b.reference), err)
+ }
+ logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(b.reference), b.directory, b.compress)
+ s := &blobCacheSource{reference: b, source: imagesource.FromPublic(src), sys: *sys}
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+func (s *blobCacheSource) Reference() types.ImageReference {
+ return s.reference
+}
+
+func (s *blobCacheSource) Close() error {
+ logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors)
+ return s.source.Close()
+}
+
+func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ filename := s.reference.blobPath(*instanceDigest, false)
+ manifestBytes, err := os.ReadFile(filename)
+ if err == nil {
+			s.mu.Lock()
+			s.cacheHits++
+			s.mu.Unlock()
+ return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil
+ }
+ if !os.IsNotExist(err) {
+			s.mu.Lock()
+			s.cacheErrors++
+			s.mu.Unlock()
+ return nil, "", fmt.Errorf("checking for manifest file: %w", err)
+ }
+ }
+	s.mu.Lock()
+	s.cacheMisses++
+	s.mu.Unlock()
+ return s.source.GetManifest(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) HasThreadSafeGetBlob() bool {
+ return s.source.HasThreadSafeGetBlob()
+}
+
+func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ blobPath, size, _, err := s.reference.findBlob(blobinfo)
+ if err != nil {
+ return nil, -1, err
+ }
+ if blobPath != "" {
+ f, err := os.Open(blobPath)
+ if err == nil {
+ s.mu.Lock()
+ s.cacheHits++
+ s.mu.Unlock()
+ return f, size, nil
+ }
+ if !os.IsNotExist(err) {
+ s.mu.Lock()
+ s.cacheErrors++
+ s.mu.Unlock()
+ return nil, -1, fmt.Errorf("checking for cache: %w", err)
+ }
+ }
+ s.mu.Lock()
+ s.cacheMisses++
+ s.mu.Unlock()
+ rc, size, err := s.source.GetBlob(ctx, blobinfo, cache)
+ if err != nil {
+ return rc, size, fmt.Errorf("error reading blob from source image %q: %w", transports.ImageName(s.reference), err)
+ }
+ return rc, size, nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *blobCacheSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ return s.source.GetSignaturesWithFormat(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ signatures, err := s.source.GetSignaturesWithFormat(ctx, instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("error checking if image %q has signatures: %w", transports.ImageName(s.reference), err)
+ }
+ canReplaceBlobs := len(signatures) == 0
+
+ infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("error getting layer infos for copying image %q through cache: %w", transports.ImageName(s.reference), err)
+ }
+ if infos == nil {
+ img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest))
+ if err != nil {
+ return nil, fmt.Errorf("error opening image to get layer infos for copying image %q through cache: %w", transports.ImageName(s.reference), err)
+ }
+ infos = img.LayerInfos()
+ }
+
+ if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
+ replacedInfos := make([]types.BlobInfo, 0, len(infos))
+ for _, info := range infos {
+ var replaceDigest []byte
+ var err error
+ blobFile := s.reference.blobPath(info.Digest, false)
+ var alternate string
+ switch s.reference.compress {
+ case types.Compress:
+ alternate = blobFile + compressedNote
+ replaceDigest, err = os.ReadFile(alternate)
+ case types.Decompress:
+ alternate = blobFile + decompressedNote
+ replaceDigest, err = os.ReadFile(alternate)
+ }
+ if err == nil && digest.Digest(replaceDigest).Validate() == nil {
+ alternate = s.reference.blobPath(digest.Digest(replaceDigest), false)
+ fileInfo, err := os.Stat(alternate)
+ if err == nil {
+ switch info.MediaType {
+ case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = v1.MediaTypeImageLayerGzip
+ info.CompressionAlgorithm = &compression.Gzip
+ case types.Decompress: // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
+ info.MediaType = v1.MediaTypeImageLayer
+ info.CompressionAlgorithm = nil
+ }
+ case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = manifest.DockerV2Schema2LayerMediaType
+ info.CompressionAlgorithm = &compression.Gzip
+ case types.Decompress:
+ // nope, not going to suggest anything, it's not allowed by the spec
+ replacedInfos = append(replacedInfos, info)
+ continue
+ }
+ }
+ logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String())
+ info.CompressionOperation = s.reference.compress
+ info.Digest = digest.Digest(replaceDigest)
+ info.Size = fileInfo.Size()
+ logrus.Debugf("info = %#v", info)
+ }
+ }
+ replacedInfos = append(replacedInfos, info)
+ }
+ infos = replacedInfos
+ }
+
+ return infos, nil
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *blobCacheSource) SupportsGetBlobAt() bool {
+ return s.source.SupportsGetBlobAt()
+}
+
+// streamChunksFromFile generates the channels returned by GetBlobAt for chunks of a seekable file
+func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.ReadSeekCloser,
+ chunks []private.ImageSourceChunk) {
+ defer close(streams)
+ defer close(errs)
+ defer file.Close()
+
+ for _, c := range chunks {
+ // Always seek to the desired offset; that way we don’t need to care about the consumer
+ // not reading all of the chunk, or about the position going backwards.
+ if _, err := file.Seek(int64(c.Offset), io.SeekStart); err != nil {
+ errs <- err
+ break
+ }
+ s := signalCloseReader{
+ closed: make(chan struct{}),
+ stream: io.LimitReader(file, int64(c.Length)),
+ }
+ streams <- s
+
+ // Wait until the stream is closed before going to the next chunk
+ <-s.closed
+ }
+}
+
+type signalCloseReader struct {
+ closed chan struct{}
+ stream io.Reader
+}
+
+func (s signalCloseReader) Read(p []byte) (int, error) {
+ return s.stream.Read(p)
+}
+
+func (s signalCloseReader) Close() error {
+ close(s.closed)
+ return nil
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *blobCacheSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ blobPath, _, _, err := s.reference.findBlob(info)
+ if err != nil {
+ return nil, nil, err
+ }
+ if blobPath != "" {
+ f, err := os.Open(blobPath)
+ if err == nil {
+ s.mu.Lock()
+ s.cacheHits++
+ s.mu.Unlock()
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+ go streamChunksFromFile(streams, errs, f, chunks)
+ return streams, errs, nil
+ }
+ if !os.IsNotExist(err) {
+ s.mu.Lock()
+ s.cacheErrors++
+ s.mu.Unlock()
+ return nil, nil, fmt.Errorf("checking for cache: %w", err)
+ }
+ }
+ s.mu.Lock()
+ s.cacheMisses++
+ s.mu.Unlock()
+ streams, errs, err := s.source.GetBlobAt(ctx, info, chunks)
+ if err != nil {
+ return streams, errs, fmt.Errorf("error reading blob chunks from source image %q: %w", transports.ImageName(s.reference), err)
+ }
+ return streams, errs, nil
+}
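+
+// Callers of GetBlobAt must fully read each returned reader, in the order received, and then
+// close it; for the cached path, Close() on signalCloseReader is what lets streamChunksFromFile
+// move on to the next chunk. A minimal consumption loop (illustrative only; out is any io.Writer):
+//
+//	streams, errs, err := src.GetBlobAt(ctx, info, chunks)
+//	if err != nil {
+//		return err
+//	}
+//	for rc := range streams {
+//		_, copyErr := io.Copy(out, rc) // consume the whole chunk
+//		rc.Close()                     // allow the next chunk to be produced
+//		if copyErr != nil {
+//			return copyErr
+//		}
+//	}
+//	if err := <-errs; err != nil {
+//		return err
+//	}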
diff --git a/pkg/blobcache/src_test.go b/pkg/blobcache/src_test.go
new file mode 100644
index 0000000..a15498c
--- /dev/null
+++ b/pkg/blobcache/src_test.go
@@ -0,0 +1,56 @@
+package blobcache
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/stretchr/testify/assert"
+)
+
+func readNextStream(streams chan io.ReadCloser, errs chan error) ([]byte, error) {
+ select {
+ case r := <-streams:
+ if r == nil {
+ return nil, nil
+ }
+ defer r.Close()
+ return io.ReadAll(r)
+ case err := <-errs:
+ return nil, err
+ }
+}
+
+// readSeekerNopCloser adds a no-op Close() method to a readSeeker
+type readSeekerNopCloser struct {
+ io.ReadSeeker
+}
+
+func (c *readSeekerNopCloser) Close() error {
+ return nil
+}
+
+func TestStreamChunksFromFile(t *testing.T) {
+ file := &readSeekerNopCloser{bytes.NewReader([]byte("123456789"))}
+ streams := make(chan io.ReadCloser)
+ errs := make(chan error)
+ chunks := []private.ImageSourceChunk{
+ {Offset: 1, Length: 2},
+ {Offset: 4, Length: 1},
+ }
+ go streamChunksFromFile(streams, errs, file, chunks)
+
+ for _, c := range []struct {
+ expectedData []byte
+ expectedError error
+ }{
+ {[]byte("23"), nil},
+ {[]byte("5"), nil},
+ {[]byte(nil), nil},
+ } {
+ data, err := readNextStream(streams, errs)
+ assert.Equal(t, c.expectedData, data)
+ assert.Equal(t, c.expectedError, err)
+ }
+}
diff --git a/pkg/blobinfocache/boltdb/boltdb.go b/pkg/blobinfocache/boltdb/boltdb.go
new file mode 100644
index 0000000..4f66bce
--- /dev/null
+++ b/pkg/blobinfocache/boltdb/boltdb.go
@@ -0,0 +1,419 @@
+// Package boltdb implements a BlobInfoCache backed by BoltDB.
+package boltdb
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ bolt "go.etcd.io/bbolt"
+)
+
+var (
+ // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+ // we can simply start over with a different filename; update blobInfoCacheFilename.
+
+ // FIXME: For CRI-O, does this need to hide information between different users?
+
+ // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
+ uncompressedDigestBucket = []byte("uncompressedDigest")
+ // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
+ // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
+ digestCompressorBucket = []byte("digestCompressor")
+ // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+ // (as a set of key=digest, value="" pairs)
+ digestByUncompressedBucket = []byte("digestByUncompressed")
+ // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+ // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+ knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+ refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+ mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+ // pathLocks contains a lock for each currently open file.
+ // This must be global so that independently created instances of boltDBCache exclude each other.
+ // The map is protected by pathLocksMutex.
+	// FIXME? Should this be based on device:inode numbers instead of paths?
+ pathLocks = map[string]*pathLock{}
+ pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+ pl := func() *pathLock { // A scope for defer
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if ok {
+ pl.refCount++
+ } else {
+ pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+ pathLocks[path] = pl
+ }
+ return pl
+ }()
+ pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if !ok {
+ // Should this return an error instead? BlobInfoCache ultimately ignores errors…
+ panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+ }
+ pl.mutex.Unlock()
+ pl.refCount--
+ if pl.refCount == 0 {
+ delete(pathLocks, path)
+ }
+}
+
+// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type cache struct {
+ path string
+}
+
+// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+//
+// Deprecated: The BoltDB implementation triggers a panic() on some database format errors; that does not allow
+// practical error recovery / fallback.
+//
+// Use blobinfocache.DefaultCache if at all possible; if not, the pkg/blobinfocache/sqlite implementation.
+func New(path string) types.BlobInfoCache {
+ return new2(path)
+}
+func new2(path string) *cache {
+ return &cache{path: path}
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (bdc *cache) Open() {
+}
+
+// Close destroys state created by Open().
+func (bdc *cache) Close() {
+}
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+ // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+ // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+ // a read lock, blocking any future writes.
+ // Hence this preliminary check, which is RACY: Another process could remove the file
+ // between the Lstat call and opening the database.
+ if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+ return err
+ }
+
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, nil)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+ if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+ if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+ d, err := digest.Parse(string(uncompressedBytes))
+ if err == nil {
+ return d
+ }
+ // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ }
+	// Presence in digestByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+ if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+ c := b.Cursor()
+ if k, _ := c.First(); k != nil { // The bucket is non-empty
+ return anyDigest
+ }
+ }
+ }
+ return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ var res digest.Digest
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ res = bdc.uncompressedDigest(tx, anyDigest)
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
+ if err != nil {
+ return err
+ }
+ key := []byte(anyDigest.String())
+ if previousBytes := b.Get(key); previousBytes != nil {
+ previous, err := digest.Parse(string(previousBytes))
+ if err != nil {
+ return err
+ }
+ if previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ }
+ if err := b.Put(key, []byte(uncompressed.String())); err != nil {
+ return err
+ }
+
+ b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
+// compressor, or is blobinfocache.Uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(digestCompressorBucket)
+ if err != nil {
+ return err
+ }
+ key := []byte(anyDigest.String())
+ if previousBytes := b.Get(key); previousBytes != nil {
+ if string(previousBytes) != compressorName {
+ logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName)
+ }
+ }
+ if compressorName == blobinfocache.UnknownCompression {
+ return b.Delete(key)
+ }
+ return b.Put(key, []byte(compressorName))
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
+ if err != nil {
+ return err
+ }
+ value, err := time.Now().MarshalBinary()
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket
+// (which might be nil) with corresponding compression
+// info from compressionBucket (which might be nil), and returns the result of appending them
+// to candidates.
+// v2Output allows including candidates with unknown location, and filters out candidates
+// with unknown compression.
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
+ digestKey := []byte(digest.String())
+ compressorName := blobinfocache.UnknownCompression
+ if compressionBucket != nil {
+ // the bucket won't exist if the cache was created by a v1 implementation and
+ // hasn't yet been updated by a v2 implementation
+ if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
+ compressorName = string(compressorNameValue)
+ }
+ }
+ if compressorName == blobinfocache.UnknownCompression && v2Output {
+ return candidates
+ }
+ var b *bolt.Bucket
+ if scopeBucket != nil {
+ b = scopeBucket.Bucket(digestKey)
+ }
+ if b != nil {
+ _ = b.ForEach(func(k, v []byte) error {
+ t := time.Time{}
+ if err := t.UnmarshalBinary(v); err != nil {
+ return err
+ }
+ candidates = append(candidates, prioritize.CandidateWithTime{
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ Location: types.BICLocationReference{Opaque: string(k)},
+ },
+ LastSeen: t,
+ })
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+ } else if v2Output {
+ candidates = append(candidates, prioritize.CandidateWithTime{
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ UnknownLocation: true,
+ Location: types.BICLocationReference{Opaque: ""},
+ },
+ LastSeen: time.Time{},
+ })
+ }
+ return candidates
+}
+
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
+ return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+}
+
+func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
+ res := []prioritize.CandidateWithTime{}
+ var uncompressedDigestValue digest.Digest // = ""
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ scopeBucket := tx.Bucket(knownLocationsBucket)
+ if scopeBucket != nil {
+ scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
+ }
+ if scopeBucket != nil {
+ scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
+ }
+ // compressionBucket won't have been created if previous writers never recorded info about compression,
+ // and we don't want to fail just because of that
+ compressionBucket := tx.Bucket(digestCompressorBucket)
+
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, v2Output)
+ if canSubstitute {
+ if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
+ b := tx.Bucket(digestByUncompressedBucket)
+ if b != nil {
+ b = b.Bucket([]byte(uncompressedDigestValue.String()))
+ if b != nil {
+ if err := b.ForEach(func(k, _ []byte) error {
+ d, err := digest.Parse(string(k))
+ if err != nil {
+ return err
+ }
+ if d != primaryDigest && d != uncompressedDigestValue {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, v2Output)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ if uncompressedDigestValue != primaryDigest {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, v2Output)
+ }
+ }
+ }
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+
+ return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+}
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+}
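+
+// A small sketch of how this cache is typically exercised (illustrative only; the path and the
+// transport/scope values are placeholders, and New is deprecated in favour of the SQLite
+// implementation, but the call pattern is the same for both):
+//
+//	bic := boltdb.New("/var/lib/containers/cache/blob-info-cache-v1.boltdb")
+//	bic.RecordDigestUncompressedPair(compressedDigest, uncompressedDigest)
+//	bic.RecordKnownLocation(transport, scope, compressedDigest, types.BICLocationReference{Opaque: "layers/123"})
+//	// Later, ask for substitutable blobs; with canSubstitute=true this also returns variants
+//	// sharing the same uncompressed digest.
+//	candidates := bic.CandidateLocations(transport, scope, compressedDigest, true)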
diff --git a/pkg/blobinfocache/boltdb/boltdb_test.go b/pkg/blobinfocache/boltdb/boltdb_test.go
new file mode 100644
index 0000000..50b4784
--- /dev/null
+++ b/pkg/blobinfocache/boltdb/boltdb_test.go
@@ -0,0 +1,26 @@
+package boltdb
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/test"
+)
+
+var _ blobinfocache.BlobInfoCache2 = &cache{}
+
+func newTestCache(t *testing.T) blobinfocache.BlobInfoCache2 {
+ // We need a separate temporary directory here, because bolt.Open(…, &bolt.Options{Readonly:true}) can't deal with
+ // an existing but empty file, and incorrectly fails without releasing the lock - which in turn causes
+ // any future writes to hang. Creating a temporary directory allows us to use a path to a
+ // non-existent file, thus replicating the expected conditions for creating a new DB.
+ dir := t.TempDir()
+ return new2(filepath.Join(dir, "db"))
+}
+
+func TestNew(t *testing.T) {
+ test.GenericCache(t, newTestCache)
+}
+
+// FIXME: Tests for the various corner cases / failure cases of boltDBCache should be added here.
diff --git a/pkg/blobinfocache/default.go b/pkg/blobinfocache/default.go
new file mode 100644
index 0000000..037572b
--- /dev/null
+++ b/pkg/blobinfocache/default.go
@@ -0,0 +1,76 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/v5/internal/rootless"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/pkg/blobinfocache/sqlite"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // blobInfoCacheFilename is the file name used for blob info caches.
+ // If the format changes in an incompatible way, increase the version number.
+ blobInfoCacheFilename = "blob-info-cache-v1.sqlite"
+	// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+ systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+ if sys != nil && sys.BlobInfoCacheDir != "" {
+ return sys.BlobInfoCacheDir, nil
+ }
+
+ // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+ // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+ if euid == 0 {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+ }
+ return systemBlobInfoCacheDir, nil
+ }
+
+ // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+ dataDir := os.Getenv("XDG_DATA_HOME")
+ if dataDir == "" {
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ dataDir = filepath.Join(home, ".local", "share")
+ }
+ return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+ dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+ if err != nil {
+ logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+ return memory.New()
+ }
+ path := filepath.Join(dir, blobInfoCacheFilename)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
+ return memory.New()
+ }
+
+ // It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
+	// as a global singleton, for the vast majority of callers who don’t override the cache location.
+ // OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely,
+ // and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%.
+
+ cache, err := sqlite.New(path)
+ if err != nil {
+ logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err)
+ return memory.New()
+ }
+ logrus.Debugf("Using SQLite blob info cache at %s", path)
+ return cache
+}
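+
+// DefaultCache is what callers normally use to obtain a types.BlobInfoCache to pass to
+// transport-level PutBlob/TryReusingBlob calls. A minimal sketch (illustrative only; transport,
+// scope and the digests are placeholders):
+//
+//	sys := &types.SystemContext{} // or nil; sys.BlobInfoCacheDir can override the default location
+//	bic := blobinfocache.DefaultCache(sys)
+//	// Record locally verified facts, then consult them when deciding whether a blob can be reused.
+//	bic.RecordDigestUncompressedPair(layerDigest, diffIDDigest)
+//	candidates := bic.CandidateLocations(transport, scope, layerDigest, true)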
diff --git a/pkg/blobinfocache/default_test.go b/pkg/blobinfocache/default_test.go
new file mode 100644
index 0000000..41c32fe
--- /dev/null
+++ b/pkg/blobinfocache/default_test.go
@@ -0,0 +1,134 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/pkg/blobinfocache/sqlite"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBlobInfoCacheDir(t *testing.T) {
+ const nondefaultDir = "/this/is/not/the/default/cache/dir"
+ const rootPrefix = "/root/prefix"
+ const homeDir = "/fake/home/directory"
+ const xdgDataHome = "/fake/home/directory/XDG"
+
+ t.Setenv("HOME", homeDir)
+ t.Setenv("XDG_DATA_HOME", xdgDataHome)
+
+ // The default paths and explicit overrides
+ for _, c := range []struct {
+ sys *types.SystemContext
+ euid int
+ expected string
+ }{
+ // The common case
+ {nil, 0, systemBlobInfoCacheDir},
+ {nil, 1, filepath.Join(xdgDataHome, "containers", "cache")},
+ // There is a context, but it does not override the path.
+ {&types.SystemContext{}, 0, systemBlobInfoCacheDir},
+ {&types.SystemContext{}, 1, filepath.Join(xdgDataHome, "containers", "cache")},
+ // Path overridden
+ {&types.SystemContext{BlobInfoCacheDir: nondefaultDir}, 0, nondefaultDir},
+ {&types.SystemContext{BlobInfoCacheDir: nondefaultDir}, 1, nondefaultDir},
+ // Root overridden
+ {&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, 0, filepath.Join(rootPrefix, systemBlobInfoCacheDir)},
+ {&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, 1, filepath.Join(xdgDataHome, "containers", "cache")},
+ // Root and path overrides present simultaneously,
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ BlobInfoCacheDir: nondefaultDir,
+ },
+ 0, nondefaultDir,
+ },
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ BlobInfoCacheDir: nondefaultDir,
+ },
+ 1, nondefaultDir,
+ },
+ } {
+ path, err := blobInfoCacheDir(c.sys, c.euid)
+ require.NoError(t, err)
+ assert.Equal(t, c.expected, path)
+ }
+
+ // Paths used by unprivileged users
+ for caseIndex, c := range []struct {
+ xdgDH, home, expected string
+ }{
+ {"", homeDir, filepath.Join(homeDir, ".local", "share", "containers", "cache")}, // HOME only
+ {xdgDataHome, "", filepath.Join(xdgDataHome, "containers", "cache")}, // XDG_DATA_HOME only
+ {xdgDataHome, homeDir, filepath.Join(xdgDataHome, "containers", "cache")}, // both
+ {"", "", ""}, // neither
+ } {
+ t.Run(fmt.Sprintf("unprivileged %d", caseIndex), func(t *testing.T) {
+ // Always use t.Setenv() to ensure the environment variable is restored to the original value after the test.
+ // Then, in cases where the test needs the variable unset (not just set to empty), use a raw os.Unsetenv()
+ // to override the situation. (Sadly there isn’t a t.Unsetenv() as of Go 1.17.)
+ t.Setenv("XDG_DATA_HOME", c.xdgDH)
+ if c.xdgDH == "" {
+ os.Unsetenv("XDG_DATA_HOME")
+ }
+ t.Setenv("HOME", c.home)
+ if c.home == "" {
+ os.Unsetenv("HOME")
+ }
+ for _, sys := range []*types.SystemContext{nil, {}} {
+ path, err := blobInfoCacheDir(sys, 1)
+ if c.expected != "" {
+ require.NoError(t, err)
+ assert.Equal(t, c.expected, path)
+ } else {
+ assert.Error(t, err)
+ }
+ }
+ })
+ }
+}
+
+func TestDefaultCache(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ // Success
+ normalDir := filepath.Join(tmpDir, "normal")
+ c := DefaultCache(&types.SystemContext{BlobInfoCacheDir: normalDir})
+ // This is ugly hard-coding internals of sqlite.cache
+ sqliteCache, err := sqlite.New(filepath.Join(normalDir, blobInfoCacheFilename))
+ require.NoError(t, err)
+ assert.Equal(t, sqliteCache, c)
+
+ // Error running blobInfoCacheDir:
+ // Use t.Setenv() just as a way to set up cleanup to original values; then os.Unsetenv() to test a situation where the values are not set.
+ t.Setenv("HOME", "")
+ os.Unsetenv("HOME")
+ t.Setenv("XDG_DATA_HOME", "")
+ os.Unsetenv("XDG_DATA_HOME")
+ c = DefaultCache(nil)
+ assert.IsType(t, memory.New(), c)
+
+ // Error creating the parent directory:
+ unwritableDir := filepath.Join(tmpDir, "unwritable")
+ err = os.Mkdir(unwritableDir, 0700)
+ require.NoError(t, err)
+ defer func() {
+ err = os.Chmod(unwritableDir, 0700) // To make it possible to remove it again
+ require.NoError(t, err)
+ }()
+ err = os.Chmod(unwritableDir, 0500)
+ require.NoError(t, err)
+ st, _ := os.Stat(unwritableDir)
+ logrus.Errorf("%s: %#v", unwritableDir, st)
+ c = DefaultCache(&types.SystemContext{BlobInfoCacheDir: filepath.Join(unwritableDir, "subdirectory")})
+ assert.IsType(t, memory.New(), c)
+}
diff --git a/pkg/blobinfocache/internal/prioritize/prioritize.go b/pkg/blobinfocache/internal/prioritize/prioritize.go
new file mode 100644
index 0000000..9756268
--- /dev/null
+++ b/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -0,0 +1,139 @@
+// Package prioritize provides utilities for prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
+package prioritize
+
+import (
+ "sort"
+ "time"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/opencontainers/go-digest"
+)
+
+// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2.
+// This is a heuristic/guess, and could well use a different value.
+const replacementUnknownLocationAttempts = 2
+
+// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type CandidateWithTime struct {
+ Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
+ LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
+}
+
+// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
+// along with the specially-treated digest values for the implementation of sort.Interface.Less
+type candidateSortState struct {
+ cs []CandidateWithTime // The entries to sort
+ primaryDigest digest.Digest // The digest the user actually asked for
+ uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+}
+
+func (css *candidateSortState) Len() int {
+ return len(css.cs)
+}
+
+func (css *candidateSortState) Less(i, j int) bool {
+ xi := css.cs[i]
+ xj := css.cs[j]
+
+ // primaryDigest entries come first, more recent first.
+ // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
+
+ // First, deal with the primaryDigest/uncompressedDigest cases:
+ if xi.Candidate.Digest != xj.Candidate.Digest {
+ // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
+ if xi.Candidate.Digest == css.primaryDigest {
+ return true
+ }
+ if xj.Candidate.Digest == css.primaryDigest {
+ return false
+ }
+ if css.uncompressedDigest != "" {
+ if xi.Candidate.Digest == css.uncompressedDigest {
+ return false
+ }
+ if xj.Candidate.Digest == css.uncompressedDigest {
+ return true
+ }
+ }
+ } else { // xi.Candidate.Digest == xj.Candidate.Digest
+ // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
+ if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
+ return xi.LastSeen.After(xj.LastSeen)
+ }
+ }
+
+ // Neither of the digests are primaryDigest/uncompressedDigest:
+ if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
+ return xi.LastSeen.After(xj.LastSeen)
+ }
+ // Fall back to digest, if timestamps end up _exactly_ the same (how?!)
+ return xi.Candidate.Digest < xj.Candidate.Digest
+}
+
+func (css *candidateSortState) Swap(i, j int) {
+ css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
+// number of entries to limit for known and unknown location separately, only to make testing simpler.
+// TODO: this function is no longer destructive: the prioritized result is built from copies of the
+// original candidate set, so in the future we may want to rename this public API and drop the "destructive" prefix.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
+	// Split the candidates into those with a known location and those without one,
+	// and limit each group separately.
+ var knownLocationCandidates []CandidateWithTime
+ var unknownLocationCandidates []CandidateWithTime
+ // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
+ // compare equal.
+ // FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
+ sort.Sort(&candidateSortState{
+ cs: cs,
+ primaryDigest: primaryDigest,
+ uncompressedDigest: uncompressedDigest,
+ })
+ for _, candidate := range cs {
+ if candidate.Candidate.UnknownLocation {
+ unknownLocationCandidates = append(unknownLocationCandidates, candidate)
+ } else {
+ knownLocationCandidates = append(knownLocationCandidates, candidate)
+ }
+ }
+
+ knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
+ remainingCapacity := totalLimit - knownLocationCandidatesUsed
+ unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates)))
+ res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
+ for i := 0; i < knownLocationCandidatesUsed; i++ {
+ res[i] = knownLocationCandidates[i].Candidate
+ }
+	// If candidates with an unknown location are present, add them to the final list as well.
+ for i := 0; i < unknownLocationCandidatesUsed; i++ {
+ res = append(res, unknownLocationCandidates[i].Candidate)
+ }
+ return res
+}
+
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), and returns an
+// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
+ return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
+}
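+
+// A compact sketch of how a cache implementation feeds this function (illustrative only; the
+// digests, locations and timestamps are placeholders):
+//
+//	candidates := []prioritize.CandidateWithTime{
+//		{Candidate: blobinfocache.BICReplacementCandidate2{Digest: gzipDigest, Location: loc1}, LastSeen: t1},
+//		{Candidate: blobinfocache.BICReplacementCandidate2{Digest: uncompressedDigest, Location: loc2}, LastSeen: t2},
+//	}
+//	// Entries matching primaryDigest sort first and uncompressedDigest last, the rest by recency;
+//	// the result is capped at replacementAttempts known-location candidates.
+//	best := prioritize.DestructivelyPrioritizeReplacementCandidates(candidates, primaryDigest, uncompressedDigest)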
diff --git a/pkg/blobinfocache/internal/prioritize/prioritize_test.go b/pkg/blobinfocache/internal/prioritize/prioritize_test.go
new file mode 100644
index 0000000..f66d842
--- /dev/null
+++ b/pkg/blobinfocache/internal/prioritize/prioritize_test.go
@@ -0,0 +1,183 @@
+package prioritize
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "golang.org/x/exp/slices"
+)
+
+const (
+ digestUncompressed = digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222")
+ digestCompressedA = digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333")
+ digestCompressedB = digest.Digest("sha256:4444444444444444444444444444444444444444444444444444444444444444")
+ digestCompressedPrimary = digest.Digest("sha256:6666666666666666666666666666666666666666666666666666666666666666")
+)
+
+var (
+ // cssLiteral contains a non-trivial candidateSortState shared among several tests below.
+ cssLiteral = candidateSortState{
+ cs: []CandidateWithTime{
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}, CompressorName: compressiontypes.XzAlgorithmName}, time.Unix(1, 0)},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(1, 1)},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}, CompressorName: blobinfocache.Uncompressed}, time.Unix(1, 1)},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}, CompressorName: blobinfocache.UnknownCompression}, time.Unix(1, 0)},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}, CompressorName: compressiontypes.Bzip2AlgorithmName}, time.Unix(1, 1)},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(1, 1)},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}, CompressorName: blobinfocache.Uncompressed}, time.Unix(2, 0)},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}, CompressorName: blobinfocache.UnknownCompression}, time.Unix(1, 0)},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}},
+ {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}},
+ },
+ primaryDigest: digestCompressedPrimary,
+ uncompressedDigest: digestUncompressed,
+ }
+ // cssExpectedReplacementCandidates is the fully-sorted, unlimited, result of prioritizing cssLiteral.
+ cssExpectedReplacementCandidates = []blobinfocache.BICReplacementCandidate2{
+ {Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}, CompressorName: compressiontypes.GzipAlgorithmName},
+ {Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}, CompressorName: blobinfocache.UnknownCompression},
+ {Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}, CompressorName: blobinfocache.Uncompressed},
+ {Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}, CompressorName: blobinfocache.Uncompressed},
+ {Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}, CompressorName: compressiontypes.Bzip2AlgorithmName},
+ {Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}, CompressorName: compressiontypes.XzAlgorithmName},
+ {Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}, CompressorName: compressiontypes.GzipAlgorithmName},
+ {Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}, CompressorName: blobinfocache.UnknownCompression},
+ {Digest: digestCompressedPrimary, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression},
+ {Digest: digestCompressedA, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression},
+ {Digest: digestCompressedB, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression},
+ {Digest: digestUncompressed, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression},
+ }
+)
+
+func TestCandidateSortStateLen(t *testing.T) {
+ css := cssLiteral
+ assert.Equal(t, 12, css.Len())
+
+ css.cs = []CandidateWithTime{}
+ assert.Equal(t, 0, css.Len())
+}
+
+func TestCandidateSortStateLess(t *testing.T) {
+ type p struct {
+ d digest.Digest
+ t int64
+ }
+
+	// Primary sort criterion; also ensure that time does not matter
+ for _, c := range []struct {
+ name string
+ res int
+ d0, d1 digest.Digest
+ }{
+ {"primary < any", -1, digestCompressedPrimary, digestCompressedA},
+ {"any < uncompressed", -1, digestCompressedA, digestUncompressed},
+ {"primary < uncompressed", -1, digestCompressedPrimary, digestUncompressed},
+ } {
+ for _, tms := range [][2]int64{{1, 2}, {2, 1}, {1, 1}} {
+ caseName := fmt.Sprintf("%s %v", c.name, tms)
+ css := candidateSortState{
+ cs: []CandidateWithTime{
+ {blobinfocache.BICReplacementCandidate2{Digest: c.d0, Location: types.BICLocationReference{Opaque: "L0"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(tms[0], 0)},
+ {blobinfocache.BICReplacementCandidate2{Digest: c.d1, Location: types.BICLocationReference{Opaque: "L1"}, CompressorName: compressiontypes.ZstdAlgorithmName}, time.Unix(tms[1], 0)},
+ },
+ primaryDigest: digestCompressedPrimary,
+ uncompressedDigest: digestUncompressed,
+ }
+ assert.Equal(t, c.res < 0, css.Less(0, 1), caseName)
+ assert.Equal(t, c.res > 0, css.Less(1, 0), caseName)
+
+ if c.d0 != digestUncompressed && c.d1 != digestUncompressed {
+ css.uncompressedDigest = ""
+ assert.Equal(t, c.res < 0, css.Less(0, 1), caseName)
+ assert.Equal(t, c.res > 0, css.Less(1, 0), caseName)
+
+ css.uncompressedDigest = css.primaryDigest
+ assert.Equal(t, c.res < 0, css.Less(0, 1), caseName)
+ assert.Equal(t, c.res > 0, css.Less(1, 0), caseName)
+ }
+ }
+ }
+
+ // Ordering within the three primary groups
+ for _, c := range []struct {
+ name string
+ res int
+ p0, p1 p
+ }{
+ {"primary: t=2 < t=1", -1, p{digestCompressedPrimary, 2}, p{digestCompressedPrimary, 1}},
+ {"primary: t=1 == t=1", 0, p{digestCompressedPrimary, 1}, p{digestCompressedPrimary, 1}},
+ {"uncompressed: t=2 < t=1", -1, p{digestUncompressed, 2}, p{digestUncompressed, 1}},
+ {"uncompressed: t=1 == t=1", 0, p{digestUncompressed, 1}, p{digestUncompressed, 1}},
+ {"any: t=2 < t=1, [d=A vs. d=B lower-priority]", -1, p{digestCompressedA, 2}, p{digestCompressedB, 1}},
+ {"any: t=2 < t=1, [d=B vs. d=A lower-priority]", -1, p{digestCompressedB, 2}, p{digestCompressedA, 1}},
+ {"any: t=2 < t=1, [d=A vs. d=A lower-priority]", -1, p{digestCompressedA, 2}, p{digestCompressedA, 1}},
+ {"any: t=1 == t=1, d=A < d=B", -1, p{digestCompressedA, 1}, p{digestCompressedB, 1}},
+ {"any: t=1 == t=1, d=A == d=A", 0, p{digestCompressedA, 1}, p{digestCompressedA, 1}},
+ } {
+ css := candidateSortState{
+ cs: []CandidateWithTime{
+ {blobinfocache.BICReplacementCandidate2{Digest: c.p0.d, Location: types.BICLocationReference{Opaque: "L0"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(c.p0.t, 0)},
+ {blobinfocache.BICReplacementCandidate2{Digest: c.p1.d, Location: types.BICLocationReference{Opaque: "L1"}, CompressorName: compressiontypes.ZstdAlgorithmName}, time.Unix(c.p1.t, 0)},
+ },
+ primaryDigest: digestCompressedPrimary,
+ uncompressedDigest: digestUncompressed,
+ }
+ assert.Equal(t, c.res < 0, css.Less(0, 1), c.name)
+ assert.Equal(t, c.res > 0, css.Less(1, 0), c.name)
+
+ if c.p0.d != digestUncompressed && c.p1.d != digestUncompressed {
+ css.uncompressedDigest = ""
+ assert.Equal(t, c.res < 0, css.Less(0, 1), c.name)
+ assert.Equal(t, c.res > 0, css.Less(1, 0), c.name)
+
+ css.uncompressedDigest = css.primaryDigest
+ assert.Equal(t, c.res < 0, css.Less(0, 1), c.name)
+ assert.Equal(t, c.res > 0, css.Less(1, 0), c.name)
+ }
+ }
+}
+
+func TestCandidateSortStateSwap(t *testing.T) {
+ freshCSS := func() candidateSortState { // Return a deep copy of cssLiteral which is safe to modify.
+ res := cssLiteral
+ res.cs = slices.Clone(cssLiteral.cs)
+ return res
+ }
+
+ css := freshCSS()
+ css.Swap(0, 1)
+ assert.Equal(t, cssLiteral.cs[1], css.cs[0])
+ assert.Equal(t, cssLiteral.cs[0], css.cs[1])
+ assert.Equal(t, cssLiteral.cs[2], css.cs[2])
+
+ css = freshCSS()
+ css.Swap(1, 1)
+ assert.Equal(t, cssLiteral, css)
+}
+
+func TestDestructivelyPrioritizeReplacementCandidatesWithMax(t *testing.T) {
+ totalUnknownLocationCandidates := 4
+ for _, totalLimit := range []int{0, 1, replacementAttempts, 100, replacementUnknownLocationAttempts} {
+ for _, noLocationLimit := range []int{0, 1, replacementAttempts, 100, replacementUnknownLocationAttempts} {
+ totalKnownLocationCandidates := len(cssExpectedReplacementCandidates) - totalUnknownLocationCandidates
+ allowedUnknown := min(noLocationLimit, totalUnknownLocationCandidates)
+ expectedLen := min(totalKnownLocationCandidates+allowedUnknown, totalLimit)
+ res := destructivelyPrioritizeReplacementCandidatesWithMax(slices.Clone(cssLiteral.cs), digestCompressedPrimary, digestUncompressed, totalLimit, noLocationLimit)
+ assert.Equal(t, cssExpectedReplacementCandidates[:expectedLen], res)
+ }
+ }
+}
+
+func TestDestructivelyPrioritizeReplacementCandidates(t *testing.T) {
+ // Just a smoke test; we mostly rely on test coverage in TestCandidateSortStateLess
+ res := DestructivelyPrioritizeReplacementCandidates(slices.Clone(cssLiteral.cs), digestCompressedPrimary, digestUncompressed)
+ assert.Equal(t, cssExpectedReplacementCandidates[:replacementAttempts], res)
+}
diff --git a/pkg/blobinfocache/internal/test/test.go b/pkg/blobinfocache/internal/test/test.go
new file mode 100644
index 0000000..c310bb6
--- /dev/null
+++ b/pkg/blobinfocache/internal/test/test.go
@@ -0,0 +1,389 @@
+// Package test provides generic BlobInfoCache test helpers.
+package test
+
+import (
+ "testing"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/testing/mocks"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+)
+
+const (
+ digestUnknown = digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111")
+ digestUncompressed = digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222")
+ digestCompressedA = digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333")
+ digestCompressedB = digest.Digest("sha256:4444444444444444444444444444444444444444444444444444444444444444")
+ digestUncompressedC = digest.Digest("sha256:7777777777777777777777777777777777777777777777777777777777777777")
+ digestCompressedUnrelated = digest.Digest("sha256:5555555555555555555555555555555555555555555555555555555555555555")
+ compressorNameU = "compressorName/U"
+ compressorNameA = "compressorName/A"
+ compressorNameB = "compressorName/B"
+ compressorNameCU = "compressorName/CU"
+)
+
+// GenericCache runs an implementation-independent set of tests, given a
+// newTestCache callback, which can be called repeatedly and always returns a fresh cache instance.
+func GenericCache(t *testing.T, newTestCache func(t *testing.T) blobinfocache.BlobInfoCache2) {
+ subs := []struct {
+ name string
+ fn func(t *testing.T, cache blobinfocache.BlobInfoCache2)
+ }{
+ {"UncompressedDigest", testGenericUncompressedDigest},
+ {"RecordDigestUncompressedPair", testGenericRecordDigestUncompressedPair},
+ {"RecordKnownLocations", testGenericRecordKnownLocations},
+ {"CandidateLocations", testGenericCandidateLocations},
+ {"CandidateLocations2", testGenericCandidateLocations2},
+ }
+
+ // Without Open()/Close()
+ for _, s := range subs {
+ t.Run("no Open: "+s.name, func(t *testing.T) {
+ cache := newTestCache(t)
+ s.fn(t, cache)
+ })
+ }
+
+ // With Open()/Close()
+ for _, s := range subs {
+ t.Run("with Open: "+s.name, func(t *testing.T) {
+ cache := newTestCache(t)
+ cache.Open()
+ defer cache.Close()
+ s.fn(t, cache)
+ })
+ }
+}
+
+func testGenericUncompressedDigest(t *testing.T, cache blobinfocache.BlobInfoCache2) {
+ // Nothing is known.
+ assert.Equal(t, digest.Digest(""), cache.UncompressedDigest(digestUnknown))
+
+ cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed)
+ cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed)
+ // Known compressed→uncompressed mapping
+ assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedA))
+ assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedB))
+ // This implicitly marks digestUncompressed as uncompressed.
+ assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestUncompressed))
+
+ // Known uncompressed→self mapping
+ cache.RecordDigestUncompressedPair(digestCompressedUnrelated, digestCompressedUnrelated)
+ assert.Equal(t, digestCompressedUnrelated, cache.UncompressedDigest(digestCompressedUnrelated))
+}
+
+func testGenericRecordDigestUncompressedPair(t *testing.T, cache blobinfocache.BlobInfoCache2) {
+ for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things.
+ // Known compressed→uncompressed mapping
+ cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed)
+ assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedA))
+ // Two mappings to the same uncompressed digest
+ cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed)
+ assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedB))
+
+ // Mapping an uncompressed digest to self
+ cache.RecordDigestUncompressedPair(digestUncompressed, digestUncompressed)
+ assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestUncompressed))
+ }
+}
+
+func testGenericRecordKnownLocations(t *testing.T, cache blobinfocache.BlobInfoCache2) {
+ transport := mocks.NameImageTransport("==BlobInfocache transport mock")
+ for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things.
+ for _, scopeName := range []string{"A", "B"} { // Run the test in two different scopes to verify they don't affect each other.
+ scope := types.BICTransportScope{Opaque: scopeName}
+ for _, digest := range []digest.Digest{digestCompressedA, digestCompressedB} { // Two different digests should not affect each other either.
+ lr1 := types.BICLocationReference{Opaque: scopeName + "1"}
+ lr2 := types.BICLocationReference{Opaque: scopeName + "2"}
+ cache.RecordKnownLocation(transport, scope, digest, lr2)
+ cache.RecordKnownLocation(transport, scope, digest, lr1)
+ assert.Equal(t, []types.BICReplacementCandidate{
+ {Digest: digest, Location: lr1},
+ {Digest: digest, Location: lr2},
+ }, cache.CandidateLocations(transport, scope, digest, false))
+ assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, cache.CandidateLocations2(transport, scope, digest, false))
+ }
+ }
+ }
+}
+
+// candidate is a shorthand for types.BICReplacementCandidate / blobinfocache.BICReplacementCandidate2
+type candidate struct {
+ d digest.Digest
+ cn string
+ lr string
+}
+
+func assertCandidatesMatch(t *testing.T, scopeName string, expected []candidate, actual []types.BICReplacementCandidate) {
+ e := make([]types.BICReplacementCandidate, len(expected))
+ for i, ev := range expected {
+ e[i] = types.BICReplacementCandidate{Digest: ev.d, Location: types.BICLocationReference{Opaque: scopeName + ev.lr}}
+ }
+ assert.Equal(t, e, actual)
+}
+
+func assertCandidatesMatch2(t *testing.T, scopeName string, expected []candidate, actual []blobinfocache.BICReplacementCandidate2) {
+ e := make([]blobinfocache.BICReplacementCandidate2, len(expected))
+ for i, ev := range expected {
+ e[i] = blobinfocache.BICReplacementCandidate2{Digest: ev.d, CompressorName: ev.cn, Location: types.BICLocationReference{Opaque: scopeName + ev.lr}}
+ }
+ assert.Equal(t, e, actual)
+}
+
+func testGenericCandidateLocations(t *testing.T, cache blobinfocache.BlobInfoCache2) {
+ transport := mocks.NameImageTransport("==BlobInfocache transport mock")
+ cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed)
+ cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed)
+ cache.RecordDigestUncompressedPair(digestUncompressed, digestUncompressed)
+ digestNameSet := []struct {
+ n string
+ d digest.Digest
+ }{
+ {"U", digestUncompressed},
+ {"A", digestCompressedA},
+ {"B", digestCompressedB},
+ {"CU", digestCompressedUnrelated},
+ }
+
+ for _, scopeName := range []string{"A", "B"} { // Run the test in two different scopes to verify they don't affect each other.
+ scope := types.BICTransportScope{Opaque: scopeName}
+ // Nothing is known.
+ assert.Equal(t, []types.BICReplacementCandidate{}, cache.CandidateLocations(transport, scope, digestUnknown, false))
+ assert.Equal(t, []types.BICReplacementCandidate{}, cache.CandidateLocations(transport, scope, digestUnknown, true))
+
+ // Record "2" entries before "1" entries; then results should sort "1" (more recent) before "2" (older)
+ for _, suffix := range []string{"2", "1"} {
+ for _, e := range digestNameSet {
+ cache.RecordKnownLocation(transport, scope, e.d, types.BICLocationReference{Opaque: scopeName + e.n + suffix})
+ }
+ }
+
+ // No substitutions allowed:
+ for _, e := range digestNameSet {
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: e.d, lr: e.n + "1"}, {d: e.d, lr: e.n + "2"},
+ }, cache.CandidateLocations(transport, scope, e.d, false))
+ }
+
+ // With substitutions: The original digest is always preferred, then other compressed, then the uncompressed one.
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"},
+ {d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"},
+ {d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedA, true))
+
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"},
+ {d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"},
+ {d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedB, true))
+
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestUncompressed, lr: "U1"}, {d: digestUncompressed, lr: "U2"},
+ // "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order
+ {d: digestCompressedB, lr: "B1"},
+ {d: digestCompressedA, lr: "A1"},
+ {d: digestCompressedB, lr: "B2"},
+ // Beyond the replacementAttempts limit: {d: digestCompressedA, lr: "A2"},
+ }, cache.CandidateLocations(transport, scope, digestUncompressed, true))
+
+ // Locations are known, but no relationships
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedUnrelated, lr: "CU1"}, {d: digestCompressedUnrelated, lr: "CU2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedUnrelated, true))
+ }
+}
+
+func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCache2) {
+ transport := mocks.NameImageTransport("==BlobInfocache transport mock")
+ cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed)
+ cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed)
+ cache.RecordDigestUncompressedPair(digestUncompressed, digestUncompressed)
+ digestNameSet := []struct {
+ n string
+ d digest.Digest
+ m string
+ }{
+ {"U", digestUncompressed, compressorNameU},
+ {"A", digestCompressedA, compressorNameA},
+ {"B", digestCompressedB, compressorNameB},
+ {"CU", digestCompressedUnrelated, compressorNameCU},
+ }
+
+	for scopeIndex, scopeName := range []string{"A", "B", "C"} { // Run the test in three different scopes to verify they don't affect each other.
+ scope := types.BICTransportScope{Opaque: scopeName}
+
+ // Nothing is known.
+ assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, cache.CandidateLocations2(transport, scope, digestUnknown, false))
+ assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, cache.CandidateLocations2(transport, scope, digestUnknown, true))
+
+		// If a record exists with compression data but without a Location,
+		// return a record without a location and with `UnknownLocation: true`.
+ cache.RecordDigestCompressorName(digestUncompressedC, "somecompression")
+ assert.Equal(t, []blobinfocache.BICReplacementCandidate2{
+ {
+ Digest: digestUncompressedC,
+ CompressorName: "somecompression",
+ UnknownLocation: true,
+ Location: types.BICLocationReference{Opaque: ""},
+ }}, cache.CandidateLocations2(transport, scope, digestUncompressedC, true))
+		// When another entry with a scope and Location is recorded, it should be returned instead,
+		// because it has higher priority.
+ cache.RecordKnownLocation(transport, scope, digestUncompressedC, types.BICLocationReference{Opaque: "somelocation"})
+ assert.Equal(t, []blobinfocache.BICReplacementCandidate2{
+ {
+ Digest: digestUncompressedC,
+ CompressorName: "somecompression",
+ UnknownLocation: false,
+ Location: types.BICLocationReference{Opaque: "somelocation"},
+ }}, cache.CandidateLocations2(transport, scope, digestUncompressedC, true))
+
+ // Record "2" entries before "1" entries; then results should sort "1" (more recent) before "2" (older)
+ for _, suffix := range []string{"2", "1"} {
+ for _, e := range digestNameSet {
+ cache.RecordKnownLocation(transport, scope, e.d, types.BICLocationReference{Opaque: scopeName + e.n + suffix})
+ }
+ }
+
+ // Clear any "known" compression values, except on the first loop where they've never been set.
+ // This probably triggers “Compressor for blob with digest … previously recorded as …, now unknown” warnings here, for test purposes;
+ // that shouldn’t happen in real-world usage.
+ if scopeIndex != 0 {
+ for _, e := range digestNameSet {
+ cache.RecordDigestCompressorName(e.d, blobinfocache.UnknownCompression)
+ }
+ }
+
+ // No substitutions allowed:
+ for _, e := range digestNameSet {
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: e.d, lr: e.n + "1"},
+ {d: e.d, lr: e.n + "2"},
+ }, cache.CandidateLocations(transport, scope, e.d, false))
+ assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, e.d, false))
+ }
+
+ // With substitutions: The original digest is always preferred, then other compressed, then the uncompressed one.
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedA, lr: "A1"},
+ {d: digestCompressedA, lr: "A2"},
+ {d: digestCompressedB, lr: "B1"},
+ {d: digestCompressedB, lr: "B2"},
+ {d: digestUncompressed, lr: "U1"},
+			// Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedA, true))
+ // Unknown compression -> no candidates
+ assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, digestCompressedA, true))
+
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedB, lr: "B1"},
+ {d: digestCompressedB, lr: "B2"},
+ {d: digestCompressedA, lr: "A1"},
+ {d: digestCompressedA, lr: "A2"},
+ {d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedB, true))
+ // Unknown compression -> no candidates
+ assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, digestCompressedB, true))
+
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestUncompressed, lr: "U1"},
+ {d: digestUncompressed, lr: "U2"},
+ // "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order
+ {d: digestCompressedB, lr: "B1"},
+ {d: digestCompressedA, lr: "A1"},
+ {d: digestCompressedB, lr: "B2"},
+ // Beyond the replacementAttempts limit: {d: digestCompressedA, lr: "A2"},
+ }, cache.CandidateLocations(transport, scope, digestUncompressed, true))
+ // Unknown compression -> no candidates
+ assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, digestUncompressed, true))
+
+ // Locations are known, but no relationships
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedUnrelated, lr: "CU1"},
+ {d: digestCompressedUnrelated, lr: "CU2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedUnrelated, true))
+ // Unknown compression -> no candidates
+ assertCandidatesMatch2(t, scopeName, []candidate{}, cache.CandidateLocations2(transport, scope, digestCompressedUnrelated, true))
+
+ // Set the "known" compression values
+ for _, e := range digestNameSet {
+ cache.RecordDigestCompressorName(e.d, e.m)
+ }
+
+ // No substitutions allowed:
+ for _, e := range digestNameSet {
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: e.d, lr: e.n + "1"},
+ {d: e.d, lr: e.n + "2"},
+ }, cache.CandidateLocations(transport, scope, e.d, false))
+ assertCandidatesMatch2(t, scopeName, []candidate{
+ {d: e.d, cn: e.m, lr: e.n + "1"},
+ {d: e.d, cn: e.m, lr: e.n + "2"},
+ }, cache.CandidateLocations2(transport, scope, e.d, false))
+ }
+
+ // With substitutions: The original digest is always preferred, then other compressed, then the uncompressed one.
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedA, lr: "A1"},
+ {d: digestCompressedA, lr: "A2"},
+ {d: digestCompressedB, lr: "B1"},
+ {d: digestCompressedB, lr: "B2"},
+ {d: digestUncompressed, lr: "U1"},
+ // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedA, true))
+ assertCandidatesMatch2(t, scopeName, []candidate{
+ {d: digestCompressedA, cn: compressorNameA, lr: "A1"},
+ {d: digestCompressedA, cn: compressorNameA, lr: "A2"},
+ {d: digestCompressedB, cn: compressorNameB, lr: "B1"},
+ {d: digestCompressedB, cn: compressorNameB, lr: "B2"},
+ {d: digestUncompressed, cn: compressorNameU, lr: "U1"},
+			// Beyond the replacementAttempts limit: {d: digestUncompressed, cn: compressorNameU, lr: "U2"},
+ }, cache.CandidateLocations2(transport, scope, digestCompressedA, true))
+
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedB, lr: "B1"},
+ {d: digestCompressedB, lr: "B2"},
+ {d: digestCompressedA, lr: "A1"},
+ {d: digestCompressedA, lr: "A2"},
+ {d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedB, true))
+ assertCandidatesMatch2(t, scopeName, []candidate{
+ {d: digestCompressedB, cn: compressorNameB, lr: "B1"},
+ {d: digestCompressedB, cn: compressorNameB, lr: "B2"},
+ {d: digestCompressedA, cn: compressorNameA, lr: "A1"},
+ {d: digestCompressedA, cn: compressorNameA, lr: "A2"},
+ {d: digestUncompressed, cn: compressorNameU, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, cn: compressorNameU, lr: "U2"},
+ }, cache.CandidateLocations2(transport, scope, digestCompressedB, true))
+
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestUncompressed, lr: "U1"},
+ {d: digestUncompressed, lr: "U2"},
+ // "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order
+ {d: digestCompressedB, lr: "B1"},
+ {d: digestCompressedA, lr: "A1"},
+ {d: digestCompressedB, lr: "B2"},
+ // Beyond the replacementAttempts limit: {d: digestCompressedA, lr: "A2"},
+ }, cache.CandidateLocations(transport, scope, digestUncompressed, true))
+ assertCandidatesMatch2(t, scopeName, []candidate{
+ {d: digestUncompressed, cn: compressorNameU, lr: "U1"},
+ {d: digestUncompressed, cn: compressorNameU, lr: "U2"},
+ // "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order
+ {d: digestCompressedB, cn: compressorNameB, lr: "B1"},
+ {d: digestCompressedA, cn: compressorNameA, lr: "A1"},
+ {d: digestCompressedB, cn: compressorNameB, lr: "B2"},
+ // Beyond the replacementAttempts limit: {d: digestCompressedA, cn: compressorNameA, lr: "A2"},
+ }, cache.CandidateLocations2(transport, scope, digestUncompressed, true))
+
+ // Locations are known, but no relationships
+ assertCandidatesMatch(t, scopeName, []candidate{
+ {d: digestCompressedUnrelated, lr: "CU1"},
+ {d: digestCompressedUnrelated, lr: "CU2"},
+ }, cache.CandidateLocations(transport, scope, digestCompressedUnrelated, true))
+ assertCandidatesMatch2(t, scopeName, []candidate{
+ {d: digestCompressedUnrelated, cn: compressorNameCU, lr: "CU1"},
+ {d: digestCompressedUnrelated, cn: compressorNameCU, lr: "CU2"},
+ }, cache.CandidateLocations2(transport, scope, digestCompressedUnrelated, true))
+ }
+}
diff --git a/pkg/blobinfocache/memory/memory.go b/pkg/blobinfocache/memory/memory.go
new file mode 100644
index 0000000..cfad16b
--- /dev/null
+++ b/pkg/blobinfocache/memory/memory.go
@@ -0,0 +1,216 @@
+// Package memory implements an in-memory BlobInfoCache.
+package memory
+
+import (
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+ transport string
+ scope types.BICTransportScope
+ blobDigest digest.Digest
+}
+
+// cache implements an in-memory-only BlobInfoCache.
+type cache struct {
+ mutex sync.Mutex
+ // The following fields can only be accessed with mutex held.
+ uncompressedDigests map[digest.Digest]digest.Digest
+ digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
+ knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+	compressors           map[digest.Digest]string                                  // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
+}
+
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+ return new2()
+}
+
+func new2() *cache {
+ return &cache{
+ uncompressedDigests: map[digest.Digest]digest.Digest{},
+ digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
+ knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
+ compressors: map[digest.Digest]string{},
+ }
+}
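+
+// exampleMemoryUsage is an illustrative sketch, not part of the upstream change: it shows how a
+// caller might exercise the in-memory cache directly. The digests below are placeholders.
+func exampleMemoryUsage() digest.Digest {
+	bic := New()
+	compressed := digest.Digest("sha256:00000000000000000000000000000000000000000000000000000000000000aa")
+	uncompressed := digest.Digest("sha256:00000000000000000000000000000000000000000000000000000000000000bb")
+	// Record a locally verified compressed→uncompressed relationship…
+	bic.RecordDigestUncompressedPair(compressed, uncompressed)
+	// …and query it back; this returns the uncompressed digest recorded above.
+	return bic.UncompressedDigest(compressed)
+}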
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (mem *cache) Open() {
+}
+
+// Close destroys state created by Open().
+func (mem *cache) Close() {
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ return mem.uncompressedDigestLocked(anyDigest)
+}
+
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
+ if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+ return d
+ }
+ // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
+ return anyDigest
+ }
+ return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ mem.uncompressedDigests[anyDigest] = uncompressed
+
+ anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+ if !ok {
+ anyDigestSet = set.New[digest.Digest]()
+ mem.digestsByUncompressed[uncompressed] = anyDigestSet
+ }
+ anyDigestSet.Add(anyDigest)
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+ locationScope, ok := mem.knownLocations[key]
+ if !ok {
+ locationScope = map[types.BICLocationReference]time.Time{}
+ mem.knownLocations[key] = locationScope
+ }
+ locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
+// algorithm, or uncompressed, or that we no longer know.
+func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName {
+ logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName)
+ }
+ if compressorName == blobinfocache.UnknownCompression {
+ delete(mem.compressors, blobDigest)
+ return
+ }
+ mem.compressors[blobDigest] = compressorName
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
+// with corresponding compression info from mem.compressors, and returns the result of appending
+// them to candidates. v2Output allows including candidates with unknown location, and filters out
+// candidates with unknown compression.
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
+ compressorName := blobinfocache.UnknownCompression
+ if v, ok := mem.compressors[digest]; ok {
+ compressorName = v
+ }
+ if compressorName == blobinfocache.UnknownCompression && v2Output {
+ return candidates
+ }
+ locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+ if len(locations) > 0 {
+ for l, t := range locations {
+ candidates = append(candidates, prioritize.CandidateWithTime{
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ Location: l,
+ },
+ LastSeen: t,
+ })
+ }
+ } else if v2Output {
+ candidates = append(candidates, prioritize.CandidateWithTime{
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ UnknownLocation: true,
+ Location: types.BICLocationReference{Opaque: ""},
+ },
+ LastSeen: time.Time{},
+ })
+ }
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) pair (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+}
+
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known) that could possibly be reused
+// within the specified (transport, scope) pair (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
+ return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+}
+
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ res := []prioritize.CandidateWithTime{}
+ res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
+ var uncompressedDigest digest.Digest // = ""
+ if canSubstitute {
+ if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
+ otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+ if otherDigests != nil {
+ for _, d := range otherDigests.Values() {
+ if d != primaryDigest && d != uncompressedDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
+ }
+ }
+ }
+ if uncompressedDigest != primaryDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
+ }
+ }
+ }
+ return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/pkg/blobinfocache/memory/memory_test.go b/pkg/blobinfocache/memory/memory_test.go
new file mode 100644
index 0000000..322f008
--- /dev/null
+++ b/pkg/blobinfocache/memory/memory_test.go
@@ -0,0 +1,18 @@
+package memory
+
+import (
+ "testing"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/test"
+)
+
+var _ blobinfocache.BlobInfoCache2 = &cache{}
+
+func newTestCache(t *testing.T) blobinfocache.BlobInfoCache2 {
+ return new2()
+}
+
+func TestNew(t *testing.T) {
+ test.GenericCache(t, newTestCache)
+}
diff --git a/pkg/blobinfocache/none/none.go b/pkg/blobinfocache/none/none.go
new file mode 100644
index 0000000..4b7122f
--- /dev/null
+++ b/pkg/blobinfocache/none/none.go
@@ -0,0 +1,50 @@
+// Package none implements a dummy BlobInfoCache which records no data.
+package none
+
+import (
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// noCache implements a dummy BlobInfoCache which records no data.
+type noCache struct {
+}
+
+// NoCache implements BlobInfoCache by not recording any data.
+//
+// This exists primarily for implementations of configGetter for
+// Manifest.Inspect, because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a
+// short-lived cache, ideally blobinfocache.DefaultCache.
+var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{})
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) pair (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return nil
+}
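+
+// exampleNoCacheUsage is an illustrative sketch, not part of the upstream change: code paths that
+// never benefit from caching (such as config fetches, which have only one representation) can pass
+// NoCache wherever a cache is expected.
+func exampleNoCacheUsage(anyDigest digest.Digest) digest.Digest {
+	// NoCache records nothing, so this always returns "".
+	return NoCache.UncompressedDigest(anyDigest)
+}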
diff --git a/pkg/blobinfocache/none/none_test.go b/pkg/blobinfocache/none/none_test.go
new file mode 100644
index 0000000..57d2010
--- /dev/null
+++ b/pkg/blobinfocache/none/none_test.go
@@ -0,0 +1,7 @@
+package none
+
+import (
+ "github.com/containers/image/v5/types"
+)
+
+var _ types.BlobInfoCache = &noCache{}
diff --git a/pkg/blobinfocache/sqlite/sqlite.go b/pkg/blobinfocache/sqlite/sqlite.go
new file mode 100644
index 0000000..2b446a6
--- /dev/null
+++ b/pkg/blobinfocache/sqlite/sqlite.go
@@ -0,0 +1,575 @@
+// Package sqlite implements a BlobInfoCache backed by SQLite.
+package sqlite
+
+import (
+ "database/sql"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+ "github.com/containers/image/v5/types"
+	_ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend for database/sql
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+ // we can simply start over with a different filename; update blobInfoCacheFilename.
+ // That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema
+ // (which would require compatibility in both directions).
+
+ // Assembled sqlite options used when opening the database.
+ sqliteOptions = "?" +
+ // Deal with timezone automatically.
+		// go-sqlite3 always _records_ timestamps as text: time in local time + a time zone offset.
+ // _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or)
+ // if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location;
+ // (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used).
+ "_loc=auto" +
+ // Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous).
+ "&_sync=FULL" +
+ // Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys).
+ // We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons).
+ "&_foreign_keys=1" +
+ // Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html);
+ // i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock,
+ // never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously).
+ //
+ // This, together with go-sqlite3’s default for _busy_timeout=5000, means that we should never see a “database is locked” error,
+ // the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous
+ // holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented.
+ // Compare https://github.com/mattn/go-sqlite3/issues/274 .
+ //
+ // Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167
+ // or https://github.com/mattn/go-sqlite3/issues/400 .
+ // The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings,
+ // which seems rather wasteful.
+ "&_txlock=exclusive"
+)
+
+// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path.
+type cache struct {
+ path string
+
+ // The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
+ // That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
+ // the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
+ // incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
+ // a Close method, so creating a lot of single-use caches could leak data.
+ //
+ // Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image.
+ // This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open.
+ // Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single
+ // *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every
+ // single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation,
+ // somewhere around "700").
+
+ lock sync.Mutex
+ // The following fields can only be accessed with lock held.
+ refCount int // number of outstanding Open() calls
+ db *sql.DB // nil if not set (may happen even if refCount > 0 on errors)
+}
+
+// New returns a BlobInfoCache implementation which uses a SQLite file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) (types.BlobInfoCache, error) {
+ return new2(path)
+}
+
+func new2(path string) (*cache, error) {
+ db, err := rawOpen(path)
+ if err != nil {
+ return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
+ }
+ defer db.Close()
+
+ // We don’t check the schema before every operation, because that would be costly
+ // and because we assume schema changes will be handled by using a different path.
+ if err := ensureDBHasCurrentSchema(db); err != nil {
+ return nil, err
+ }
+
+ return &cache{
+ path: path,
+ refCount: 0,
+ db: nil,
+ }, nil
+}
+
+// rawOpen returns a new *sql.DB for path.
+// The caller should arrange for it to be .Close()d.
+func rawOpen(path string) (*sql.DB, error) {
+ // This exists to centralize the use of sqliteOptions.
+ return sql.Open("sqlite3", path+sqliteOptions)
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (sqc *cache) Open() {
+ sqc.lock.Lock()
+ defer sqc.lock.Unlock()
+
+ if sqc.refCount == 0 {
+ db, err := rawOpen(sqc.path)
+ if err != nil {
+ logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
+ db = nil // But still increase sqc.refCount, because a .Close() will happen
+ }
+ sqc.db = db
+ }
+ sqc.refCount++
+}
+
+// Close destroys state created by Open().
+func (sqc *cache) Close() {
+ sqc.lock.Lock()
+ defer sqc.lock.Unlock()
+
+ switch sqc.refCount {
+ case 0:
+ logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()")
+ return
+ case 1:
+ if sqc.db != nil {
+ sqc.db.Close()
+ sqc.db = nil
+ }
+ }
+ sqc.refCount--
+}
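+
+// exampleSQLiteUsage is an illustrative sketch, not part of the upstream change: it shows the
+// Open()/Close() pairing described on the cache struct, which amortizes the cost of one SQLite
+// connection over several operations. The path and digest are supplied by the caller.
+func exampleSQLiteUsage(path string, blobDigest digest.Digest) (digest.Digest, error) {
+	sqc, err := new2(path)
+	if err != nil {
+		return "", err
+	}
+	sqc.Open() // keeps a single *sql.DB open for the calls below
+	defer sqc.Close()
+	return sqc.UncompressedDigest(blobDigest), nil
+}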
+
+type void struct{} // So that we don’t have to write struct{}{} all over the place
+
+// transaction calls fn within a read-write transaction in sqc.
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
+ db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
+ sqc.lock.Lock()
+ defer sqc.lock.Unlock()
+
+ if sqc.db != nil {
+ return sqc.db, func() {}, nil
+ }
+ db, err := rawOpen(sqc.path)
+ if err != nil {
+ return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
+ }
+ return db, func() { db.Close() }, nil
+ }()
+ if err != nil {
+ var zeroRes T // A zero value of T
+ return zeroRes, err
+ }
+ defer closeDB()
+
+ return dbTransaction(db, fn)
+}
+
+// dbTransaction calls fn within a read-write transaction in db.
+func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
+	// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
+
+ var zeroRes T // A zero value of T
+
+ tx, err := db.Begin()
+ if err != nil {
+ return zeroRes, fmt.Errorf("beginning transaction: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ if err := tx.Rollback(); err != nil {
+ logrus.Errorf("Rolling back transaction: %v", err)
+ }
+ }
+ }()
+
+ res, err := fn(tx)
+ if err != nil {
+ return zeroRes, err
+ }
+ if err := tx.Commit(); err != nil {
+ return zeroRes, fmt.Errorf("committing transaction: %w", err)
+ }
+
+ succeeded = true
+ return res, nil
+}
+
+// querySingleValue executes a SELECT which is expected to return at most one row with a single column.
+// It returns (value, true, nil) on success, (zero value, false, nil) if no row was returned, or (zero value, false, err) on other errors.
+func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) {
+ var value T
+ if err := tx.QueryRow(query, params...).Scan(&value); err != nil {
+ var zeroValue T // A zero value of T
+ if errors.Is(err, sql.ErrNoRows) {
+ return zeroValue, false, nil
+ }
+ return zeroValue, false, err
+ }
+ return value, true, nil
+}
+
+// ensureDBHasCurrentSchema adds the necessary tables and indices to a database.
+// This is typically used when creating a previously-nonexistent database.
+// We don’t really anticipate schema migrations; with c/image usually vendored, not using
+// shared libraries, migrating a schema on an existing database would affect old-version users.
+// Instead, schema changes are likely to be implemented by using a different cache file name,
+// and leaving existing caches around for old users.
+func ensureDBHasCurrentSchema(db *sql.DB) error {
+ // Considered schema design alternatives:
+ //
+	// (Overall, considering the network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring
+ // to the blob info cache, it seems reasonable to prioritize readability over microoptimization of this database.)
+ //
+ // * This schema uses the text representation of digests.
+ //
+ // We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation;
+ // and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm
+ // is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that
+ // into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm
+ // — and that would require us to implement two code paths, one of them basically never exercised / never tested.
+ //
+ // * There are two separate items for recording the uncompressed digest and digest compressors.
+ // Alternatively, we could have a single "digest facts" table with NULLable columns.
+ //
+ // The way the BlobInfoCache API works, we are only going to write one value at a time, so
+ // sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples).
+ // Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs
+ // do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then
+ // we search in DigestUncompressedPairs by uncompressed digest, not by the primary key).
+ //
+ // Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to
+ // do a more verbose ON CONFLICT(…) DO UPDATE SET … = ….
+ //
+ // * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
+ //
+ // Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
+ // join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
+ // is probably costlier than comparing a few more bytes of data.
+ //
+ // Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
+ // having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally).
+ //
+ items := []struct{ itemName, command string }{
+ {
+ "DigestUncompressedPairs",
+ `CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` +
+ // index implied by PRIMARY KEY
+ `anyDigest TEXT PRIMARY KEY NOT NULL,` +
+ // DigestUncompressedPairs_index_uncompressedDigest
+ `uncompressedDigest TEXT NOT NULL
+ )`,
+ },
+ {
+ "DigestUncompressedPairs_index_uncompressedDigest",
+ `CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`,
+ },
+ {
+ "DigestCompressors",
+ `CREATE TABLE IF NOT EXISTS DigestCompressors(` +
+ // index implied by PRIMARY KEY
+ `digest TEXT PRIMARY KEY NOT NULL,` +
+ // May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
+ `compressor TEXT NOT NULL
+ )`,
+ },
+ {
+ "KnownLocations",
+ `CREATE TABLE IF NOT EXISTS KnownLocations(
+ transport TEXT NOT NULL,
+ scope TEXT NOT NULL,
+ digest TEXT NOT NULL,
+ location TEXT NOT NULL,` +
+			// TIMESTAMP is parsed by SQLite with NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics)
+ // format "2006-01-02 15:04:05.999999999-07:00".
+ // See also the _loc option in the sql.Open data source name.
+ `time TIMESTAMP NOT NULL,` +
+ // Implies an index.
+ // We also search by (transport, scope, digest), that doesn’t need an extra index
+ // because it is a prefix of the implied primary-key index.
+ `PRIMARY KEY (transport, scope, digest, location)
+ )`,
+ },
+ }
+
+ _, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
+		// If the last-created item exists, assume nothing needs to be done.
+ lastItemName := items[len(items)-1].itemName
+ _, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
+ if err != nil {
+ return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
+ }
+ if !found {
+ // Item does not exist, assuming a fresh database.
+ for _, i := range items {
+ if _, err := tx.Exec(i.command); err != nil {
+ return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
+ }
+ }
+ }
+ return void{}, nil
+ })
+ return err
+}
+
+// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
+func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
+ uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+ if err != nil {
+ return "", err
+ }
+ if found {
+ d, err := digest.Parse(uncompressedString)
+ if err != nil {
+ return "", err
+ }
+ return d, nil
+ }
+ // If anyDigest is recorded as some other blob’s uncompressedDigest, anyDigest must itself refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ _, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
+ if err != nil {
+ return "", err
+ }
+ if found {
+ return anyDigest, nil
+ }
+ return "", nil
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+ return sqc.uncompressedDigest(tx, anyDigest)
+ })
+ if err != nil {
+ return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+ previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+ if err != nil {
+ return void{}, fmt.Errorf("looking for uncompressed digest for %q", anyDigest)
+ }
+ if gotPrevious {
+ previous, err := digest.Parse(previousString)
+ if err != nil {
+ return void{}, err
+ }
+ if previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ }
+ if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
+ anyDigest.String(), uncompressed.String()); err != nil {
+ return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
+ }
+ return void{}, nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
+ _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+ if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)",
+ transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry.
+ return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w",
+ location.Opaque, transport.Name(), scope.Opaque, digest.String(), err)
+ }
+ return void{}, nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordDigestCompressorName records a compressor for the blob with the specified digest,
+// or Uncompressed or UnknownCompression.
+// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
+// digest just because some remote author claims so (e.g. because a manifest says so);
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+ _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+ previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
+ if err != nil {
+ return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest)
+ }
+ if gotPrevious && previous != compressorName {
+ logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName)
+ }
+ if compressorName == blobinfocache.UnknownCompression {
+ if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+ return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
+ }
+ } else {
+ if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
+ anyDigest.String(), compressorName); err != nil {
+ return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err)
+ }
+ }
+ return void{}, nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
+// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
+// location, and filters out candidates with unknown compression.
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
+ var rows *sql.Rows
+ var err error
+ if v2Output {
+ rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
+ "ON KnownLocations.digest = DigestCompressors.digest "+
+ "WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+ transport.Name(), scope.Opaque, digest.String())
+ } else {
+ rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
+ "LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
+ "WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+ blobinfocache.UnknownCompression,
+ transport.Name(), scope.Opaque, digest.String())
+ }
+ if err != nil {
+ return nil, fmt.Errorf("looking up candidate locations: %w", err)
+ }
+ defer rows.Close()
+
+ res := []prioritize.CandidateWithTime{}
+ for rows.Next() {
+ var location string
+ var time time.Time
+ var compressorName string
+ if err := rows.Scan(&location, &time, &compressorName); err != nil {
+ return nil, fmt.Errorf("scanning candidate: %w", err)
+ }
+ res = append(res, prioritize.CandidateWithTime{
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ Location: types.BICLocationReference{Opaque: location},
+ },
+ LastSeen: time,
+ })
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("iterating through locations: %w", err)
+ }
+
+ if len(res) == 0 && v2Output {
+ compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
+ if err != nil {
+ return nil, fmt.Errorf("scanning compressorName: %w", err)
+ }
+ if found {
+ res = append(res, prioritize.CandidateWithTime{
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressor,
+ UnknownLocation: true,
+ Location: types.BICLocationReference{Opaque: ""},
+ },
+ LastSeen: time.Time{},
+ })
+ }
+ }
+ candidates = append(candidates, res...)
+ return candidates, nil
+}
+
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport, scope) (if they still
+// exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+// up variants of the blob which have the same uncompressed digest.
+//
+// The CompressorName fields in returned data must never be UnknownCompression.
+func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
+ return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
+}
+
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
+ var uncompressedDigest digest.Digest // = ""
+ res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
+ res := []prioritize.CandidateWithTime{}
+ res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
+ if err != nil {
+ return nil, err
+ }
+ if canSubstitute {
+ uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
+ // (In the extreme, we could turn _everything_ this function does into a single query.
+ // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
+ // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
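+ // As a rough sketch (using the schema above; this query is not used by the code below), such a single join
+ // could look like:
+ //
+ //     SELECT KL.digest, KL.location, KL.time, DC.compressor
+ //     FROM DigestUncompressedPairs AS DUP
+ //     JOIN KnownLocations AS KL ON KL.digest = DUP.anyDigest
+ //     JOIN DigestCompressors AS DC ON DC.digest = DUP.anyDigest
+ //     WHERE DUP.uncompressedDigest = ? AND KL.transport = ? AND KL.scope = ?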
+ rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
+ if err != nil {
+ return nil, fmt.Errorf("querying for other digests: %w", err)
+ }
+ defer rows.Close()
+ for rows.Next() {
+ var otherDigestString string
+ if err := rows.Scan(&otherDigestString); err != nil {
+ return nil, fmt.Errorf("scanning other digest: %w", err)
+ }
+ otherDigest, err := digest.Parse(otherDigestString)
+ if err != nil {
+ return nil, err
+ }
+ if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
+ res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("iterating through other digests: %w", err)
+ }
+
+ if uncompressedDigest != primaryDigest {
+ res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return res, nil
+ })
+ if err != nil {
+ return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false))
+}
diff --git a/pkg/blobinfocache/sqlite/sqlite_test.go b/pkg/blobinfocache/sqlite/sqlite_test.go
new file mode 100644
index 0000000..76f6bc6
--- /dev/null
+++ b/pkg/blobinfocache/sqlite/sqlite_test.go
@@ -0,0 +1,25 @@
+package sqlite
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/test"
+ "github.com/stretchr/testify/require"
+)
+
+var _ blobinfocache.BlobInfoCache2 = &cache{}
+
+func newTestCache(t *testing.T) blobinfocache.BlobInfoCache2 {
+ dir := t.TempDir()
+ cache, err := new2(filepath.Join(dir, "db.sqlite"))
+ require.NoError(t, err)
+ return cache
+}
+
+func TestNew(t *testing.T) {
+ test.GenericCache(t, newTestCache)
+}
+
+// FIXME: Tests for the various corner cases / failure cases of sqlite.cache should be added here.
diff --git a/pkg/cli/environment/environment.go b/pkg/cli/environment/environment.go
new file mode 100644
index 0000000..1126283
--- /dev/null
+++ b/pkg/cli/environment/environment.go
@@ -0,0 +1,31 @@
+package environment
+
+import (
+ "errors"
+ "os"
+
+ "github.com/containers/image/v5/types"
+)
+
+// UpdateRegistriesConf sets the SystemRegistriesConfPath in the system
+// context, unless it is already set. Otherwise the value is taken, in order of
+// priority and only if set, from the CONTAINERS_REGISTRIES_CONF or
+// REGISTRIES_CONFIG_PATH environment variables.
+func UpdateRegistriesConf(sys *types.SystemContext) error {
+ if sys == nil {
+ return errors.New("internal error: UpdateRegistriesConf: nil argument")
+ }
+ if sys.SystemRegistriesConfPath != "" {
+ return nil
+ }
+ if envOverride, ok := os.LookupEnv("CONTAINERS_REGISTRIES_CONF"); ok {
+ sys.SystemRegistriesConfPath = envOverride
+ return nil
+ }
+ if envOverride, ok := os.LookupEnv("REGISTRIES_CONFIG_PATH"); ok {
+ sys.SystemRegistriesConfPath = envOverride
+ return nil
+ }
+
+ return nil
+}
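+
+// A minimal usage sketch (illustrative; the caller-side variable names are not defined in this package):
+//
+//    sys := &types.SystemContext{}
+//    if err := environment.UpdateRegistriesConf(sys); err != nil {
+//        return err
+//    }
+//    // sys.SystemRegistriesConfPath now honors CONTAINERS_REGISTRIES_CONF or REGISTRIES_CONFIG_PATH, if set.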
diff --git a/pkg/cli/passphrase.go b/pkg/cli/passphrase.go
new file mode 100644
index 0000000..c46650c
--- /dev/null
+++ b/pkg/cli/passphrase.go
@@ -0,0 +1,36 @@
+package cli
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+)
+
+// ReadPassphraseFile returns the first line of the file at the specified path.
+// For convenience, an empty string is returned if the path is empty.
+func ReadPassphraseFile(path string) (string, error) {
+ if path == "" {
+ return "", nil
+ }
+
+ logrus.Debugf("Reading user-specified passphrase for signing from %s", path)
+
+ ppf, err := os.Open(path)
+ if err != nil {
+ return "", err
+ }
+ defer ppf.Close()
+
+ // Read the *first* line in the passphrase file, just as gpg(1) does.
+ buf, err := bufio.NewReader(ppf).ReadBytes('\n')
+ if err != nil && !errors.Is(err, io.EOF) {
+ return "", fmt.Errorf("reading passphrase file: %w", err)
+ }
+
+ return strings.TrimSuffix(string(buf), "\n"), nil
+}
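+
+// A minimal usage sketch (passphraseFilePath is a hypothetical caller-provided value):
+//
+//    passphrase, err := cli.ReadPassphraseFile(passphraseFilePath)
+//    if err != nil {
+//        return err
+//    }
+//    // An empty passphraseFilePath yields an empty passphrase, per the convenience behavior documented above.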
diff --git a/pkg/cli/sigstore/params/sigstore.go b/pkg/cli/sigstore/params/sigstore.go
new file mode 100644
index 0000000..0151b9a
--- /dev/null
+++ b/pkg/cli/sigstore/params/sigstore.go
@@ -0,0 +1,75 @@
+package params
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+
+ "gopkg.in/yaml.v3"
+)
+
+// SigningParameterFile collects parameters used for creating sigstore signatures.
+//
+// To consume such a file, most callers should use c/image/pkg/cli/sigstore instead
+// of dealing with this type explicitly using ParseFile.
+//
+// This type is exported primarily to allow creating parameter files programmatically
+// (and eventually this subpackage should provide an API to convert this type into
+// the appropriate file contents, so that callers don’t need to do that manually).
+type SigningParameterFile struct {
+ // Keep this in sync with docs/containers-sigstore-signing-params.yaml.5.md !
+
+ PrivateKeyFile string `yaml:"privateKeyFile,omitempty"` // If set, sign using a private key stored in this file.
+ PrivateKeyPassphraseFile string `yaml:"privateKeyPassphraseFile,omitempty"` // A file that contains the passphrase required for PrivateKeyFile.
+
+ Fulcio *SigningParameterFileFulcio `yaml:"fulcio,omitempty"` // If set, sign using a short-lived key and a Fulcio-issued certificate.
+
+ RekorURL string `yaml:"rekorURL,omitempty"` // If set, upload the signature to the specified Rekor server, and include a log inclusion proof in the signature.
+}
+
+// SigningParameterFileFulcio is a subset of SigningParameterFile dedicated to Fulcio parameters.
+type SigningParameterFileFulcio struct {
+ // Keep this in sync with docs/containers-sigstore-signing-params.yaml.5.md !
+
+ FulcioURL string `yaml:"fulcioURL,omitempty"` // URL of the Fulcio server. Required.
+
+ // How to obtain the OIDC ID token required by Fulcio. Required.
+ OIDCMode OIDCMode `yaml:"oidcMode,omitempty"`
+
+ // oidcMode = staticToken
+ OIDCIDToken string `yaml:"oidcIDToken,omitempty"`
+
+ // oidcMode = deviceGrant || interactive
+ OIDCIssuerURL string `yaml:"oidcIssuerURL,omitempty"`
+ OIDCClientID string `yaml:"oidcClientID,omitempty"`
+ OIDCClientSecret string `yaml:"oidcClientSecret,omitempty"`
+}
+
+// OIDCMode specifies how the OIDC ID token required by Fulcio is obtained.
+type OIDCMode string
+
+const (
+ // OIDCModeStaticToken means the parameter file contains a user-provided OIDC ID token value.
+ OIDCModeStaticToken OIDCMode = "staticToken"
+ // OIDCModeDeviceGrant specifies the OIDC ID token should be obtained using a device authorization grant (RFC 8628).
+ OIDCModeDeviceGrant OIDCMode = "deviceGrant"
+ // OIDCModeInteractive specifies the OIDC ID token should be obtained interactively (by automatically opening
+ // a browser or prompting the user).
+ OIDCModeInteractive OIDCMode = "interactive"
+)
+
+// ParseFile parses a SigningParameterFile at the specified path.
+//
+// Most consumers of the parameter file should use c/image/pkg/cli/sigstore to obtain a *signer.Signer instead.
+func ParseFile(path string) (*SigningParameterFile, error) {
+ var res SigningParameterFile
+ source, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("reading %q: %w", path, err)
+ }
+ dec := yaml.NewDecoder(bytes.NewReader(source))
+ dec.KnownFields(true)
+ if err = dec.Decode(&res); err != nil {
+ return nil, fmt.Errorf("parsing %q: %w", path, err)
+ }
+ return &res, nil
+}
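+
+// For illustration, a parameter file using the fields above might look like this (all values are placeholders;
+// a real file would normally use either privateKeyFile or fulcio, not both):
+//
+//    privateKeyFile: /path/to/cosign.key
+//    privateKeyPassphraseFile: /path/to/passphrase.txt
+//    rekorURL: https://rekor.example.com
+//
+// or, for Fulcio-based signing:
+//
+//    fulcio:
+//      fulcioURL: https://fulcio.example.com
+//      oidcMode: interactive
+//      oidcIssuerURL: https://oidc.example.com
+//      oidcClientID: sigstore
+//    rekorURL: https://rekor.example.com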
diff --git a/pkg/cli/sigstore/sigstore.go b/pkg/cli/sigstore/sigstore.go
new file mode 100644
index 0000000..62520c2
--- /dev/null
+++ b/pkg/cli/sigstore/sigstore.go
@@ -0,0 +1,117 @@
+package sigstore
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+
+ "github.com/containers/image/v5/pkg/cli"
+ "github.com/containers/image/v5/pkg/cli/sigstore/params"
+ "github.com/containers/image/v5/signature/signer"
+ "github.com/containers/image/v5/signature/sigstore"
+ "github.com/containers/image/v5/signature/sigstore/fulcio"
+ "github.com/containers/image/v5/signature/sigstore/rekor"
+)
+
+// Options collects data that the caller should provide to NewSignerFromParameterFile.
+// The caller should set all fields unless documented otherwise.
+type Options struct {
+ PrivateKeyPassphrasePrompt func(keyFile string) (string, error) // A function to call to interactively prompt for a passphrase
+ Stdin io.Reader
+ Stdout io.Writer
+}
+
+// NewSignerFromParameterFile returns a signature.Signer which creates sigstore signatures based on a parameter file at the specified path.
+//
+// The caller must call Close() on the returned Signer.
+func NewSignerFromParameterFile(path string, options *Options) (*signer.Signer, error) {
+ params, err := params.ParseFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("setting up signing using parameter file %q: %w", path, err)
+ }
+ return newSignerFromParameterData(params, options)
+}
+
+// newSignerFromParameterData returns a signature.Signer which creates sigstore signatures based on parameter file contents.
+//
+// The caller must call Close() on the returned Signer.
+func newSignerFromParameterData(params *params.SigningParameterFile, options *Options) (*signer.Signer, error) {
+ opts := []sigstore.Option{}
+ if params.PrivateKeyFile != "" {
+ var getPassphrase func(keyFile string) (string, error)
+ switch {
+ case params.PrivateKeyPassphraseFile != "":
+ getPassphrase = func(_ string) (string, error) {
+ return cli.ReadPassphraseFile(params.PrivateKeyPassphraseFile)
+ }
+ case options.PrivateKeyPassphrasePrompt != nil:
+ getPassphrase = options.PrivateKeyPassphrasePrompt
+ default: // This shouldn’t happen, the caller is expected to set options.PrivateKeyPassphrasePrompt
+ return nil, fmt.Errorf("private key %s specified, but no way to get a passphrase", params.PrivateKeyFile)
+ }
+ passphrase, err := getPassphrase(params.PrivateKeyFile)
+ if err != nil {
+ return nil, err
+ }
+ opts = append(opts, sigstore.WithPrivateKeyFile(params.PrivateKeyFile, []byte(passphrase)))
+ }
+
+ if params.Fulcio != nil {
+ fulcioOpt, err := fulcioOption(params.Fulcio, options)
+ if err != nil {
+ return nil, err
+ }
+ opts = append(opts, fulcioOpt)
+ }
+
+ if params.RekorURL != "" {
+ rekorURL, err := url.Parse(params.RekorURL)
+ if err != nil {
+ return nil, fmt.Errorf("parsing rekorURL %q: %w", params.RekorURL, err)
+ }
+ opts = append(opts, rekor.WithRekor(rekorURL))
+ }
+
+ return sigstore.NewSigner(opts...)
+}
+
+// fulcioOption returns a sigstore.Option for Fulcio use based on f.
+func fulcioOption(f *params.SigningParameterFileFulcio, options *Options) (sigstore.Option, error) {
+ if f.FulcioURL == "" {
+ return nil, errors.New("missing fulcioURL")
+ }
+ fulcioURL, err := url.Parse(f.FulcioURL)
+ if err != nil {
+ return nil, fmt.Errorf("parsing fulcioURL %q: %w", f.FulcioURL, err)
+ }
+
+ if f.OIDCMode == params.OIDCModeStaticToken {
+ if f.OIDCIDToken == "" {
+ return nil, errors.New("missing oidcToken")
+ }
+ return fulcio.WithFulcioAndPreexistingOIDCIDToken(fulcioURL, f.OIDCIDToken), nil
+ }
+
+ if f.OIDCIssuerURL == "" {
+ return nil, errors.New("missing oidcIssuerURL")
+ }
+ oidcIssuerURL, err := url.Parse(f.OIDCIssuerURL)
+ if err != nil {
+ return nil, fmt.Errorf("parsing oidcIssuerURL %q: %w", f.OIDCIssuerURL, err)
+ }
+ switch f.OIDCMode {
+ case params.OIDCModeDeviceGrant:
+ return fulcio.WithFulcioAndDeviceAuthorizationGrantOIDC(fulcioURL, oidcIssuerURL, f.OIDCClientID, f.OIDCClientSecret,
+ options.Stdout), nil
+ case params.OIDCModeInteractive:
+ return fulcio.WithFulcioAndInteractiveOIDC(fulcioURL, oidcIssuerURL, f.OIDCClientID, f.OIDCClientSecret,
+ options.Stdin, options.Stdout), nil
+ case "":
+ return nil, errors.New("missing oidcMode")
+ case params.OIDCModeStaticToken:
+ return nil, errors.New("internal inconsistency: SigningParameterFileOIDCModeStaticToken was supposed to already be handled")
+ default:
+ return nil, fmt.Errorf("unknown oidcMode value %q", f.OIDCMode)
+ }
+}
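+
+// A minimal usage sketch (paramFilePath and promptForPassphrase are hypothetical caller-provided values):
+//
+//    s, err := sigstore.NewSignerFromParameterFile(paramFilePath, &sigstore.Options{
+//        PrivateKeyPassphrasePrompt: promptForPassphrase,
+//        Stdin:                      os.Stdin,
+//        Stdout:                     os.Stdout,
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    defer s.Close()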
diff --git a/pkg/compression/compression.go b/pkg/compression/compression.go
new file mode 100644
index 0000000..4443dda
--- /dev/null
+++ b/pkg/compression/compression.go
@@ -0,0 +1,165 @@
+package compression
+
+import (
+ "bytes"
+ "compress/bzip2"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/pkg/compression/internal"
+ "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/storage/pkg/chunked/compressor"
+ "github.com/klauspost/pgzip"
+ "github.com/sirupsen/logrus"
+ "github.com/ulikunitz/xz"
+)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm = types.Algorithm
+
+var (
+ // Gzip compression.
+ Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
+ []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
+ // Bzip2 compression.
+ Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
+ []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
+ // Xz compression.
+ Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
+ []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
+ // Zstd compression.
+ Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
+ []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+ // ZstdChunked is Zstd compression with chunk metadata which allows random access to individual files.
+ ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
+ nil, ZstdDecompressor, compressor.ZstdCompressor)
+
+ compressionAlgorithms = map[string]Algorithm{
+ Gzip.Name(): Gzip,
+ Bzip2.Name(): Bzip2,
+ Xz.Name(): Xz,
+ Zstd.Name(): Zstd,
+ ZstdChunked.Name(): ZstdChunked,
+ }
+)
+
+// AlgorithmByName returns the Algorithm with the specified name, or an error if no such algorithm is known.
+func AlgorithmByName(name string) (Algorithm, error) {
+ algorithm, ok := compressionAlgorithms[name]
+ if ok {
+ return algorithm, nil
+ }
+ return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
+}
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
+func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
+ return pgzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
+ return io.NopCloser(bzip2.NewReader(r)), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
+func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
+ r, err := xz.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ return io.NopCloser(r), nil
+}
+
+// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
+func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ if level != nil {
+ return pgzip.NewWriterLevel(r, *level)
+ }
+ return pgzip.NewWriter(r), nil
+}
+
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ return xz.NewWriter(r)
+}
+
+// CompressStream returns a writer that compresses data written to it with the specified algorithm (and optional level),
+// writing the compressed output to dest.
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+ m := map[string]string{}
+ return internal.AlgorithmCompressor(algo)(dest, m, level)
+}
+
+// CompressStreamWithMetadata returns a writer that compresses data written to it with the specified algorithm
+// (and optional level), writing the compressed output to dest. If the compression generates any metadata,
+// it is written to the provided metadata map.
+func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
+ return internal.AlgorithmCompressor(algo)(dest, metadata, level)
+}
+
+// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid
+// value and nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
+ buffer := [8]byte{}
+
+ n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+ // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+ return Algorithm{}, nil, nil, err
+ }
+
+ var retAlgo Algorithm
+ var decompressor DecompressorFunc
+ for _, algo := range compressionAlgorithms {
+ prefix := internal.AlgorithmPrefix(algo)
+ if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
+ logrus.Debugf("Detected compression format %s", algo.Name())
+ retAlgo = algo
+ decompressor = internal.AlgorithmDecompressor(algo)
+ break
+ }
+ }
+ if decompressor == nil {
+ logrus.Debugf("No compression detected")
+ }
+
+ return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+ _, d, r, e := DetectCompressionFormat(input)
+ return d, r, e
+}
+
+// AutoDecompress takes a stream and returns an uncompressed version of the
+// same stream.
+// The caller must call Close() on the returned stream (even if the input does not need,
+// or does not even support, closing!).
+func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
+ decompressor, stream, err := DetectCompression(stream)
+ if err != nil {
+ return nil, false, fmt.Errorf("detecting compression: %w", err)
+ }
+ var res io.ReadCloser
+ if decompressor != nil {
+ res, err = decompressor(stream)
+ if err != nil {
+ return nil, false, fmt.Errorf("initializing decompression: %w", err)
+ }
+ } else {
+ res = io.NopCloser(stream)
+ }
+ return res, decompressor != nil, nil
+}
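+
+// A minimal usage sketch (input is a hypothetical io.Reader carrying possibly-compressed data):
+//
+//    uncompressed, wasCompressed, err := compression.AutoDecompress(input)
+//    if err != nil {
+//        return err
+//    }
+//    defer uncompressed.Close()
+//    // wasCompressed reports whether a known compression format was detected and transparently removed.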
diff --git a/pkg/compression/compression_test.go b/pkg/compression/compression_test.go
new file mode 100644
index 0000000..4eeff95
--- /dev/null
+++ b/pkg/compression/compression_test.go
@@ -0,0 +1,128 @@
+package compression
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDetectCompression(t *testing.T) {
+ cases := []string{
+ "fixtures/Hello.uncompressed",
+ "fixtures/Hello.gz",
+ "fixtures/Hello.bz2",
+ "fixtures/Hello.xz",
+ "fixtures/Hello.zst",
+ }
+
+ // The original stream is preserved.
+ for _, c := range cases {
+ originalContents, err := os.ReadFile(c)
+ require.NoError(t, err, c)
+
+ stream, err := os.Open(c)
+ require.NoError(t, err, c)
+ defer stream.Close()
+
+ _, updatedStream, err := DetectCompression(stream)
+ require.NoError(t, err, c)
+
+ updatedContents, err := io.ReadAll(updatedStream)
+ require.NoError(t, err, c)
+ assert.Equal(t, originalContents, updatedContents, c)
+ }
+
+ // The correct decompressor is chosen, and the result is as expected.
+ for _, c := range cases {
+ stream, err := os.Open(c)
+ require.NoError(t, err, c)
+ defer stream.Close()
+
+ decompressor, updatedStream, err := DetectCompression(stream)
+ require.NoError(t, err, c)
+
+ var uncompressedStream io.Reader
+ if decompressor == nil {
+ uncompressedStream = updatedStream
+ } else {
+ s, err := decompressor(updatedStream)
+ require.NoError(t, err)
+ defer s.Close()
+ uncompressedStream = s
+ }
+
+ uncompressedContents, err := io.ReadAll(uncompressedStream)
+ require.NoError(t, err, c)
+ assert.Equal(t, []byte("Hello"), uncompressedContents, c)
+ }
+
+ // Empty input is handled reasonably.
+ decompressor, updatedStream, err := DetectCompression(bytes.NewReader([]byte{}))
+ require.NoError(t, err)
+ assert.Nil(t, decompressor)
+ updatedContents, err := io.ReadAll(updatedStream)
+ require.NoError(t, err)
+ assert.Equal(t, []byte{}, updatedContents)
+
+ // Error reading input
+ reader, writer := io.Pipe()
+ defer reader.Close()
+ err = writer.CloseWithError(errors.New("Expected error reading input in DetectCompression"))
+ assert.NoError(t, err)
+ _, _, err = DetectCompression(reader)
+ assert.Error(t, err)
+}
+
+func TestAutoDecompress(t *testing.T) {
+ cases := []struct {
+ filename string
+ isCompressed bool
+ }{
+ {"fixtures/Hello.uncompressed", false},
+ {"fixtures/Hello.gz", true},
+ {"fixtures/Hello.bz2", true},
+ {"fixtures/Hello.xz", true},
+ }
+
+ // The correct decompressor is chosen, and the result is as expected.
+ for _, c := range cases {
+ stream, err := os.Open(c.filename)
+ require.NoError(t, err, c.filename)
+ defer stream.Close()
+
+ uncompressedStream, isCompressed, err := AutoDecompress(stream)
+ require.NoError(t, err, c.filename)
+ defer uncompressedStream.Close()
+
+ assert.Equal(t, c.isCompressed, isCompressed)
+
+ uncompressedContents, err := io.ReadAll(uncompressedStream)
+ require.NoError(t, err, c.filename)
+ assert.Equal(t, []byte("Hello"), uncompressedContents, c.filename)
+ }
+
+ // Empty input is handled reasonably.
+ uncompressedStream, isCompressed, err := AutoDecompress(bytes.NewReader([]byte{}))
+ require.NoError(t, err)
+ assert.False(t, isCompressed)
+ uncompressedContents, err := io.ReadAll(uncompressedStream)
+ require.NoError(t, err)
+ assert.Equal(t, []byte{}, uncompressedContents)
+
+ // Error initializing a decompressor (for a detected format)
+ _, _, err = AutoDecompress(bytes.NewReader([]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}))
+ assert.Error(t, err)
+
+ // Error reading input
+ reader, writer := io.Pipe()
+ defer reader.Close()
+ err = writer.CloseWithError(errors.New("Expected error reading input in AutoDecompress"))
+ require.NoError(t, err)
+ _, _, err = AutoDecompress(reader)
+ assert.Error(t, err)
+}
diff --git a/pkg/compression/fixtures/Hello.bz2 b/pkg/compression/fixtures/Hello.bz2
new file mode 100644
index 0000000..e822f5e
--- /dev/null
+++ b/pkg/compression/fixtures/Hello.bz2
Binary files differ
diff --git a/pkg/compression/fixtures/Hello.gz b/pkg/compression/fixtures/Hello.gz
new file mode 100644
index 0000000..22c895b
--- /dev/null
+++ b/pkg/compression/fixtures/Hello.gz
Binary files differ
diff --git a/pkg/compression/fixtures/Hello.uncompressed b/pkg/compression/fixtures/Hello.uncompressed
new file mode 100644
index 0000000..5ab2f8a
--- /dev/null
+++ b/pkg/compression/fixtures/Hello.uncompressed
@@ -0,0 +1 @@
+Hello \ No newline at end of file
diff --git a/pkg/compression/fixtures/Hello.xz b/pkg/compression/fixtures/Hello.xz
new file mode 100644
index 0000000..6e9b0b6
--- /dev/null
+++ b/pkg/compression/fixtures/Hello.xz
Binary files differ
diff --git a/pkg/compression/fixtures/Hello.zst b/pkg/compression/fixtures/Hello.zst
new file mode 100644
index 0000000..02770a6
--- /dev/null
+++ b/pkg/compression/fixtures/Hello.zst
Binary files differ
diff --git a/pkg/compression/internal/types.go b/pkg/compression/internal/types.go
new file mode 100644
index 0000000..ba619be
--- /dev/null
+++ b/pkg/compression/internal/types.go
@@ -0,0 +1,65 @@
+package internal
+
+import "io"
+
+// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
+// The caller must call Close() on the stream (even if the input stream does not need closing!).
+type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm struct {
+ name string
+ mime string
+ prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
+ decompressor DecompressorFunc
+ compressor CompressorFunc
+}
+
+// NewAlgorithm creates an Algorithm instance.
+// This function exists so that Algorithm instances can only be created by code that
+// is allowed to import this internal subpackage.
+func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+ return Algorithm{
+ name: name,
+ mime: mime,
+ prefix: prefix,
+ decompressor: decompressor,
+ compressor: compressor,
+ }
+}
+
+// Name returns the name for the compression algorithm.
+func (c Algorithm) Name() string {
+ return c.name
+}
+
+// InternalUnstableUndocumentedMIMEQuestionMark ???
+// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
+func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
+ return c.mime
+}
+
+// AlgorithmCompressor returns the compressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmCompressor(algo Algorithm) CompressorFunc {
+ return algo.compressor
+}
+
+// AlgorithmDecompressor returns the decompressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
+ return algo.decompressor
+}
+
+// AlgorithmPrefix returns the prefix field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmPrefix(algo Algorithm) []byte {
+ return algo.prefix
+}
diff --git a/pkg/compression/types/types.go b/pkg/compression/types/types.go
new file mode 100644
index 0000000..43d03b6
--- /dev/null
+++ b/pkg/compression/types/types.go
@@ -0,0 +1,41 @@
+package types
+
+import (
+ "github.com/containers/image/v5/pkg/compression/internal"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// Algorithm is a compression algorithm provided and supported by pkg/compression.
+// It can’t be supplied from the outside.
+type Algorithm = internal.Algorithm
+
+const (
+ // GzipAlgorithmName is the name used by pkg/compression.Gzip.
+ // NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ GzipAlgorithmName = "gzip"
+ // Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
+ // NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ Bzip2AlgorithmName = "bzip2"
+ // XzAlgorithmName is the name used by pkg/compression.Xz.
+ // NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ XzAlgorithmName = "Xz"
+ // ZstdAlgorithmName is the name used by pkg/compression.Zstd.
+ // NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdAlgorithmName = "zstd"
+ // ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
+ // NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdChunkedAlgorithmName = "zstd:chunked"
+)
diff --git a/pkg/compression/zstd.go b/pkg/compression/zstd.go
new file mode 100644
index 0000000..39ae014
--- /dev/null
+++ b/pkg/compression/zstd.go
@@ -0,0 +1,59 @@
+package compression
+
+import (
+ "io"
+
+ "github.com/klauspost/compress/zstd"
+)
+
+type wrapperZstdDecoder struct {
+ decoder *zstd.Decoder
+}
+
+func (w *wrapperZstdDecoder) Close() error {
+ w.decoder.Close()
+ return nil
+}
+
+func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
+ return w.decoder.DecodeAll(input, dst)
+}
+
+func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
+ return w.decoder.Read(p)
+}
+
+func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
+ return w.decoder.Reset(r)
+}
+
+func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
+ return w.decoder.WriteTo(wr)
+}
+
+func zstdReader(buf io.Reader) (io.ReadCloser, error) {
+ decoder, err := zstd.NewReader(buf)
+ return &wrapperZstdDecoder{decoder: decoder}, err
+}
+
+func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
+ return zstd.NewWriter(dest)
+}
+
+func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
+ el := zstd.EncoderLevelFromZstd(level)
+ return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
+
+// zstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ if level == nil {
+ return zstdWriter(r)
+ }
+ return zstdWriterWithLevel(r, *level)
+}
+
+// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm.
+func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) {
+ return zstdReader(r)
+}
diff --git a/pkg/docker/config/config.go b/pkg/docker/config/config.go
new file mode 100644
index 0000000..c61065c
--- /dev/null
+++ b/pkg/docker/config/config.go
@@ -0,0 +1,943 @@
+package config
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/containers/storage/pkg/ioutils"
+ helperclient "github.com/docker/docker-credential-helpers/client"
+ "github.com/docker/docker-credential-helpers/credentials"
+ "github.com/hashicorp/go-multierror"
+ "github.com/sirupsen/logrus"
+)
+
+type dockerAuthConfig struct {
+ Auth string `json:"auth,omitempty"`
+ IdentityToken string `json:"identitytoken,omitempty"`
+}
+
+type dockerConfigFile struct {
+ AuthConfigs map[string]dockerAuthConfig `json:"auths"`
+ CredHelpers map[string]string `json:"credHelpers,omitempty"`
+}
+
+var (
+ defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
+ xdgConfigHomePath = filepath.FromSlash("containers/auth.json")
+ xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json")
+ dockerHomePath = filepath.FromSlash(".docker/config.json")
+ dockerLegacyHomePath = ".dockercfg"
+ nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json")
+
+ // ErrNotLoggedIn is returned for users not logged into a registry
+ // that they are trying to log out of
+ ErrNotLoggedIn = errors.New("not logged in")
+ // ErrNotSupported is returned for unsupported methods
+ ErrNotSupported = errors.New("not supported")
+)
+
+// authPath combines a path to a file with container registry credentials,
+// along with expected properties of that path (currently just whether it's
+// legacy format or not).
+type authPath struct {
+ path string
+ legacyFormat bool
+}
+
+// newAuthPathDefault constructs an authPath in non-legacy format.
+func newAuthPathDefault(path string) authPath {
+ return authPath{path: path, legacyFormat: false}
+}
+
+// GetAllCredentials returns the registry credentials for all registries stored
+// in any of the configured credential helpers.
+func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
+ // To keep things simple, let's first extract all registries from all
+ // possible sources, and then call `GetCredentials` on them. That
+ // prevents us from having to reverse engineer the logic in
+ // `GetCredentials`.
+ allKeys := set.New[string]()
+
+ // To use GetCredentials, we must at least convert the URL forms into host names.
+ // While we're at it, we’ll also canonicalize docker.io to the standard format.
+ normalizedDockerIORegistry := normalizeRegistry("docker.io")
+
+ helpers, err := sysregistriesv2.CredentialHelpers(sys)
+ if err != nil {
+ return nil, err
+ }
+ for _, helper := range helpers {
+ switch helper {
+ // Special-case the built-in helper for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ for _, path := range getAuthFilePaths(sys, homedir.Get()) {
+ // parse returns an empty map in case the path doesn't exist.
+ fileContents, err := path.parse()
+ if err != nil {
+ return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
+ }
+ // Credential helpers in the auth file have a
+ // direct mapping to a registry, so we can just
+ // walk the map.
+ for registry := range fileContents.CredHelpers {
+ allKeys.Add(registry)
+ }
+ for key := range fileContents.AuthConfigs {
+ key := normalizeAuthFileKey(key, path.legacyFormat)
+ if key == normalizedDockerIORegistry {
+ key = "docker.io"
+ }
+ allKeys.Add(key)
+ }
+ }
+ // External helpers.
+ default:
+ creds, err := listCredsInCredHelper(helper)
+ if err != nil {
+ logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
+ if errors.Is(err, exec.ErrNotFound) {
+ creds = nil // It's okay if the helper doesn't exist.
+ } else {
+ return nil, err
+ }
+ }
+ for registry := range creds {
+ allKeys.Add(registry)
+ }
+ }
+ }
+
+ // Now use `GetCredentials` to get the specific auth configs for each
+ // previously listed registry.
+ allCreds := make(map[string]types.DockerAuthConfig)
+ for _, key := range allKeys.Values() {
+ creds, err := GetCredentials(sys, key)
+ if err != nil {
+ // Note: we rely on the logging in `GetCredentials`.
+ return nil, err
+ }
+ if creds != (types.DockerAuthConfig{}) {
+ allCreds[key] = creds
+ }
+ }
+
+ return allCreds, nil
+}
+
+// getAuthFilePaths returns a slice of authPaths based on the system context
+// in the order they should be searched. Note that some paths may not exist.
+// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden
+// by tests.
+func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
+ paths := []authPath{}
+ pathToAuth, userSpecifiedPath, err := getPathToAuth(sys)
+ if err == nil {
+ paths = append(paths, pathToAuth)
+ } else {
+ // The error means that the path set for XDG_RUNTIME_DIR does not exist,
+ // but we don't want to fail completely in case the user is pulling a public image.
+ // Log the error as a warning instead, and move on to pulling the image.
+ logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
+ }
+ if !userSpecifiedPath {
+ xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
+ if xdgCfgHome == "" {
+ xdgCfgHome = filepath.Join(homeDir, ".config")
+ }
+ paths = append(paths, newAuthPathDefault(filepath.Join(xdgCfgHome, xdgConfigHomePath)))
+ if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
+ paths = append(paths, newAuthPathDefault(filepath.Join(dockerConfig, "config.json")))
+ } else {
+ paths = append(paths,
+ newAuthPathDefault(filepath.Join(homeDir, dockerHomePath)),
+ )
+ }
+ paths = append(paths,
+ authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
+ )
+ }
+ return paths
+}
+
+// GetCredentials returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
+//
+// GetCredentialsForRef should almost always be used in favor of this API.
+func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) {
+ return getCredentialsWithHomeDir(sys, key, homedir.Get())
+}
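+
+// For illustration (sys is a caller-provided *types.SystemContext, and quay.io/podman/stable is only an example
+// image repository), all of the following are valid keys:
+//
+//    creds, err := config.GetCredentials(sys, "quay.io/podman/stable") // a repository
+//    creds, err = config.GetCredentials(sys, "quay.io/podman")         // a namespace within a registry
+//    creds, err = config.GetCredentials(sys, "quay.io")                // a registry hostname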
+
+// GetCredentialsForRef returns the registry credentials necessary for
+// accessing ref on the registry ref points to,
+// appropriate for sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
+ return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get())
+}
+
+// getCredentialsWithHomeDir is an internal implementation detail of
+// GetCredentialsForRef and GetCredentials. It exists only to allow testing it
+// with an artificial home directory.
+func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) {
+ _, err := validateKey(key)
+ if err != nil {
+ return types.DockerAuthConfig{}, err
+ }
+
+ if sys != nil && sys.DockerAuthConfig != nil {
+ logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key)
+ return *sys.DockerAuthConfig, nil
+ }
+
+ var registry string // We compute this once because it is used in several places.
+ if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
+ registry = key[:firstSlash]
+ } else {
+ registry = key
+ }
+
+ // Anonymous function to query credentials from auth files.
+ getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
+ for _, path := range getAuthFilePaths(sys, homeDir) {
+ creds, err := findCredentialsInFile(key, registry, path)
+ if err != nil {
+ return types.DockerAuthConfig{}, "", err
+ }
+
+ if creds != (types.DockerAuthConfig{}) {
+ return creds, path.path, nil
+ }
+ }
+ return types.DockerAuthConfig{}, "", nil
+ }
+
+ helpers, err := sysregistriesv2.CredentialHelpers(sys)
+ if err != nil {
+ return types.DockerAuthConfig{}, err
+ }
+
+ var multiErr error
+ for _, helper := range helpers {
+ var (
+ creds types.DockerAuthConfig
+ helperKey string
+ credHelperPath string
+ err error
+ )
+ switch helper {
+ // Special-case the built-in helper for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ helperKey = key
+ creds, credHelperPath, err = getCredentialsFromAuthFiles()
+ // External helpers.
+ default:
+ // This intentionally uses "registry", not "key"; we don't support namespaced
+ // credentials in helpers, but a "registry" is a valid parent of "key".
+ helperKey = registry
+ creds, err = getCredsFromCredHelper(helper, registry)
+ }
+ if err != nil {
+ logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
+ multiErr = multierror.Append(multiErr, err)
+ continue
+ }
+ if creds != (types.DockerAuthConfig{}) {
+ msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper)
+ if credHelperPath != "" {
+ msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
+ }
+ logrus.Debug(msg)
+ return creds, nil
+ }
+ }
+ if multiErr != nil {
+ return types.DockerAuthConfig{}, multiErr
+ }
+
+ logrus.Debugf("No credentials for %s found", key)
+ return types.DockerAuthConfig{}, nil
+}
+
+// GetAuthentication returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
+//
+// Deprecated: This API only supports username and password. For identity-token
+// (OAuth2) support in container registry authentication, use the newer
+// GetCredentialsForRef and GetCredentials APIs; this function is kept only for
+// backward compatibility.
+func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) {
+ return getAuthenticationWithHomeDir(sys, key, homedir.Get())
+}
+
+// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
+// it exists only to allow testing it with an artificial home directory.
+func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
+ creds, err := getCredentialsWithHomeDir(sys, key, homeDir)
+ if err != nil {
+ return "", "", err
+ }
+ if creds.IdentityToken != "" {
+ return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
+ }
+ return creds.Username, creds.Password, nil
+}
+
+// SetCredentials stores the username and password in a location
+// appropriate for sys and the users’ configuration.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
+// Returns a human-readable description of the location that was updated.
+// NOTE: The return value is only intended to be read by humans; its form is not an API,
+// it may change (or new forms can be added) any time.
+func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
+ helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
+ if err != nil {
+ return "", err
+ }
+
+ // Make sure to collect all errors.
+ var multiErr error
+ for _, helper := range helpers {
+ var desc string
+ var err error
+ switch helper {
+ // Special-case the built-in helpers for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ desc, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ if ch, exists := fileContents.CredHelpers[key]; exists {
+ if isNamespaced {
+ return false, "", unsupportedNamespaceErr(ch)
+ }
+ desc, err := setCredsInCredHelper(ch, key, username, password)
+ if err != nil {
+ return false, "", err
+ }
+ return false, desc, nil
+ }
+ creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+ newCreds := dockerAuthConfig{Auth: creds}
+ fileContents.AuthConfigs[key] = newCreds
+ return true, "", nil
+ })
+ // External helpers.
+ default:
+ if isNamespaced {
+ err = unsupportedNamespaceErr(helper)
+ } else {
+ desc, err = setCredsInCredHelper(helper, key, username, password)
+ }
+ }
+ if err != nil {
+ multiErr = multierror.Append(multiErr, err)
+ logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
+ continue
+ }
+ logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
+ return desc, nil
+ }
+ return "", multiErr
+}
+
+func unsupportedNamespaceErr(helper string) error {
+ return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
+}
+
+// SetAuthentication stores the username and password in the credential helper or file
+// See the documentation of SetCredentials for the format of "key".
+func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
+ _, err := SetCredentials(sys, key, username, password)
+ return err
+}
+
+// RemoveAuthentication removes credentials for `key` from all possible
+// sources such as credential helpers and auth files.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
+func RemoveAuthentication(sys *types.SystemContext, key string) error {
+ helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
+ if err != nil {
+ return err
+ }
+
+ var multiErr error
+ isLoggedIn := false
+
+ removeFromCredHelper := func(helper string) {
+ if isNamespaced {
+ logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
+ return
+ }
+ err := deleteCredsFromCredHelper(helper, key)
+ if err == nil {
+ logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
+ isLoggedIn = true
+ return
+ }
+ if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+ logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
+ return
+ }
+ multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
+ }
+
+ for _, helper := range helpers {
+ var err error
+ switch helper {
+ // Special-case the built-in helper for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ if innerHelper, exists := fileContents.CredHelpers[key]; exists {
+ removeFromCredHelper(innerHelper)
+ }
+ if _, ok := fileContents.AuthConfigs[key]; ok {
+ isLoggedIn = true
+ delete(fileContents.AuthConfigs, key)
+ }
+ return true, "", multiErr
+ })
+ if err != nil {
+ multiErr = multierror.Append(multiErr, err)
+ }
+ // External helpers.
+ default:
+ removeFromCredHelper(helper)
+ }
+ }
+
+ if multiErr != nil {
+ return multiErr
+ }
+ if !isLoggedIn {
+ return ErrNotLoggedIn
+ }
+
+ return nil
+}
+
+// RemoveAllAuthentication deletes all the credentials stored in credential
+// helpers and auth files.
+func RemoveAllAuthentication(sys *types.SystemContext) error {
+ helpers, jsonEditor, _, _, err := prepareForEdit(sys, "", false)
+ if err != nil {
+ return err
+ }
+
+ var multiErr error
+ for _, helper := range helpers {
+ var err error
+ switch helper {
+ // Special-case the built-in helper for auth files.
+ case sysregistriesv2.AuthenticationFileHelper:
+ _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+ for registry, helper := range fileContents.CredHelpers {
+ // Helpers in auth files are expected
+ // to exist, so no special treatment
+ // for them.
+ if err := deleteCredsFromCredHelper(helper, registry); err != nil {
+ return false, "", err
+ }
+ }
+ fileContents.CredHelpers = make(map[string]string)
+ fileContents.AuthConfigs = make(map[string]dockerAuthConfig)
+ return true, "", nil
+ })
+ // External helpers.
+ default:
+ var creds map[string]string
+ creds, err = listCredsInCredHelper(helper)
+ if err != nil {
+ if errors.Is(err, exec.ErrNotFound) {
+ // It's okay if the helper doesn't exist.
+ continue
+ } else {
+ break
+ }
+ }
+ for registry := range creds {
+ err = deleteCredsFromCredHelper(helper, registry)
+ if err != nil {
+ break
+ }
+ }
+ }
+ if err != nil {
+ logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
+ multiErr = multierror.Append(multiErr, err)
+ continue
+ }
+ logrus.Debugf("All credentials removed from credential helper %s", helper)
+ }
+
+ return multiErr
+}
+
+// prepareForEdit processes sys and key (if keyRelevant) to return:
+// - a list of credential helpers
+// - a function which can be used to edit the JSON file
+// - the key value to actually use in credential helpers / JSON
+// - a boolean which is true if key is namespaced (and should not be used with credential helpers).
+func prepareForEdit(sys *types.SystemContext, key string, keyRelevant bool) ([]string, func(*types.SystemContext, func(*dockerConfigFile) (bool, string, error)) (string, error), string, bool, error) {
+ var isNamespaced bool
+ if keyRelevant {
+ ns, err := validateKey(key)
+ if err != nil {
+ return nil, nil, "", false, err
+ }
+ isNamespaced = ns
+ }
+
+ if sys != nil && sys.DockerCompatAuthFilePath != "" {
+ if sys.AuthFilePath != "" {
+ return nil, nil, "", false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
+ }
+ if keyRelevant {
+ if isNamespaced {
+ return nil, nil, "", false, fmt.Errorf("Credentials cannot be recorded in Docker-compatible format with namespaced key %q", key)
+ }
+ if key == "docker.io" {
+ key = "https://index.docker.io/v1/"
+ }
+ }
+
+ // Do not use helpers defined in sysregistriesv2 because Docker isn’t aware of them.
+ return []string{sysregistriesv2.AuthenticationFileHelper}, modifyDockerConfigJSON, key, false, nil
+ }
+
+ helpers, err := sysregistriesv2.CredentialHelpers(sys)
+ if err != nil {
+ return nil, nil, "", false, err
+ }
+
+ return helpers, modifyJSON, key, isNamespaced, nil
+}
+
+func listCredsInCredHelper(credHelper string) (map[string]string, error) {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ return helperclient.List(p)
+}
+
+// getPathToAuth gets the path of the auth.json file used for reading and writing credentials,
+// and a boolean indicating whether the return value came from an explicit user choice (i.e. not defaults).
+func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) {
+ return getPathToAuthWithOS(sys, runtime.GOOS)
+}
+
+// getPathToAuthWithOS is an internal implementation detail of getPathToAuth,
+// it exists only to allow testing it with an artificial runtime.GOOS.
+func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) {
+ if sys != nil {
+ if sys.AuthFilePath != "" && sys.DockerCompatAuthFilePath != "" {
+ return authPath{}, false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
+ }
+ if sys.AuthFilePath != "" {
+ return newAuthPathDefault(sys.AuthFilePath), true, nil
+ }
+ // When reading, we can process auth.json and Docker’s config.json with the same code.
+ // When writing, prepareForEdit chooses an appropriate jsonEditor implementation.
+ if sys.DockerCompatAuthFilePath != "" {
+ return newAuthPathDefault(sys.DockerCompatAuthFilePath), true, nil
+ }
+ if sys.LegacyFormatAuthFilePath != "" {
+ return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil
+ }
+ // Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME
+ if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" {
+ return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil
+ }
+ }
+ if goOS != "linux" {
+ return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil
+ }
+
+ runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+ if runtimeDir != "" {
+ // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway.
+		// We are checking for os.IsNotExist here only to give the user better guidance on what to do in this special case.
+ _, err := os.Stat(runtimeDir)
+ if os.IsNotExist(err) {
+ // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
+ // or made a typo while setting the environment variable,
+ // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
+ return authPath{}, false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err)
+ } // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
+ return newAuthPathDefault(filepath.Join(runtimeDir, xdgRuntimeDirPath)), false, nil
+ }
+ return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
+}
+
+// parse unmarshals the credentials stored in the auth.json file and returns them,
+// or returns an empty dockerConfigFile data structure if auth.json does not exist.
+// If the file exists but is empty, this function returns an error.
+func (path authPath) parse() (dockerConfigFile, error) {
+ var fileContents dockerConfigFile
+
+ raw, err := os.ReadFile(path.path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ fileContents.AuthConfigs = map[string]dockerAuthConfig{}
+ return fileContents, nil
+ }
+ return dockerConfigFile{}, err
+ }
+
+ if path.legacyFormat {
+ if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil {
+ return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
+ }
+ return fileContents, nil
+ }
+
+ if err = json.Unmarshal(raw, &fileContents); err != nil {
+ return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
+ }
+
+ if fileContents.AuthConfigs == nil {
+ fileContents.AuthConfigs = map[string]dockerAuthConfig{}
+ }
+ if fileContents.CredHelpers == nil {
+ fileContents.CredHelpers = make(map[string]string)
+ }
+
+ return fileContents, nil
+}
+
+// modifyJSON finds an auth.json file, calls editor on the contents, and
+// writes it back if editor returns true.
+// Returns a human-readable description of the file, to be returned by SetCredentials.
+//
+// The editor may also return a human-readable description of the updated location; if it is "",
+// the file itself is used.
+func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
+ path, _, err := getPathToAuth(sys)
+ if err != nil {
+ return "", err
+ }
+ if path.legacyFormat {
+ return "", fmt.Errorf("writes to %s using legacy format are not supported", path.path)
+ }
+
+ dir := filepath.Dir(path.path)
+ if err = os.MkdirAll(dir, 0700); err != nil {
+ return "", err
+ }
+
+ fileContents, err := path.parse()
+ if err != nil {
+ return "", fmt.Errorf("reading JSON file %q: %w", path.path, err)
+ }
+
+ updated, description, err := editor(&fileContents)
+ if err != nil {
+ return "", fmt.Errorf("updating %q: %w", path.path, err)
+ }
+ if updated {
+ newData, err := json.MarshalIndent(fileContents, "", "\t")
+ if err != nil {
+ return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err)
+ }
+
+ if err = ioutils.AtomicWriteFile(path.path, newData, 0600); err != nil {
+ return "", fmt.Errorf("writing to file %q: %w", path.path, err)
+ }
+ }
+
+ if description == "" {
+ description = path.path
+ }
+ return description, nil
+}
+
+// modifyDockerConfigJSON finds a docker config.json file, calls editor on the contents, and
+// writes it back if editor returns true.
+// Returns a human-readable description of the file, to be returned by SetCredentials.
+//
+// The editor may also return a human-readable description of the updated location; if it is "",
+// the file itself is used.
+func modifyDockerConfigJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
+ if sys == nil || sys.DockerCompatAuthFilePath == "" {
+ return "", errors.New("internal error: modifyDockerConfigJSON called with DockerCompatAuthFilePath not set")
+ }
+ path := sys.DockerCompatAuthFilePath
+
+ dir := filepath.Dir(path)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return "", err
+ }
+
+ // Try hard not to clobber fields we don’t understand, even fields which may be added in future Docker versions.
+ var rawContents map[string]json.RawMessage
+ originalBytes, err := os.ReadFile(path)
+ switch {
+ case err == nil:
+ if err := json.Unmarshal(originalBytes, &rawContents); err != nil {
+ return "", fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
+ }
+ case errors.Is(err, fs.ErrNotExist):
+ rawContents = map[string]json.RawMessage{}
+ default: // err != nil
+ return "", err
+ }
+
+ syntheticContents := dockerConfigFile{
+ AuthConfigs: map[string]dockerAuthConfig{},
+ CredHelpers: map[string]string{},
+ }
+ // json.Unmarshal also falls back to case-insensitive field matching; this code does not do that. Presumably
+ // config.json is mostly maintained by machines doing `docker login`, so the files should, hopefully, not contain field names with
+ // unexpected case.
+ if rawAuths, ok := rawContents["auths"]; ok {
+ // This conversion will lose fields we don’t know about; when updating an entry, we can’t tell whether an unknown field
+ // should be preserved or discarded (because it is made obsolete/unwanted with the new credentials).
+ // It might make sense to track which entries of "auths" we actually modified, and to not touch any others.
+ if err := json.Unmarshal(rawAuths, &syntheticContents.AuthConfigs); err != nil {
+ return "", fmt.Errorf(`unmarshaling "auths" in JSON at %q: %w`, path, err)
+ }
+ }
+ if rawCH, ok := rawContents["credHelpers"]; ok {
+ if err := json.Unmarshal(rawCH, &syntheticContents.CredHelpers); err != nil {
+ return "", fmt.Errorf(`unmarshaling "credHelpers" in JSON at %q: %w`, path, err)
+
+ }
+ }
+
+ updated, description, err := editor(&syntheticContents)
+ if err != nil {
+ return "", fmt.Errorf("updating %q: %w", path, err)
+ }
+ if updated {
+ rawAuths, err := json.MarshalIndent(syntheticContents.AuthConfigs, "", "\t")
+ if err != nil {
+ return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
+ }
+ rawContents["auths"] = rawAuths
+ // We never modify syntheticContents.CredHelpers, so we don’t need to update it.
+ newData, err := json.MarshalIndent(rawContents, "", "\t")
+ if err != nil {
+ return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
+ }
+
+ if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
+ return "", fmt.Errorf("writing to file %q: %w", path, err)
+ }
+ }
+
+ if description == "" {
+ description = path
+ }
+ return description, nil
+}
+
+func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ creds, err := helperclient.Get(p, registry)
+ if err != nil {
+ if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+ logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper)
+ err = nil
+ }
+ return types.DockerAuthConfig{}, err
+ }
+
+ switch creds.Username {
+ case "<token>":
+ return types.DockerAuthConfig{
+ IdentityToken: creds.Secret,
+ }, nil
+ default:
+ return types.DockerAuthConfig{
+ Username: creds.Username,
+ Password: creds.Secret,
+ }, nil
+ }
+}
+
+// setCredsInCredHelper stores (username, password) for registry in credHelper.
+// Returns a human-readable description of the destination, to be returned by SetCredentials.
+func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ creds := &credentials.Credentials{
+ ServerURL: registry,
+ Username: username,
+ Secret: password,
+ }
+ if err := helperclient.Store(p, creds); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("credential helper: %s", credHelper), nil
+}
+
+func deleteCredsFromCredHelper(credHelper, registry string) error {
+ helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+ p := helperclient.NewShellProgramFunc(helperName)
+ return helperclient.Erase(p, registry)
+}
+
+// findCredentialsInFile looks for credentials matching "key"
+// (which is "registry" or a namespace in "registry") in "path".
+func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) {
+ fileContents, err := path.parse()
+ if err != nil {
+ return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err)
+ }
+
+ // First try cred helpers. They should always be normalized.
+ // This intentionally uses "registry", not "key"; we don't support namespaced
+ // credentials in helpers.
+ if ch, exists := fileContents.CredHelpers[registry]; exists {
+ logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path)
+ return getCredsFromCredHelper(ch, registry)
+ }
+
+ // Support sub-registry namespaces in auth.
+ // (This is not a feature of ~/.docker/config.json; we support it even for
+ // those files as an extension.)
+ var keys []string
+ if !path.legacyFormat {
+ keys = authKeysForKey(key)
+ } else {
+ keys = []string{registry}
+ }
+
+ // Repo or namespace keys are only supported as exact matches. For registry
+ // keys we prefer exact matches as well.
+ for _, key := range keys {
+ if val, exists := fileContents.AuthConfigs[key]; exists {
+ return decodeDockerAuth(path.path, key, val)
+ }
+ }
+
+ // bad luck; let's normalize the entries first
+ // This primarily happens for legacyFormat, which for a time used API URLs
+ // (http[s:]//…/v1/) as keys.
+ // Secondarily, (docker login) accepted URLs with no normalization for
+ // several years, and matched registry hostnames against that, so support
+ // those entries even in non-legacyFormat ~/.docker/config.json.
+ // The docker.io registry still uses the /v1/ key with a special host name,
+ // so account for that as well.
+ registry = normalizeRegistry(registry)
+ for k, v := range fileContents.AuthConfigs {
+ if normalizeAuthFileKey(k, path.legacyFormat) == registry {
+ return decodeDockerAuth(path.path, k, v)
+ }
+ }
+
+ // Only log this if we found nothing; getCredentialsWithHomeDir logs the
+ // source of found data.
+ logrus.Debugf("No credentials matching %s found in %s", key, path.path)
+ return types.DockerAuthConfig{}, nil
+}
+
+// authKeysForKey returns the keys matching a provided auth file key, in order
+// from the best match to worst. For example,
+// when given a repository key "quay.io/repo/ns/image", it returns
+// - quay.io/repo/ns/image
+// - quay.io/repo/ns
+// - quay.io/repo
+// - quay.io
+func authKeysForKey(key string) (res []string) {
+ for {
+ res = append(res, key)
+
+ lastSlash := strings.LastIndex(key, "/")
+ if lastSlash == -1 {
+ break
+ }
+ key = key[:lastSlash]
+ }
+
+ return res
+}
+
+// decodeDockerAuth decodes the username and password from conf,
+// which is entry key in path.
+func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
+ decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
+ if err != nil {
+ return types.DockerAuthConfig{}, err
+ }
+
+ user, passwordPart, valid := strings.Cut(string(decoded), ":")
+ if !valid {
+ // if it's invalid just skip, as docker does
+ if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those
+			logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing colon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log.
+ } else {
+ logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
+ }
+ return types.DockerAuthConfig{}, nil
+ }
+
+ password := strings.Trim(passwordPart, "\x00")
+ return types.DockerAuthConfig{
+ Username: user,
+ Password: password,
+ IdentityToken: conf.IdentityToken,
+ }, nil
+}
+
+// normalizeAuthFileKey takes a key, converts it to a host name and normalizes
+// the resulting registry.
+func normalizeAuthFileKey(key string, legacyFormat bool) string {
+ stripped := strings.TrimPrefix(key, "http://")
+ stripped = strings.TrimPrefix(stripped, "https://")
+
+ if legacyFormat || stripped != key {
+ stripped, _, _ = strings.Cut(stripped, "/")
+ }
+
+ return normalizeRegistry(stripped)
+}
+
+// normalizeRegistry converts the provided registry if a known docker.io host
+// is provided.
+func normalizeRegistry(registry string) string {
+ switch registry {
+ case "registry-1.docker.io", "docker.io":
+ return "index.docker.io"
+ }
+ return registry
+}
+
+// validateKey verifies that the input key does not have a disallowed prefix
+// and returns whether the key is namespaced.
+func validateKey(key string) (bool, error) {
+ if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
+ return false, fmt.Errorf("key %s contains http[s]:// prefix", key)
+ }
+
+ // Ideally this should only accept explicitly valid keys, compare
+ // validateIdentityRemappingPrefix. For now, just reject values that look
+ // like tagged or digested values.
+ if strings.ContainsRune(key, '@') {
+ return false, fmt.Errorf(`key %s contains a '@' character`, key)
+ }
+
+ firstSlash := strings.IndexRune(key, '/')
+ isNamespaced := firstSlash != -1
+ // Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo.
+ if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') {
+ return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key)
+ }
+ // check if the provided key contains one or more subpaths.
+ return isNamespaced, nil
+}
diff --git a/pkg/docker/config/config_test.go b/pkg/docker/config/config_test.go
new file mode 100644
index 0000000..5b5402e
--- /dev/null
+++ b/pkg/docker/config/config_test.go
@@ -0,0 +1,1059 @@
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ dockerReference "github.com/distribution/reference"
+ "github.com/docker/cli/cli/config"
+ configtypes "github.com/docker/cli/cli/config/types"
+ "github.com/docker/docker/registry"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetPathToAuth(t *testing.T) {
+ const linux = "linux"
+ const darwin = "darwin"
+ const freebsd = "freebsd"
+
+ uid := fmt.Sprintf("%d", os.Getuid())
+ // We don’t have to override the home directory for this because use of this path does not depend
+ // on any state of the filesystem.
+ darwinDefault := filepath.Join(os.Getenv("HOME"), ".config", "containers", "auth.json")
+
+ tmpDir := t.TempDir()
+
+ for caseIndex, c := range []struct {
+ sys *types.SystemContext
+ os string
+ xrd string
+ expected string
+ legacyFormat bool
+ expectedUserSpecified bool
+ }{
+ // Default paths
+ {&types.SystemContext{}, linux, "", "/run/containers/" + uid + "/auth.json", false, false},
+ {&types.SystemContext{}, darwin, "", darwinDefault, false, false},
+ {&types.SystemContext{}, freebsd, "", darwinDefault, false, false},
+ {nil, linux, "", "/run/containers/" + uid + "/auth.json", false, false},
+ {nil, darwin, "", darwinDefault, false, false},
+ {nil, freebsd, "", darwinDefault, false, false},
+ // SystemContext overrides
+ {&types.SystemContext{AuthFilePath: "/absolute/path"}, linux, "", "/absolute/path", false, true},
+ {&types.SystemContext{AuthFilePath: "/absolute/path"}, darwin, "", "/absolute/path", false, true},
+ {&types.SystemContext{AuthFilePath: "/absolute/path"}, freebsd, "", "/absolute/path", false, true},
+ {&types.SystemContext{LegacyFormatAuthFilePath: "/absolute/path"}, linux, "", "/absolute/path", true, true},
+ {&types.SystemContext{LegacyFormatAuthFilePath: "/absolute/path"}, darwin, "", "/absolute/path", true, true},
+ {&types.SystemContext{LegacyFormatAuthFilePath: "/absolute/path"}, freebsd, "", "/absolute/path", true, true},
+ {&types.SystemContext{RootForImplicitAbsolutePaths: "/prefix"}, linux, "", "/prefix/run/containers/" + uid + "/auth.json", false, false},
+ {&types.SystemContext{RootForImplicitAbsolutePaths: "/prefix"}, darwin, "", darwinDefault, false, false},
+ {&types.SystemContext{RootForImplicitAbsolutePaths: "/prefix"}, freebsd, "", darwinDefault, false, false},
+ // XDG_RUNTIME_DIR defined
+ {nil, linux, tmpDir, tmpDir + "/containers/auth.json", false, false},
+ {nil, darwin, tmpDir, darwinDefault, false, false},
+ {nil, freebsd, tmpDir, darwinDefault, false, false},
+ {nil, linux, tmpDir + "/thisdoesnotexist", "", false, false},
+ {nil, darwin, tmpDir + "/thisdoesnotexist", darwinDefault, false, false},
+ {nil, freebsd, tmpDir + "/thisdoesnotexist", darwinDefault, false, false},
+ } {
+ t.Run(fmt.Sprintf("%d", caseIndex), func(t *testing.T) {
+ // Always use t.Setenv() to ensure XDG_RUNTIME_DIR is restored to the original value after the test.
+ // Then, in cases where the test needs XDG_RUNTIME_DIR unset (not just set to empty), use a raw os.Unsetenv()
+ // to override the situation. (Sadly there isn’t a t.Unsetenv() as of Go 1.17.)
+ t.Setenv("XDG_RUNTIME_DIR", c.xrd)
+ if c.xrd == "" {
+ os.Unsetenv("XDG_RUNTIME_DIR")
+ }
+ res, userSpecified, err := getPathToAuthWithOS(c.sys, c.os)
+ if c.expected == "" {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ assert.Equal(t, authPath{path: c.expected, legacyFormat: c.legacyFormat}, res)
+ assert.Equal(t, c.expectedUserSpecified, userSpecified)
+ }
+ })
+ }
+}
+
+func TestGetAuth(t *testing.T) {
+ tmpXDGRuntimeDir := t.TempDir()
+ t.Logf("using temporary XDG_RUNTIME_DIR directory: %q", tmpXDGRuntimeDir)
+ t.Setenv("XDG_RUNTIME_DIR", tmpXDGRuntimeDir)
+
+ // override PATH for executing credHelper
+ curtDir, err := os.Getwd()
+ require.NoError(t, err)
+ origPath := os.Getenv("PATH")
+ newPath := fmt.Sprintf("%s:%s", filepath.Join(curtDir, "testdata"), origPath)
+ t.Setenv("PATH", newPath)
+ t.Logf("using PATH: %q", newPath)
+
+ tmpHomeDir := t.TempDir()
+ t.Logf("using temporary home directory: %q", tmpHomeDir)
+
+ configDir1 := filepath.Join(tmpXDGRuntimeDir, "containers")
+ if runtime.GOOS != "linux" {
+ configDir1 = filepath.Join(tmpHomeDir, ".config", "containers")
+ }
+ if err := os.MkdirAll(configDir1, 0700); err != nil {
+ t.Fatal(err)
+ }
+ configDir2 := filepath.Join(tmpHomeDir, ".docker")
+ if err := os.MkdirAll(configDir2, 0700); err != nil {
+ t.Fatal(err)
+ }
+ configPaths := [2]string{filepath.Join(configDir1, "auth.json"), filepath.Join(configDir2, "config.json")}
+
+ for _, configPath := range configPaths {
+ for _, tc := range []struct {
+ name string
+ key string
+ path string
+ expected types.DockerAuthConfig
+ sys *types.SystemContext
+ }{
+ {
+ name: "no auth config",
+ key: "index.docker.io",
+ expected: types.DockerAuthConfig{},
+ },
+ {
+ name: "empty hostname",
+ path: filepath.Join("testdata", "example.json"),
+ expected: types.DockerAuthConfig{},
+ },
+ {
+ name: "match one",
+ key: "example.org",
+ path: filepath.Join("testdata", "example.json"),
+ expected: types.DockerAuthConfig{Username: "example", Password: "org"},
+ },
+ {
+ name: "match none",
+ key: "registry.example.org",
+ path: filepath.Join("testdata", "example.json"),
+ expected: types.DockerAuthConfig{},
+ },
+ {
+ name: "match docker.io",
+ key: "docker.io",
+ path: filepath.Join("testdata", "full.json"),
+ expected: types.DockerAuthConfig{Username: "docker", Password: "io"},
+ },
+ {
+ name: "match docker.io normalized",
+ key: "docker.io",
+ path: filepath.Join("testdata", "abnormal.json"),
+ expected: types.DockerAuthConfig{Username: "index", Password: "docker.io"},
+ },
+ {
+ name: "normalize registry",
+ key: "normalize.example.org",
+ path: filepath.Join("testdata", "full.json"),
+ expected: types.DockerAuthConfig{Username: "normalize", Password: "example"},
+ },
+ {
+ name: "match localhost",
+ key: "localhost",
+ path: filepath.Join("testdata", "full.json"),
+ expected: types.DockerAuthConfig{Username: "local", Password: "host"},
+ },
+ {
+ name: "match ip",
+ key: "10.10.30.45:5000",
+ path: filepath.Join("testdata", "full.json"),
+ expected: types.DockerAuthConfig{Username: "10.10", Password: "30.45-5000"},
+ },
+ {
+ name: "match port",
+ key: "localhost:5000",
+ path: filepath.Join("testdata", "abnormal.json"),
+ expected: types.DockerAuthConfig{Username: "local", Password: "host-5000"},
+ },
+ {
+ name: "use system context",
+ key: "example.org",
+ path: filepath.Join("testdata", "example.json"),
+ expected: types.DockerAuthConfig{Username: "foo", Password: "bar"},
+ sys: &types.SystemContext{
+ DockerAuthConfig: &types.DockerAuthConfig{
+ Username: "foo",
+ Password: "bar",
+ },
+ },
+ },
+ {
+ name: "identity token",
+ key: "example.org",
+ path: filepath.Join("testdata", "example_identitytoken.json"),
+ expected: types.DockerAuthConfig{
+ Username: "00000000-0000-0000-0000-000000000000",
+ Password: "",
+ IdentityToken: "some very long identity token",
+ },
+ },
+ {
+ name: "match none (empty.json)",
+ key: "localhost:5000",
+ path: filepath.Join("testdata", "empty.json"),
+ expected: types.DockerAuthConfig{},
+ },
+ {
+ name: "credhelper from registries.conf",
+ key: "registry-a.com",
+ sys: &types.SystemContext{
+ SystemRegistriesConfPath: filepath.Join("testdata", "cred-helper.conf"),
+ SystemRegistriesConfDirPath: filepath.Join("testdata", "IdoNotExist"),
+ },
+ expected: types.DockerAuthConfig{Username: "foo", Password: "bar"},
+ },
+ {
+ name: "identity token credhelper from registries.conf",
+ key: "registry-b.com",
+ sys: &types.SystemContext{
+ SystemRegistriesConfPath: filepath.Join("testdata", "cred-helper.conf"),
+ SystemRegistriesConfDirPath: filepath.Join("testdata", "IdoNotExist"),
+ },
+ expected: types.DockerAuthConfig{IdentityToken: "fizzbuzz"},
+ },
+ {
+ name: "match ref image",
+ key: "example.org/repo/image",
+ path: filepath.Join("testdata", "refpath.json"),
+ expected: types.DockerAuthConfig{Username: "example", Password: "org"},
+ },
+ {
+ name: "match ref repo",
+ key: "example.org/repo",
+ path: filepath.Join("testdata", "refpath.json"),
+ expected: types.DockerAuthConfig{Username: "example", Password: "org"},
+ },
+ {
+ name: "match ref host",
+ key: "example.org/image",
+ path: filepath.Join("testdata", "refpath.json"),
+ expected: types.DockerAuthConfig{Username: "local", Password: "host"},
+ },
+ // Test matching of docker.io/[library/] explicitly, to make sure the docker.io
+ // normalization behavior doesn’t affect the semantics.
+ {
+ name: "docker.io library repo match",
+ key: "docker.io/library/busybox",
+ path: filepath.Join("testdata", "refpath.json"),
+ expected: types.DockerAuthConfig{Username: "library", Password: "busybox"},
+ },
+ {
+ name: "docker.io library namespace match",
+ key: "docker.io/library/notbusybox",
+ path: filepath.Join("testdata", "refpath.json"),
+ expected: types.DockerAuthConfig{Username: "library", Password: "other"},
+ },
+ { // This tests that the docker.io/vendor key in auth file is not normalized to docker.io/library/vendor
+ name: "docker.io vendor repo match",
+ key: "docker.io/vendor/product",
+ path: filepath.Join("testdata", "refpath.json"),
+ expected: types.DockerAuthConfig{Username: "first", Password: "level"},
+ },
+ { // This tests that the docker.io/vendor key in the query is not normalized to docker.io/library/vendor.
+ name: "docker.io vendor namespace match",
+ key: "docker.io/vendor",
+ path: filepath.Join("testdata", "refpath.json"),
+ expected: types.DockerAuthConfig{Username: "first", Password: "level"},
+ },
+ {
+ name: "docker.io host-only match",
+ key: "docker.io/other-vendor/other-product",
+ path: filepath.Join("testdata", "refpath.json"),
+ expected: types.DockerAuthConfig{Username: "top", Password: "level"},
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ if err := os.RemoveAll(configPath); err != nil {
+ t.Fatal(err)
+ }
+
+ if tc.path != "" {
+ contents, err := os.ReadFile(tc.path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.WriteFile(configPath, contents, 0640); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ var sys *types.SystemContext
+ if tc.sys != nil {
+ sys = tc.sys
+ }
+
+ auth, err := getCredentialsWithHomeDir(sys, tc.key, tmpHomeDir)
+ require.NoError(t, err)
+ assert.Equal(t, tc.expected, auth)
+
+ // Verify the previous API also returns data consistent with the current one.
+ username, password, err := getAuthenticationWithHomeDir(sys, tc.key, tmpHomeDir)
+ if tc.expected.IdentityToken != "" {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ assert.Equal(t, tc.expected.Username, username)
+ assert.Equal(t, tc.expected.Password, password)
+ }
+
+ require.NoError(t, os.RemoveAll(configPath))
+ })
+ }
+ }
+}
+
+func TestGetAuthFromLegacyFile(t *testing.T) {
+ tmpDir := t.TempDir()
+ t.Logf("using temporary home directory: %q", tmpDir)
+
+ configPath := filepath.Join(tmpDir, ".dockercfg")
+ contents, err := os.ReadFile(filepath.Join("testdata", "legacy.json"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, tc := range []struct {
+ name string
+ hostname string
+ expected types.DockerAuthConfig
+ }{
+ {
+ name: "ignore schema and path",
+ hostname: "localhost",
+ expected: types.DockerAuthConfig{
+ Username: "local",
+ Password: "host-legacy",
+ },
+ },
+ {
+ name: "normalize registry",
+ hostname: "docker.io",
+ expected: types.DockerAuthConfig{
+ Username: "docker",
+ Password: "io-legacy",
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ if err := os.WriteFile(configPath, contents, 0640); err != nil {
+ t.Fatal(err)
+ }
+
+ auth, err := getCredentialsWithHomeDir(nil, tc.hostname, tmpDir)
+ require.NoError(t, err)
+ assert.Equal(t, tc.expected, auth)
+
+ // Testing for previous APIs
+ username, password, err := getAuthenticationWithHomeDir(nil, tc.hostname, tmpDir)
+ require.NoError(t, err)
+ assert.Equal(t, tc.expected.Username, username)
+ assert.Equal(t, tc.expected.Password, password)
+ })
+ }
+}
+
+func TestGetAuthPreferNewConfig(t *testing.T) {
+ tmpDir := t.TempDir()
+ t.Logf("using temporary home directory: %q", tmpDir)
+
+ configDir := filepath.Join(tmpDir, ".docker")
+ if err := os.Mkdir(configDir, 0750); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, data := range []struct {
+ source string
+ target string
+ }{
+ {
+ source: filepath.Join("testdata", "full.json"),
+ target: filepath.Join(configDir, "config.json"),
+ },
+ {
+ source: filepath.Join("testdata", "legacy.json"),
+ target: filepath.Join(tmpDir, ".dockercfg"),
+ },
+ } {
+ contents, err := os.ReadFile(data.source)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.WriteFile(data.target, contents, 0640); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ auth, err := getCredentialsWithHomeDir(nil, "docker.io", tmpDir)
+ assert.NoError(t, err)
+ assert.Equal(t, "docker", auth.Username)
+ assert.Equal(t, "io", auth.Password)
+}
+
+func TestGetAuthFailsOnBadInput(t *testing.T) {
+ tmpXDGRuntimeDir := t.TempDir()
+ t.Logf("using temporary XDG_RUNTIME_DIR directory: %q", tmpXDGRuntimeDir)
+ t.Setenv("XDG_RUNTIME_DIR", tmpXDGRuntimeDir)
+
+ tmpHomeDir := t.TempDir()
+ t.Logf("using temporary home directory: %q", tmpHomeDir)
+
+ configDir := filepath.Join(tmpXDGRuntimeDir, "containers")
+ if runtime.GOOS != "linux" {
+ configDir = filepath.Join(tmpHomeDir, ".config", "containers")
+ }
+ if err := os.MkdirAll(configDir, 0750); err != nil {
+ t.Fatal(err)
+ }
+ configPath := filepath.Join(configDir, "auth.json")
+
+ // no config file present
+ auth, err := getCredentialsWithHomeDir(nil, "index.docker.io", tmpHomeDir)
+ if err != nil {
+ t.Fatalf("got unexpected error: %#+v", err)
+ }
+ assert.Equal(t, types.DockerAuthConfig{}, auth)
+
+ if err := os.WriteFile(configPath, []byte("Json rocks! Unless it doesn't."), 0640); err != nil {
+ t.Fatalf("failed to write file %q: %v", configPath, err)
+ }
+ _, err = getCredentialsWithHomeDir(nil, "index.docker.io", tmpHomeDir)
+ assert.ErrorContains(t, err, "unmarshaling JSON")
+
+ // remove the invalid config file
+ os.RemoveAll(configPath)
+ // no config file present
+ auth, err = getCredentialsWithHomeDir(nil, "index.docker.io", tmpHomeDir)
+ if err != nil {
+ t.Fatalf("got unexpected error: %#+v", err)
+ }
+ assert.Equal(t, types.DockerAuthConfig{}, auth)
+
+ configPath = filepath.Join(tmpHomeDir, ".dockercfg")
+ if err := os.WriteFile(configPath, []byte("I'm certainly not a json string."), 0640); err != nil {
+ t.Fatalf("failed to write file %q: %v", configPath, err)
+ }
+ _, err = getCredentialsWithHomeDir(nil, "index.docker.io", tmpHomeDir)
+ assert.ErrorContains(t, err, "unmarshaling JSON")
+}
+
+// TestGetCredentialsInteroperability verifies that Docker-created config files can be consumed by GetCredentials.
+func TestGetCredentialsInteroperability(t *testing.T) {
+ const testUser = "some-user"
+ const testPassword = "some-password"
+
+ for _, c := range []struct {
+ loginKey string // or "" for Docker's default. We must special-case that because (docker login docker.io) works, but (docker logout docker.io) doesn't!
+ queryKey string
+ }{
+ {"example.com", "example.com"},
+ {"example.com", "example.com/ns/repo"},
+ {"example.com:8000", "example.com:8000"},
+ {"example.com:8000", "example.com:8000/ns/repo"},
+ {"", "docker.io"},
+ {"", "docker.io/library/busybox"},
+ {"", "docker.io/notlibrary/busybox"},
+ } {
+ configDir := t.TempDir()
+ configPath := filepath.Join(configDir, config.ConfigFileName)
+
+ // Initially, there are no credentials
+ creds, err := GetCredentials(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.queryKey)
+ require.NoError(t, err)
+ assert.Equal(t, types.DockerAuthConfig{}, creds)
+
+ // Log in. This is intended to match github.com/docker/cli/command/registry.runLogin
+ serverAddress := c.loginKey
+ if serverAddress == "" {
+ serverAddress = registry.IndexServer
+ }
+ configFile, err := config.Load(configDir)
+ require.NoError(t, err)
+ err = configFile.GetCredentialsStore(serverAddress).Store(configtypes.AuthConfig{
+ ServerAddress: serverAddress,
+ Username: testUser,
+ Password: testPassword,
+ })
+ require.NoError(t, err)
+ // We can find the credentials.
+ creds, err = GetCredentials(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.queryKey)
+ require.NoError(t, err)
+ assert.Equal(t, types.DockerAuthConfig{
+ Username: testUser,
+ Password: testPassword,
+ }, creds)
+
+ // Log out. This is intended to match github.com/docker/cli/command/registry.runLogout
+ var regsToLogout []string
+ if c.loginKey == "" {
+ regsToLogout = []string{registry.IndexServer}
+ } else {
+ hostnameAddress := registry.ConvertToHostname(c.loginKey)
+ regsToLogout = []string{c.loginKey, hostnameAddress, "http://" + hostnameAddress, "https://" + hostnameAddress}
+ }
+ succeeded := false
+ for _, r := range regsToLogout {
+ if err := configFile.GetCredentialsStore(r).Erase(r); err == nil {
+ succeeded = true
+ }
+ }
+ require.True(t, succeeded)
+ // We can’t find the credentials any more.
+ creds, err = GetCredentials(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.queryKey)
+ require.NoError(t, err)
+ assert.Equal(t, types.DockerAuthConfig{}, creds)
+ }
+}
+
+func TestGetAllCredentials(t *testing.T) {
+ // Create a temporary authentication file.
+ tmpFile, err := os.CreateTemp("", "auth.json.")
+ require.NoError(t, err)
+ authFilePath := tmpFile.Name()
+ defer tmpFile.Close()
+ defer os.Remove(authFilePath)
+ // override PATH for executing credHelper
+ path, err := os.Getwd()
+ require.NoError(t, err)
+ origPath := os.Getenv("PATH")
+ newPath := fmt.Sprintf("%s:%s", filepath.Join(path, "testdata"), origPath)
+ t.Setenv("PATH", newPath)
+ t.Logf("using PATH: %q", newPath)
+ err = os.Chmod(filepath.Join(path, "testdata", "docker-credential-helper-registry"), os.ModePerm)
+ require.NoError(t, err)
+ sys := types.SystemContext{
+ AuthFilePath: authFilePath,
+ SystemRegistriesConfPath: filepath.Join("testdata", "cred-helper-with-auth-files.conf"),
+ SystemRegistriesConfDirPath: filepath.Join("testdata", "IdoNotExist"),
+ }
+
+ // Make sure that we can handle no-creds-found errors.
+ t.Run("no credentials found", func(t *testing.T) {
+ t.Setenv("DOCKER_CONFIG", filepath.Join(path, "testdata"))
+ authConfigs, err := GetAllCredentials(nil)
+ require.NoError(t, err)
+ require.Empty(t, authConfigs)
+ })
+
+ for _, data := range [][]struct {
+ writeKey string
+ expectedKey string
+ username string
+ password string
+ }{
+ { // Basic operation, including a credential helper.
+ {
+ writeKey: "example.org",
+ expectedKey: "example.org",
+ username: "example-user",
+ password: "example-password",
+ },
+ {
+ writeKey: "quay.io",
+ expectedKey: "quay.io",
+ username: "quay-user",
+ password: "quay-password",
+ },
+ {
+ writeKey: "localhost:5000",
+ expectedKey: "localhost:5000",
+ username: "local-user",
+ password: "local-password",
+ },
+ {
+ writeKey: "",
+ expectedKey: "registry-a.com",
+ username: "foo",
+ password: "bar",
+ },
+ },
+ { // docker.io normalization, both namespaced and not
+ {
+ writeKey: "docker.io/vendor",
+ expectedKey: "docker.io/vendor",
+ username: "u1",
+ password: "p1",
+ },
+ {
+ writeKey: "index.docker.io", // Ideally we would even use a HTTPS URL
+ expectedKey: "docker.io",
+ username: "u2",
+ password: "p2",
+ },
+ {
+ writeKey: "",
+ expectedKey: "registry-a.com",
+ username: "foo",
+ password: "bar",
+ },
+ },
+ } {
+ // Write the credentials to the authfile.
+ err := os.WriteFile(authFilePath, []byte{'{', '}'}, 0700)
+ require.NoError(t, err)
+
+ for _, d := range data {
+ if d.writeKey == "" {
+ continue
+ }
+ err := SetAuthentication(&sys, d.writeKey, d.username, d.password)
+ require.NoError(t, err)
+ }
+
+ // Now ask for all credentials and make sure that map includes all
+ // servers and the correct credentials.
+ authConfigs, err := GetAllCredentials(&sys)
+ require.NoError(t, err)
+ require.Equal(t, len(data), len(authConfigs))
+
+ for _, d := range data {
+ conf, exists := authConfigs[d.expectedKey]
+ require.True(t, exists, "%v", d)
+ require.Equal(t, d.username, conf.Username, "%v", d)
+ require.Equal(t, d.password, conf.Password, "%v", d)
+ }
+ }
+}
+
+func TestAuthKeysForKey(t *testing.T) {
+ for _, tc := range []struct {
+ name, input string
+ expected []string
+ }{
+ {
+ name: "a top-level repo",
+ input: "quay.io/image",
+ expected: []string{
+ "quay.io/image",
+ "quay.io",
+ },
+ },
+ {
+ name: "a second-level repo",
+ input: "quay.io/user/image",
+ expected: []string{
+ "quay.io/user/image",
+ "quay.io/user",
+ "quay.io",
+ },
+ },
+ {
+ name: "a deeply-nested repo",
+ input: "quay.io/a/b/c/d/image",
+ expected: []string{
+ "quay.io/a/b/c/d/image",
+ "quay.io/a/b/c/d",
+ "quay.io/a/b/c",
+ "quay.io/a/b",
+ "quay.io/a",
+ "quay.io",
+ },
+ },
+ {
+ name: "docker.io library repo",
+ input: "docker.io/library/busybox",
+ expected: []string{
+ "docker.io/library/busybox",
+ "docker.io/library",
+ "docker.io",
+ },
+ },
+ {
+ name: "docker.io non-library repo",
+ input: "docker.io/vendor/busybox",
+ expected: []string{
+ "docker.io/vendor/busybox",
+ "docker.io/vendor",
+ "docker.io",
+ },
+ },
+ } {
+ result := authKeysForKey(tc.input)
+ require.Equal(t, tc.expected, result, tc.name)
+ }
+}
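+
+// Example_authKeysForKeyLookupOrder is an editor-added sketch, not an upstream
+// test: it only illustrates the lookup order that findCredentialsInFile relies
+// on, from the most specific key down to the bare registry.
+func Example_authKeysForKeyLookupOrder() {
+	for _, k := range authKeysForKey("quay.io/ns/repo") {
+		fmt.Println(k)
+	}
+	// Output:
+	// quay.io/ns/repo
+	// quay.io/ns
+	// quay.io
+}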
+
+func TestSetCredentials(t *testing.T) {
+ const (
+ usernamePrefix = "username-"
+ passwordPrefix = "password-"
+ )
+
+ for _, tc := range [][]string{
+ {"quay.io"},
+ {"quay.io/a/b/c/d/image"},
+ {
+ "quay.io/a/b/c",
+ "quay.io/a/b",
+ "quay.io/a",
+ "quay.io",
+ "my-registry.local",
+ "my-registry.local",
+ },
+ {
+ "docker.io",
+ "docker.io/vendor/product",
+ "docker.io/vendor",
+ "docker.io/library/busybox",
+ "docker.io/library",
+ },
+ } {
+ tmpFile, err := os.CreateTemp("", "auth.json.set")
+ require.NoError(t, err)
+ defer os.RemoveAll(tmpFile.Name())
+
+ _, err = tmpFile.WriteString("{}")
+ require.NoError(t, err)
+ sys := &types.SystemContext{AuthFilePath: tmpFile.Name()}
+
+ writtenCredentials := map[string]int{}
+ for i, input := range tc {
+ _, err := SetCredentials(
+ sys,
+ input,
+ usernamePrefix+fmt.Sprint(i),
+ passwordPrefix+fmt.Sprint(i),
+ )
+ assert.NoError(t, err)
+ writtenCredentials[input] = i // Possibly overwriting a previous entry
+ }
+
+ // Read the resulting file and verify it contains the expected keys
+ auth, err := newAuthPathDefault(tmpFile.Name()).parse()
+ require.NoError(t, err)
+ assert.Len(t, auth.AuthConfigs, len(writtenCredentials))
+ // auth.AuthConfigs and writtenCredentials are both maps, i.e. their keys are unique;
+ // so A \subset B && len(A) == len(B) implies A == B
+ for key := range writtenCredentials {
+ assert.NotEmpty(t, auth.AuthConfigs[key].Auth)
+ }
+
+ // Verify that the configuration is interpreted as expected
+ for key, i := range writtenCredentials {
+ expected := types.DockerAuthConfig{
+ Username: usernamePrefix + fmt.Sprint(i),
+ Password: passwordPrefix + fmt.Sprint(i),
+ }
+ auth, err := GetCredentials(sys, key)
+ require.NoError(t, err)
+ assert.Equal(t, expected, auth)
+ ref, err := reference.ParseNamed(key)
+ // Full-registry keys and docker.io/top-level-namespace can't be read by GetCredentialsForRef;
+ // We have already tested that above, so ignore that; only verify that the two
+ // return consistent results if both are possible.
+ if err == nil {
+ auth, err := GetCredentialsForRef(sys, ref)
+ require.NoError(t, err)
+ assert.Equal(t, expected, auth, ref.String())
+ }
+ }
+ }
+}
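+
+// ExampleSetCredentials is an editor-added sketch, not an upstream test: it
+// stores credentials for a namespaced key in a temporary auth.json and reads
+// them back via GetCredentials. Without an "Output:" comment, `go test` only
+// compiles this example and does not execute it.
+func ExampleSetCredentials() {
+	f, err := os.CreateTemp("", "auth.json-example")
+	if err != nil {
+		panic(err)
+	}
+	defer os.Remove(f.Name())
+	if _, err := f.WriteString("{}"); err != nil {
+		panic(err)
+	}
+	sys := &types.SystemContext{AuthFilePath: f.Name()}
+	if _, err := SetCredentials(sys, "quay.io/ns/repo", "user", "secret"); err != nil {
+		panic(err)
+	}
+	creds, err := GetCredentials(sys, "quay.io/ns/repo")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(creds.Username)
+}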
+
+func TestRemoveAuthentication(t *testing.T) {
+ testAuth := dockerAuthConfig{Auth: "ZXhhbXBsZTpvcmc="}
+ for _, tc := range []struct {
+ config dockerConfigFile
+ inputs []string
+ shouldError bool
+ assert func(dockerConfigFile)
+ }{
+ {
+ config: dockerConfigFile{
+ AuthConfigs: map[string]dockerAuthConfig{
+ "quay.io": testAuth,
+ },
+ },
+ inputs: []string{"quay.io"},
+ assert: func(auth dockerConfigFile) {
+ assert.Len(t, auth.AuthConfigs, 0)
+ },
+ },
+ {
+ config: dockerConfigFile{
+ AuthConfigs: map[string]dockerAuthConfig{
+ "quay.io": testAuth,
+ },
+ },
+ inputs: []string{"quay.io/user/image"},
+ shouldError: true, // not logged in
+ assert: func(auth dockerConfigFile) {
+ assert.Len(t, auth.AuthConfigs, 1)
+ assert.NotEmpty(t, auth.AuthConfigs["quay.io"].Auth)
+ },
+ },
+ {
+ config: dockerConfigFile{
+ AuthConfigs: map[string]dockerAuthConfig{
+ "quay.io": testAuth,
+ "my-registry.local": testAuth,
+ },
+ },
+ inputs: []string{"my-registry.local"},
+ assert: func(auth dockerConfigFile) {
+ assert.Len(t, auth.AuthConfigs, 1)
+ assert.NotEmpty(t, auth.AuthConfigs["quay.io"].Auth)
+ },
+ },
+ {
+ config: dockerConfigFile{
+ AuthConfigs: map[string]dockerAuthConfig{
+ "quay.io/a/b/c": testAuth,
+ "quay.io/a/b": testAuth,
+ "quay.io/a": testAuth,
+ "quay.io": testAuth,
+ "my-registry.local": testAuth,
+ },
+ },
+ inputs: []string{
+ "quay.io/a/b",
+ "quay.io",
+ "my-registry.local",
+ },
+ assert: func(auth dockerConfigFile) {
+ assert.Len(t, auth.AuthConfigs, 2)
+ assert.NotEmpty(t, auth.AuthConfigs["quay.io/a/b/c"].Auth)
+ assert.NotEmpty(t, auth.AuthConfigs["quay.io/a"].Auth)
+ },
+ },
+ } {
+ content, err := json.Marshal(&tc.config)
+ require.NoError(t, err)
+
+ tmpFile, err := os.CreateTemp("", "auth.json")
+ require.NoError(t, err)
+ defer os.RemoveAll(tmpFile.Name())
+
+ _, err = tmpFile.Write(content)
+ require.NoError(t, err)
+
+ sys := &types.SystemContext{AuthFilePath: tmpFile.Name()}
+
+ for _, input := range tc.inputs {
+ err := RemoveAuthentication(sys, input)
+ if tc.shouldError {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ }
+
+ auth, err := newAuthPathDefault(tmpFile.Name()).parse()
+ require.NoError(t, err)
+
+ tc.assert(auth)
+ }
+}
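+
+// ExampleRemoveAuthentication is an editor-added sketch, not an upstream test:
+// it logs credentials into a temporary auth.json and removes them again.
+// Without an "Output:" comment, `go test` only compiles it.
+func ExampleRemoveAuthentication() {
+	f, err := os.CreateTemp("", "auth.json-example")
+	if err != nil {
+		panic(err)
+	}
+	defer os.Remove(f.Name())
+	if _, err := f.WriteString("{}"); err != nil {
+		panic(err)
+	}
+	sys := &types.SystemContext{AuthFilePath: f.Name()}
+	if _, err := SetCredentials(sys, "example.com", "user", "secret"); err != nil {
+		panic(err)
+	}
+	if err := RemoveAuthentication(sys, "example.com"); err != nil {
+		panic(err)
+	}
+	fmt.Println("credentials removed")
+}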
+
+// TestSetCredentialsInteroperability verifies that our config files can be consumed by Docker.
+func TestSetCredentialsInteroperability(t *testing.T) {
+ const testUser = "some-user"
+ const testPassword = "some-password"
+
+ for _, c := range []struct {
+ loginKey string // or "" for Docker's default. We must special-case that because (docker login docker.io) works, but (docker logout docker.io) doesn't!
+ queryRepo string
+ otherContents bool
+ loginKeyError bool
+ }{
+ {loginKey: "example.com", queryRepo: "example.com/ns/repo"},
+ {loginKey: "example.com:8000", queryRepo: "example.com:8000/ns/repo"},
+ {loginKey: "docker.io", queryRepo: "docker.io/library/busybox"},
+ {loginKey: "docker.io", queryRepo: "docker.io/notlibrary/busybox"},
+ {loginKey: "example.com", queryRepo: "example.com/ns/repo", otherContents: true},
+ {loginKey: "example.com/ns", queryRepo: "example.com/ns/repo", loginKeyError: true},
+ {loginKey: "example.com:8000/ns", queryRepo: "example.com:8000/ns/repo", loginKeyError: true},
+ } {
+ configDir := t.TempDir()
+ configPath := filepath.Join(configDir, config.ConfigFileName)
+
+ // The credential lookups are intended to match github.com/docker/cli/command/image.RunPull .
+ dockerRef, err := dockerReference.ParseNormalizedNamed(c.queryRepo)
+ require.NoError(t, err)
+ dockerRef = dockerReference.TagNameOnly(dockerRef)
+ repoInfo, err := registry.ParseRepositoryInfo(dockerRef)
+ require.NoError(t, err)
+ configKey := repoInfo.Index.Name
+ if repoInfo.Index.Official {
+ configKey = registry.IndexServer
+ }
+
+ if c.otherContents {
+ err := os.WriteFile(configPath, []byte(`{"auths":{"unmodified-domain.example":{"identitytoken":"identity"}},`+
+ `"psFormat":"psFormatValue",`+
+ `"credHelpers":{"helper-domain.example":"helper-name"}`+
+ `}`), 0o700)
+ require.NoError(t, err)
+ }
+
+ // Initially, there are no credentials
+ configFile, err := config.Load(configDir)
+ require.NoError(t, err)
+ creds, err := configFile.GetCredentialsStore(configKey).Get(configKey)
+ require.NoError(t, err)
+ assert.Equal(t, configtypes.AuthConfig{}, creds)
+
+ // Log in.
+ _, err = SetCredentials(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.loginKey, testUser, testPassword)
+ if c.loginKeyError {
+ assert.Error(t, err)
+ continue
+ }
+ require.NoError(t, err)
+ // We can find the credentials.
+ configFile, err = config.Load(configDir)
+ require.NoError(t, err)
+ creds, err = configFile.GetCredentialsStore(configKey).Get(configKey)
+ require.NoError(t, err)
+ assert.Equal(t, configtypes.AuthConfig{
+ ServerAddress: configKey,
+ Username: testUser,
+ Password: testPassword,
+ }, creds)
+
+ // Log out.
+ err = RemoveAuthentication(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.loginKey)
+ require.NoError(t, err)
+ // We can’t find the credentials any more.
+ configFile, err = config.Load(configDir)
+ require.NoError(t, err)
+ creds, err = configFile.GetCredentialsStore(configKey).Get(configKey)
+ require.NoError(t, err)
+ assert.Equal(t, configtypes.AuthConfig{}, creds)
+
+ if c.otherContents {
+ creds, err = configFile.GetCredentialsStore("unmodified-domain.example").Get("unmodified-domain.example")
+ require.NoError(t, err)
+ assert.Equal(t, configtypes.AuthConfig{
+ ServerAddress: "unmodified-domain.example",
+ IdentityToken: "identity",
+ }, creds)
+ assert.Equal(t, "psFormatValue", configFile.PsFormat)
+ assert.Equal(t, map[string]string{"helper-domain.example": "helper-name"}, configFile.CredentialHelpers)
+ }
+ }
+}
+
+func TestValidateKey(t *testing.T) {
+ // Invalid keys
+ for _, key := range []string{
+ "https://my-registry.local",
+ "host/foo:tag",
+ "host/foo@digest",
+ "localhost:5000/repo:tag",
+ "localhost:5000/repo@digest",
+ } {
+ _, err := validateKey(key)
+ assert.Error(t, err, key)
+ }
+
+ // Valid keys
+ for _, tc := range []struct {
+ key string
+ isNamespaced bool
+ }{
+ {"my-registry.local", false},
+ {"my-registry.local/path", true},
+ {"quay.io/a/b/c/d", true},
+ {"localhost:5000", false},
+ {"localhost:5000/repo", true},
+ } {
+ isNamespaced, err := validateKey(tc.key)
+ require.NoError(t, err, tc.key)
+ assert.Equal(t, tc.isNamespaced, isNamespaced, tc.key)
+ }
+}
+
+func TestSetGetCredentials(t *testing.T) {
+ const (
+ username = "username"
+ password = "password"
+ )
+
+ tmpDir := t.TempDir()
+
+ for _, tc := range []struct {
+ name string
+ set string
+ get string
+ useLegacyFormat bool
+ shouldAuth bool
+ }{
+ {
+ name: "Should match namespace",
+ set: "quay.io/foo",
+ get: "quay.io/foo/a",
+ shouldAuth: true,
+ },
+ {
+ name: "Should match registry if repository provided",
+ set: "quay.io",
+ get: "quay.io/foo",
+ shouldAuth: true,
+ },
+ {
+ name: "Should not match different repository",
+ set: "quay.io/foo",
+ get: "quay.io/bar",
+ shouldAuth: false,
+ },
+ {
+ name: "Should match legacy registry entry (new API)",
+ set: "https://quay.io/v1/",
+ get: "quay.io/foo",
+ shouldAuth: true,
+ },
+ {
+ name: "Should match legacy registry entry (legacy API)",
+ set: "https://quay.io/v1/",
+ get: "quay.io",
+ shouldAuth: true,
+ useLegacyFormat: true,
+ },
+ } {
+ // Create a new empty SystemContext referring an empty auth.json
+ tmpFile, err := os.CreateTemp("", "auth.json-")
+ require.NoError(t, err)
+ defer os.RemoveAll(tmpFile.Name())
+
+ sys := &types.SystemContext{}
+ if tc.useLegacyFormat {
+ sys.LegacyFormatAuthFilePath = tmpFile.Name()
+ _, err = tmpFile.WriteString(fmt.Sprintf(
+ `{"%s":{"auth":"dXNlcm5hbWU6cGFzc3dvcmQ="}}`, tc.set,
+ ))
+ } else {
+ sys.AuthFilePath = tmpFile.Name()
+ _, err = tmpFile.WriteString(fmt.Sprintf(
+ `{"auths":{"%s":{"auth":"dXNlcm5hbWU6cGFzc3dvcmQ="}}}`, tc.set,
+ ))
+ }
+ require.NoError(t, err)
+
+ // Try to authenticate against them
+ auth, err := getCredentialsWithHomeDir(sys, tc.get, tmpDir)
+ require.NoError(t, err)
+
+ if tc.shouldAuth {
+ assert.Equal(t, username, auth.Username, tc.name)
+ assert.Equal(t, password, auth.Password, tc.name)
+ } else {
+ assert.Empty(t, auth.Username, tc.name)
+ assert.Empty(t, auth.Password, tc.name)
+ }
+ }
+}
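+
+// ExampleGetCredentialsForRef is an editor-added sketch, not an upstream test:
+// it shows how a caller might resolve credentials for a parsed image
+// reference. The reference and auth file path are hypothetical; without an
+// "Output:" comment, the example is compiled but not executed.
+func ExampleGetCredentialsForRef() {
+	ref, err := reference.ParseNamed("quay.io/ns/repo")
+	if err != nil {
+		panic(err)
+	}
+	sys := &types.SystemContext{AuthFilePath: "/nonexistent/auth.json"}
+	creds, err := GetCredentialsForRef(sys, ref)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("found credentials: %v\n", creds != types.DockerAuthConfig{})
+}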
diff --git a/pkg/docker/config/testdata/abnormal.json b/pkg/docker/config/testdata/abnormal.json
new file mode 100644
index 0000000..33afff2
--- /dev/null
+++ b/pkg/docker/config/testdata/abnormal.json
@@ -0,0 +1,22 @@
+{
+ "auths": {
+ "example.org": {
+ "auth": "ZXhhbXBsZTpvcmc="
+ },
+ "https://index.docker.io/v1": {
+ "auth": "aW5kZXg6ZG9ja2VyLmlv"
+ },
+ "https://127.0.0.1:5000": {
+ "auth": "MTI3LjA6MC4xLTUwMDA="
+ },
+ "http://localhost": {
+ "auth": "bG9jYWw6aG9zdA=="
+ },
+ "https://localhost:5001": {
+ "auth": "bG9jYWw6aG9zdC01MDAx"
+ },
+ "localhost:5000": {
+ "auth": "bG9jYWw6aG9zdC01MDAw"
+ }
+ }
+}
diff --git a/pkg/docker/config/testdata/config.json b/pkg/docker/config/testdata/config.json
new file mode 100644
index 0000000..c27ecfa
--- /dev/null
+++ b/pkg/docker/config/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "credHelpers" : {
+ "registry-no-creds.com" : "helper-registry"
+ }
+}
diff --git a/pkg/docker/config/testdata/cred-helper-with-auth-files.conf b/pkg/docker/config/testdata/cred-helper-with-auth-files.conf
new file mode 100644
index 0000000..51da42d
--- /dev/null
+++ b/pkg/docker/config/testdata/cred-helper-with-auth-files.conf
@@ -0,0 +1 @@
+credential-helpers = [ "containers-auth.json", "helper-registry" ]
diff --git a/pkg/docker/config/testdata/cred-helper.conf b/pkg/docker/config/testdata/cred-helper.conf
new file mode 100644
index 0000000..680faab
--- /dev/null
+++ b/pkg/docker/config/testdata/cred-helper.conf
@@ -0,0 +1 @@
+credential-helpers = [ "helper-registry" ]
diff --git a/pkg/docker/config/testdata/docker-credential-helper-registry b/pkg/docker/config/testdata/docker-credential-helper-registry
new file mode 100755
index 0000000..d9dfa78
--- /dev/null
+++ b/pkg/docker/config/testdata/docker-credential-helper-registry
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+case "${1}" in
+ get)
+ read REGISTRY
+ case "${REGISTRY}" in
+ ("registry-a.com") echo "{\"ServerURL\":\"${REGISTRY}\",\"Username\":\"foo\",\"Secret\":\"bar\"}" ;;
+ ("registry-b.com") echo "{\"ServerURL\":\"${REGISTRY}\",\"Username\":\"<token>\",\"Secret\":\"fizzbuzz\"}" ;;
+ ("registry-no-creds.com") echo "credentials not found in native keychain" && exit 1 ;;
+ (*) echo "{}" ;;
+ esac
+ exit 0
+ ;;
+ store)
+ read UNUSED
+ exit 0
+ ;;
+ list)
+ read UNUSED
+ echo "{\"registry-a.com\":\"foo\"}"
+ exit 0
+ ;;
+ *)
+ echo "not implemented"
+ exit 1
+ ;;
+esac
diff --git a/pkg/docker/config/testdata/empty.json b/pkg/docker/config/testdata/empty.json
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/pkg/docker/config/testdata/empty.json
@@ -0,0 +1 @@
+{}
diff --git a/pkg/docker/config/testdata/example.json b/pkg/docker/config/testdata/example.json
new file mode 100644
index 0000000..58b9beb
--- /dev/null
+++ b/pkg/docker/config/testdata/example.json
@@ -0,0 +1,7 @@
+{
+ "auths": {
+ "example.org": {
+ "auth": "ZXhhbXBsZTpvcmc="
+ }
+ }
+}
diff --git a/pkg/docker/config/testdata/example_identitytoken.json b/pkg/docker/config/testdata/example_identitytoken.json
new file mode 100644
index 0000000..bfcf489
--- /dev/null
+++ b/pkg/docker/config/testdata/example_identitytoken.json
@@ -0,0 +1,8 @@
+{
+ "auths": {
+ "example.org": {
+ "auth": "MDAwMDAwMDAtMDAwMC0wMDAwLTAwMDAtMDAwMDAwMDAwMDAwOg==",
+ "identitytoken": "some very long identity token"
+ }
+ }
+} \ No newline at end of file
diff --git a/pkg/docker/config/testdata/full.json b/pkg/docker/config/testdata/full.json
new file mode 100644
index 0000000..041643e
--- /dev/null
+++ b/pkg/docker/config/testdata/full.json
@@ -0,0 +1,28 @@
+{
+ "auths": {
+ "example.org": {
+ "auth": "ZXhhbXBsZTpvcmc="
+ },
+ "index.docker.io": {
+ "auth": "aW5kZXg6ZG9ja2VyLmlv"
+ },
+ "docker.io": {
+ "auth": "ZG9ja2VyOmlv"
+ },
+ "localhost": {
+ "auth": "bG9jYWw6aG9zdA=="
+ },
+ "localhost:5000": {
+ "auth": "bG9jYWw6aG9zdC01MDAw"
+ },
+ "10.10.30.45": {
+ "auth": "MTAuMTA6MzAuNDU="
+ },
+ "10.10.30.45:5000": {
+ "auth": "MTAuMTA6MzAuNDUtNTAwMA=="
+ },
+ "https://normalize.example.org/v1": {
+ "auth": "bm9ybWFsaXplOmV4YW1wbGU="
+ }
+ }
+}
diff --git a/pkg/docker/config/testdata/legacy.json b/pkg/docker/config/testdata/legacy.json
new file mode 100644
index 0000000..479790a
--- /dev/null
+++ b/pkg/docker/config/testdata/legacy.json
@@ -0,0 +1,8 @@
+{
+ "http://index.docker.io/v1": {
+ "auth": "ZG9ja2VyOmlvLWxlZ2FjeQ=="
+ },
+ "https://localhost/v1": {
+ "auth": "bG9jYWw6aG9zdC1sZWdhY3k="
+ }
+}
diff --git a/pkg/docker/config/testdata/refpath.json b/pkg/docker/config/testdata/refpath.json
new file mode 100644
index 0000000..729aa9c
--- /dev/null
+++ b/pkg/docker/config/testdata/refpath.json
@@ -0,0 +1,10 @@
+{
+ "auths": {
+ "example.org/repo": { "auth": "ZXhhbXBsZTpvcmc=" },
+ "example.org": { "auth": "bG9jYWw6aG9zdA==" },
+ "docker.io/vendor": { "auth": "Zmlyc3Q6bGV2ZWw="},
+ "docker.io": { "auth": "dG9wOmxldmVs"},
+ "docker.io/library/busybox": { "auth": "bGlicmFyeTpidXN5Ym94" },
+ "docker.io/library": { "auth": "bGlicmFyeTpvdGhlcg==" }
+ }
+}
diff --git a/pkg/shortnames/shortnames.go b/pkg/shortnames/shortnames.go
new file mode 100644
index 0000000..a15b2b5
--- /dev/null
+++ b/pkg/shortnames/shortnames.go
@@ -0,0 +1,476 @@
+package shortnames
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/types"
+ "github.com/manifoldco/promptui"
+ "github.com/opencontainers/go-digest"
+ "golang.org/x/exp/slices"
+ "golang.org/x/term"
+)
+
+// IsShortName returns true if the specified input is a "short name". A "short
+// name" refers to a container image without a fully-qualified reference, and
+// is hence missing a registry (or domain). Names including a digest are not
+// short names.
+//
+// Examples:
+// - short names: "image:tag", "library/fedora"
+// - not short names: "quay.io/image", "localhost/image:tag",
+// "server.org:5000/lib/image", "image@sha256:..."
+func IsShortName(input string) bool {
+ isShort, _, _ := parseUnnormalizedShortName(input)
+ return isShort
+}
+
+// parseUnnormalizedShortName parses the input and returns whether it is a
+// short name, the unnormalized reference.Named, and a parsing error.
+func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
+ ref, err := reference.Parse(input)
+ if err != nil {
+ return false, nil, fmt.Errorf("cannot parse input: %q: %w", input, err)
+ }
+
+ named, ok := ref.(reference.Named)
+ if !ok {
+ return true, nil, fmt.Errorf("%q is not a named reference", input)
+ }
+
+ registry := reference.Domain(named)
+ if strings.ContainsAny(registry, ".:") || registry == "localhost" {
+ // A final parse to make sure that docker.io references are correctly
+ // normalized (e.g., docker.io/alpine to docker.io/library/alpine).
+ named, err = reference.ParseNormalizedNamed(input)
+ if err != nil {
+ return false, nil, fmt.Errorf("cannot normalize input: %q: %w", input, err)
+ }
+ return false, named, nil
+ }
+
+ return true, named, nil
+}
+
+// splitUserInput parses the user-specified reference. Namely, it strips off
+// the tag or digest and stores them in the return values so that both can be
+// re-added to a resolved alias or to unqualified-search registry (USR)
+// candidates at a later point.
+func splitUserInput(named reference.Named) (isTagged bool, isDigested bool, normalized reference.Named, tag string, digest digest.Digest) {
+ tagged, isT := named.(reference.NamedTagged)
+ if isT {
+ isTagged = true
+ tag = tagged.Tag()
+ }
+
+ digested, isD := named.(reference.Digested)
+ if isD {
+ isDigested = true
+ digest = digested.Digest()
+ }
+
+ // Strip off tag/digest if present.
+ normalized = reference.TrimNamed(named)
+
+ return
+}
+
+// Add records the specified name-value pair as a new short-name alias to the
+// user-specific aliases.conf. It may override an existing alias for `name`.
+func Add(ctx *types.SystemContext, name string, value reference.Named) error {
+ isShort, _, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return err
+ }
+ if !isShort {
+ return fmt.Errorf("%q is not a short name", name)
+ }
+ return sysregistriesv2.AddShortNameAlias(ctx, name, value.String())
+}
+
+// Remove clears the short-name alias for the specified name. It returns an
+// error if the name does not exist in the machine-generated
+// short-name-alias.conf. In that case, the alias must be specified in one of
+// the registries.conf files, which is the users' responsibility.
+func Remove(ctx *types.SystemContext, name string) error {
+ isShort, _, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return err
+ }
+ if !isShort {
+ return fmt.Errorf("%q is not a short name", name)
+ }
+ return sysregistriesv2.RemoveShortNameAlias(ctx, name)
+}
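+
+// Illustrative usage sketch (editor's note, not part of the upstream file):
+// adding and later removing a user-specific alias via the exported helpers.
+// sysCtx (a *types.SystemContext) is a hypothetical variable.
+//
+//    value, err := reference.ParseNormalizedNamed("registry.example.com/team/fedora")
+//    if err != nil {
+//        return err
+//    }
+//    if err := shortnames.Add(sysCtx, "fedora", value); err != nil {
+//        return err
+//    }
+//    // ... later, drop the alias again:
+//    if err := shortnames.Remove(sysCtx, "fedora"); err != nil {
+//        return err
+//    }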
+
+// Resolved encapsulates all data for a resolved image name.
+type Resolved struct {
+ PullCandidates []PullCandidate
+
+ userInput reference.Named
+ systemContext *types.SystemContext
+ rationale rationale
+ originDescription string
+}
+
+func (r *Resolved) addCandidate(named reference.Named) {
+ named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
+ r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r})
+}
+
+func (r *Resolved) addCandidateToRecord(named reference.Named) {
+ r.PullCandidates = append(r.PullCandidates, PullCandidate{named, true, r})
+}
+
+// rationale allows reasoning about pull errors and adding some context
+// information; see (*Resolved).Description.
+type rationale int
+
+const (
+ // No additional context.
+ rationaleNone rationale = iota
+ // Resolved value is a short-name alias.
+ rationaleAlias
+ // Resolved value has been completed with an Unqualified Search Registry.
+ rationaleUSR
+ // Resolved value has been selected by the user (via the prompt).
+ rationaleUserSelection
+ // Resolved value has been enforced to use Docker Hub (via SystemContext).
+ rationaleEnforcedDockerHub
+)
+
+// Description returns a human-readable description about the resolution
+// process (e.g., short-name alias, unqualified-search registries, etc.).
+// It is meant to be printed before attempting to pull the pull candidates
+// to make the short-name resolution more transparent to the user.
+//
+// If the returned string is empty, it is not meant to be printed.
+func (r *Resolved) Description() string {
+ switch r.rationale {
+ case rationaleAlias:
+ return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription)
+ case rationaleUSR:
+ return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription)
+ case rationaleEnforcedDockerHub:
+ return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription)
+ case rationaleUserSelection, rationaleNone:
+ fallthrough
+ default:
+ return ""
+ }
+}
+
+// FormatPullErrors is a convenience function to format errors that occurred
+// while trying to pull all of the resolved pull candidates.
+//
+// Note that nil is returned if len(pullErrors) == 0. Otherwise, the number of
+// pull errors must equal the number of pull candidates.
+func (r *Resolved) FormatPullErrors(pullErrors []error) error {
+ if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) {
+ pullErrors = append(slices.Clone(pullErrors),
+ fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
+ len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
+ }
+
+ switch len(pullErrors) {
+ case 0:
+ return nil
+ case 1:
+ return pullErrors[0]
+ default:
+ var sb strings.Builder
+ sb.WriteString(fmt.Sprintf("%d errors occurred while pulling:", len(pullErrors)))
+ for _, e := range pullErrors {
+ sb.WriteString("\n * ")
+ sb.WriteString(e.Error())
+ }
+ return errors.New(sb.String())
+ }
+}
+
+// PullCandidate is a resolved name. Once the Value has been used
+// successfully, users MUST call `(*PullCandidate).Record(..)` to possibly
+// record it as a new short-name alias.
+type PullCandidate struct {
+ // Fully-qualified reference with tag or digest.
+ Value reference.Named
+ // Control whether to record it permanently as an alias.
+ record bool
+
+ // Backwards pointer to the Resolved "parent".
+ resolved *Resolved
+}
+
+// Record may store a short-name alias for the PullCandidate.
+func (c *PullCandidate) Record() error {
+ if !c.record {
+ return nil
+ }
+
+ // Strip off tags/digests from name/value.
+ name := reference.TrimNamed(c.resolved.userInput)
+ value := reference.TrimNamed(c.Value)
+
+ if err := Add(c.resolved.systemContext, name.String(), value); err != nil {
+ return fmt.Errorf("recording short-name alias (%q=%q): %w", c.resolved.userInput, c.Value, err)
+ }
+ return nil
+}
+
+// Resolve resolves the specified name to one or more fully-qualified image
+// references that the short name may be *pulled* from. If the specified name
+// is already a fully-qualified reference (i.e., not a short name), it is
+// returned as is. If it is a short name, it is resolved according to the
+// ShortNameMode in the SystemContext (if specified) or in the registries.conf.
+//
+// Note that tags and digests are stripped from the specified name before
+// looking up an alias. Stripped-off tags and digests are appended to all
+// candidates later on. If neither tag nor digest is specified, candidates are
+// normalized with the "latest" tag. An error is returned if there is no
+// matching alias and no unqualified-search registries are configured.
+//
+// Note that callers *must* call `(PullCandidate).Record` after a returned
+// item has been pulled successfully; this callback will record a new
+// short-name alias (depending on the specified short-name mode).
+//
+// Furthermore, before attempting to pull, callers *should* call
+// `(Resolved).Description` and afterwards use
+// `(Resolved).FormatPullErrors` in case of pull errors.
+func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
+ resolved := &Resolved{}
+
+ // Create a copy of the system context to make it usable beyond this
+ // function call.
+ if ctx != nil {
+ copy := *ctx
+ ctx = &copy
+ }
+ resolved.systemContext = ctx
+
+ // Detect which mode we're running in.
+ mode, err := sysregistriesv2.GetShortNameMode(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Sanity check the short-name mode.
+ switch mode {
+ case types.ShortNameModeDisabled, types.ShortNameModePermissive, types.ShortNameModeEnforcing:
+ // We're good.
+ default:
+ return nil, fmt.Errorf("unsupported short-name mode (%v)", mode)
+ }
+
+ isShort, shortRef, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return nil, err
+ }
+ if !isShort { // no short name
+ resolved.addCandidate(shortRef)
+ return resolved, nil
+ }
+
+ // Resolve to docker.io only if enforced by the caller (e.g., Podman's
+ // Docker-compatible REST API).
+ if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, fmt.Errorf("cannot normalize input: %q: %w", name, err)
+ }
+ resolved.addCandidate(named)
+ resolved.rationale = rationaleEnforcedDockerHub
+ resolved.originDescription = "enforced by caller"
+ return resolved, nil
+ }
+
+ // Strip off the tag to normalize the short name for looking it up in
+ // the config files.
+ isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef)
+ resolved.userInput = shortNameRepo
+
+ // If there's already an alias, use it.
+ namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String())
+ if err != nil {
+ return nil, err
+ }
+
+ // Always use an alias if present.
+ if namedAlias != nil {
+ if isTagged {
+ namedAlias, err = reference.WithTag(namedAlias, tag)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if isDigested {
+ namedAlias, err = reference.WithDigest(namedAlias, digest)
+ if err != nil {
+ return nil, err
+ }
+ }
+ resolved.addCandidate(namedAlias)
+ resolved.rationale = rationaleAlias
+ resolved.originDescription = aliasOriginDescription
+ return resolved, nil
+ }
+
+ resolved.rationale = rationaleUSR
+
+ // Query the registries.conf configuration for unqualified-search registries.
+ unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // Error out if there's no matching alias and no search registries.
+ if len(unqualifiedSearchRegistries) == 0 {
+ if usrConfig != "" {
+ return nil, fmt.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig)
+ }
+ return nil, fmt.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name)
+ }
+ resolved.originDescription = usrConfig
+
+ for _, reg := range unqualifiedSearchRegistries {
+ named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
+ if err != nil {
+ return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err)
+ }
+ resolved.addCandidate(named)
+ }
+
+ // If we're running in disabled mode, return the candidates without
+ // prompting (and without recording).
+ if mode == types.ShortNameModeDisabled {
+ return resolved, nil
+ }
+
+ // If we have only one candidate, there's no ambiguity.
+ if len(resolved.PullCandidates) == 1 {
+ return resolved, nil
+ }
+
+ // If we don't have a TTY, act according to the mode.
+ if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stdin.Fd())) {
+ switch mode {
+ case types.ShortNameModePermissive:
+ // Permissive falls back to using all candidates.
+ return resolved, nil
+ case types.ShortNameModeEnforcing:
+ // Enforcing errors out without a prompt.
+ return nil, errors.New("short-name resolution enforced but cannot prompt without a TTY")
+ default:
+ // We should not end up here.
+ return nil, fmt.Errorf("unexpected short-name mode (%v) during resolution", mode)
+ }
+ }
+
+ // We have a TTY, and can prompt the user with a selection of all
+ // possible candidates.
+ strCandidates := []string{}
+ for _, candidate := range resolved.PullCandidates {
+ strCandidates = append(strCandidates, candidate.Value.String())
+ }
+ prompt := promptui.Select{
+ Label: "Please select an image",
+ Items: strCandidates,
+ HideHelp: true, // do not show navigation help
+ }
+
+ _, selection, err := prompt.Run()
+ if err != nil {
+ return nil, err
+ }
+
+ named, err := reference.ParseNormalizedNamed(selection)
+ if err != nil {
+ return nil, fmt.Errorf("selection %q is not a valid reference: %w", selection, err)
+ }
+
+ resolved.PullCandidates = nil
+ resolved.addCandidateToRecord(named)
+ resolved.rationale = rationaleUserSelection
+
+ return resolved, nil
+}
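+
+// Illustrative usage sketch (editor's note, not part of the upstream file):
+// a caller would typically combine Resolve, Description, Record, and
+// FormatPullErrors roughly as follows. sysCtx and pullImage are hypothetical.
+//
+//    resolved, err := shortnames.Resolve(sysCtx, "fedora")
+//    if err != nil {
+//        return err
+//    }
+//    if desc := resolved.Description(); desc != "" {
+//        fmt.Fprintln(os.Stderr, desc)
+//    }
+//    var pullErrors []error
+//    for _, candidate := range resolved.PullCandidates {
+//        if err := pullImage(candidate.Value); err != nil {
+//            pullErrors = append(pullErrors, err)
+//            continue
+//        }
+//        return candidate.Record() // may store a new short-name alias
+//    }
+//    return resolved.FormatPullErrors(pullErrors) // all candidates failed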
+
+// ResolveLocally resolves the specified name to one or more local images. If
+// the specified name is already a fully-qualified reference (i.e., not a
+// short name), it is returned as is. If it is a short name, the returned
+// slice of named references looks as follows:
+//
+// 1. If present, the short-name alias
+// 2. "localhost/" as used by many container engines such as Podman and Buildah
+// 3. Unqualified-search registries from the registries.conf files
+//
+// Note that tags and digests are stripped from the specified name before
+// looking up an alias. Stripped-off tags and digests are appended to all
+// candidates later on. If neither tag nor digest is specified, candidates are
+// normalized with the "latest" tag. The returned slice contains at least one
+// item.
+func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, error) {
+ isShort, shortRef, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return nil, err
+ }
+ if !isShort { // no short name
+ named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed
+ return []reference.Named{named}, nil
+ }
+
+ var candidates []reference.Named
+
+ // Complete the candidates with the specified registries.
+ completeCandidates := func(registries []string) ([]reference.Named, error) {
+ for _, reg := range registries {
+ named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
+ if err != nil {
+ return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err)
+ }
+ named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
+ candidates = append(candidates, named)
+ }
+ return candidates, nil
+ }
+
+ if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
+ return completeCandidates([]string{"docker.io"})
+ }
+
+ // Strip off the tag to normalize the short name for looking it up in
+ // the config files.
+ isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef)
+
+ // If there's already an alias, use it.
+ namedAlias, _, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String())
+ if err != nil {
+ return nil, err
+ }
+ if namedAlias != nil {
+ if isTagged {
+ namedAlias, err = reference.WithTag(namedAlias, tag)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if isDigested {
+ namedAlias, err = reference.WithDigest(namedAlias, digest)
+ if err != nil {
+ return nil, err
+ }
+ }
+ namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed
+ candidates = append(candidates, namedAlias)
+ }
+
+ // Query the registries.conf configuration for unqualified-search registries.
+ unqualifiedSearchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Note that "localhost" has precedence over the unqualified-search registries.
+ return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...))
+}
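+
+// Illustrative example (editor's note, not part of the upstream file): with
+// the alias "repo/image" = "quay.io/repo/image" and the unqualified-search
+// registries ["quay.io", "registry.com"] (as in testdata/two-reg.conf),
+// ResolveLocally returns the alias first, then "localhost/", then the search
+// registries:
+//
+//    names, _ := shortnames.ResolveLocally(sysCtx, "repo/image") // sysCtx is hypothetical
+//    // names: quay.io/repo/image:latest, localhost/repo/image:latest,
+//    //        quay.io/repo/image:latest, registry.com/repo/image:latest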
diff --git a/pkg/shortnames/shortnames_test.go b/pkg/shortnames/shortnames_test.go
new file mode 100644
index 0000000..78a9fbf
--- /dev/null
+++ b/pkg/shortnames/shortnames_test.go
@@ -0,0 +1,603 @@
+package shortnames
+
+import (
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsShortName(t *testing.T) {
+ tests := []struct {
+ input string
+ parseUnnormalizedShortName bool
+ mustFail bool
+ }{
+ // SHORT NAMES
+ {"fedora", true, false},
+ {"fedora:latest", true, false},
+ {"library/fedora", true, false},
+ {"library/fedora:latest", true, false},
+ {"busybox@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", true, false},
+ {"busybox:latest@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", true, false},
+ // !SHORT NAMES
+ {"quay.io/fedora", false, false},
+ {"docker.io/fedora", false, false},
+ {"docker.io/library/fedora:latest", false, false},
+ {"localhost/fedora", false, false},
+ {"localhost:5000/fedora:latest", false, false},
+ {"example.foo.this.may.be.garbage.but.maybe.not:1234/fedora:latest", false, false},
+ {"docker.io/library/busybox@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", false, false},
+ {"docker.io/library/busybox:latest@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", false, false},
+ {"docker.io/fedora@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", false, false},
+ // INVALID NAMES
+ {"", false, true},
+ {"$$$", false, true},
+ {"::", false, true},
+ {"docker://quay.io/library/foo:bar", false, true},
+ {" ", false, true},
+ }
+
+ for _, test := range tests {
+ res, _, err := parseUnnormalizedShortName(test.input)
+ if test.mustFail {
+ require.Error(t, err, "%q should not be parseable", test.input)
+ continue
+ }
+ require.NoError(t, err, "%q should be parseable", test.input)
+ assert.Equal(t, test.parseUnnormalizedShortName, res, "%q", test.input)
+ }
+}
+
+func TestSplitUserInput(t *testing.T) {
+ tests := []struct {
+ input string
+ repo string
+ isTagged bool
+ isDigested bool
+ }{
+ // Neither tags nor digests
+ {"fedora", "fedora", false, false},
+ {"repo/fedora", "repo/fedora", false, false},
+ {"registry.com/fedora", "registry.com/fedora", false, false},
+ // Tags
+ {"fedora:tag", "fedora", true, false},
+ {"repo/fedora:tag", "repo/fedora", true, false},
+ {"registry.com/fedora:latest", "registry.com/fedora", true, false},
+ // Digests
+ {"fedora@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", "fedora", false, true},
+ {"repo/fedora@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", "repo/fedora", false, true},
+ {"registry.com/fedora@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", "registry.com/fedora", false, true},
+ }
+
+ for _, test := range tests {
+ _, ref, err := parseUnnormalizedShortName(test.input)
+ require.NoError(t, err, "%v", test)
+
+ isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(ref)
+ require.NotNil(t, shortNameRepo)
+ normalized := shortNameRepo.String()
+ assert.Equal(t, test.repo, normalized)
+ assert.Equal(t, test.isTagged, isTagged)
+ assert.Equal(t, test.isDigested, isDigested)
+ if isTagged {
+ normalized = normalized + ":" + tag
+ } else if isDigested {
+ normalized = normalized + "@" + digest.String()
+ }
+ assert.Equal(t, test.input, normalized)
+ }
+}
+
+func TestResolve(t *testing.T) {
+ tmp, err := os.CreateTemp("", "aliases.conf")
+ require.NoError(t, err)
+ defer os.Remove(tmp.Name())
+
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/aliases.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ UserShortNameAliasConfPath: tmp.Name(),
+ }
+
+ sysResolveToDockerHub := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/aliases.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ UserShortNameAliasConfPath: tmp.Name(),
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub: true,
+ }
+
+ _, err = sysregistriesv2.TryUpdatingCache(sys)
+ require.NoError(t, err)
+
+ digest := "@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a"
+
+ tests := []struct {
+ input, value, dockerHubValue string
+ }{
+ { // alias
+ "docker",
+ "docker.io/library/foo:latest",
+ "docker.io/library/docker:latest",
+ },
+ { // alias tagged
+ "docker:tag",
+ "docker.io/library/foo:tag",
+ "docker.io/library/docker:tag",
+ },
+ { // alias digested
+ "docker" + digest,
+ "docker.io/library/foo" + digest,
+ "docker.io/library/docker" + digest,
+ },
+ { // alias with repo
+ "quay/foo",
+ "quay.io/library/foo:latest",
+ "docker.io/quay/foo:latest",
+ },
+ { // alias with repo tagged
+ "quay/foo:tag",
+ "quay.io/library/foo:tag",
+ "docker.io/quay/foo:tag",
+ },
+ { // alias with repo digested
+ "quay/foo" + digest,
+ "quay.io/library/foo" + digest,
+ "docker.io/quay/foo" + digest,
+ },
+ { // alias
+ "example",
+ "example.com/library/foo:latest",
+ "docker.io/library/example:latest",
+ },
+ { // alias with tag
+ "example:tag",
+ "example.com/library/foo:tag",
+ "docker.io/library/example:tag",
+ },
+ { // alias digested
+ "example" + digest,
+ "example.com/library/foo" + digest,
+ "docker.io/library/example" + digest,
+ },
+ { // FQN
+ "registry.com/repo/image",
+ "registry.com/repo/image:latest",
+ "registry.com/repo/image:latest",
+ },
+ { // FQN tagged
+ "registry.com/repo/image:tag",
+ "registry.com/repo/image:tag",
+ "registry.com/repo/image:tag",
+ },
+ { // FQN digested
+ "registry.com/repo/image" + digest,
+ "registry.com/repo/image" + digest,
+ "registry.com/repo/image" + digest,
+ },
+ }
+
+ // All of them should resolve correctly.
+ for _, test := range tests {
+ resolved, err := Resolve(sys, test.input)
+ require.NoError(t, err, "%v", test)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, 1)
+ assert.Equal(t, test.value, resolved.PullCandidates[0].Value.String())
+ assert.False(t, resolved.PullCandidates[0].record)
+ }
+
+ // Now another run with enforcing resolution to Docker Hub.
+ for _, test := range tests {
+ resolved, err := Resolve(sysResolveToDockerHub, test.input)
+ require.NoError(t, err, "%v", test)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, 1)
+ assert.Equal(t, test.dockerHubValue, resolved.PullCandidates[0].Value.String())
+ assert.False(t, resolved.PullCandidates[0].record)
+ }
+
+ // Non-existent alias should return an error as no search registries
+ // are configured in the config.
+ resolved, err := Resolve(sys, "doesnotexist")
+ require.Error(t, err)
+ require.Nil(t, resolved)
+
+ // It'll work though when enforcing resolving to Docker Hub.
+ resolved, err = Resolve(sysResolveToDockerHub, "doesnotexist")
+ require.NoError(t, err)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, 1)
+ assert.Equal(t, "docker.io/library/doesnotexist:latest", resolved.PullCandidates[0].Value.String())
+ assert.False(t, resolved.PullCandidates[0].record)
+
+ // An empty name is not valid.
+ resolved, err = Resolve(sys, "")
+ require.Error(t, err)
+ require.Nil(t, resolved)
+
+ // Invalid input.
+ resolved, err = Resolve(sys, "Invalid#$")
+ require.Error(t, err)
+ require.Nil(t, resolved)
+}
+
+func toNamed(t *testing.T, input string, trim bool) reference.Named {
+ ref, err := reference.Parse(input)
+ require.NoError(t, err)
+ named := ref.(reference.Named)
+ require.NotNil(t, named)
+
+ if trim {
+ named = reference.TrimNamed(named)
+ }
+
+ return named
+}
+
+func addAlias(t *testing.T, sys *types.SystemContext, name string, value string, mustFail bool) {
+ namedValue := toNamed(t, value, false)
+
+ if mustFail {
+ require.Error(t, Add(sys, name, namedValue))
+ } else {
+ require.NoError(t, Add(sys, name, namedValue))
+ }
+}
+
+func removeAlias(t *testing.T, sys *types.SystemContext, name string, mustFail bool, trim bool) {
+ namedName := toNamed(t, name, trim)
+
+ if mustFail {
+ require.Error(t, Remove(sys, namedName.String()))
+ } else {
+ require.NoError(t, Remove(sys, namedName.String()))
+ }
+}
+
+func TestResolveWithDropInConfigs(t *testing.T) {
+ tmp, err := os.CreateTemp("", "aliases.conf")
+ require.NoError(t, err)
+ defer os.Remove(tmp.Name())
+
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/aliases.conf",
+ SystemRegistriesConfDirPath: "testdata/registries.conf.d",
+ UserShortNameAliasConfPath: tmp.Name(),
+ }
+
+ _, err = sysregistriesv2.TryUpdatingCache(sys)
+ require.NoError(t, err)
+
+ tests := []struct {
+ name, value string
+ }{
+ {"docker", "docker.io/library/config1:latest"}, // overridden by config1
+ {"docker:tag", "docker.io/library/config1:tag"},
+ {
+ "docker@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ "docker.io/library/config1@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ },
+ {"quay/foo", "quay.io/library/foo:latest"},
+ {"quay/foo:tag", "quay.io/library/foo:tag"},
+ {
+ "quay/foo@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ "quay.io/library/foo@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ },
+ {"config1", "config1.com/image:latest"},
+ {"config1:tag", "config1.com/image:tag"},
+ {
+ "config1@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ "config1.com/image@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ },
+ {"barz", "barz.com/config2:latest"}, // from config1, overridden by config2
+ {"barz:tag", "barz.com/config2:tag"},
+ {
+ "barz@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ "barz.com/config2@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ },
+ {"added1", "aliases.conf/added1:latest"}, // from Add()
+ {"added1:tag", "aliases.conf/added1:tag"},
+ {
+ "added1@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ "aliases.conf/added1@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ },
+ {"added2", "aliases.conf/added2:latest"}, // from Add()
+ {"added2:tag", "aliases.conf/added2:tag"},
+ {
+ "added2@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ "aliases.conf/added2@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ },
+ {"added3", "aliases.conf/added3:latest"}, // from Add()
+ {"added3:tag", "aliases.conf/added3:tag"},
+ {
+ "added3@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ "aliases.conf/added3@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a",
+ },
+ }
+
+ addAlias(t, sys, "added1", "aliases.conf/added1", false)
+ addAlias(t, sys, "added2", "aliases.conf/added2", false)
+ addAlias(t, sys, "added3", "aliases.conf/added3", false)
+
+ // Tags/digests are invalid!
+ addAlias(t, sys, "added3", "aliases.conf/added3:tag", true)
+ addAlias(t, sys, "added3:tag", "aliases.conf/added3", true)
+ addAlias(t, sys, "added3", "aliases.conf/added3@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", true)
+ addAlias(t, sys, "added3@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", "aliases.conf/added3", true)
+
+ // All of them should resolve correctly.
+ for _, test := range tests {
+ resolved, err := Resolve(sys, test.name)
+ require.NoError(t, err)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, 1)
+ assert.Equal(t, test.value, resolved.PullCandidates[0].Value.String())
+ assert.False(t, resolved.PullCandidates[0].record)
+ }
+
+ // config1 sets one search registry.
+ resolved, err := Resolve(sys, "doesnotexist")
+ require.NoError(t, err)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, 1)
+ assert.Equal(t, "example-overwrite.com/doesnotexist:latest", resolved.PullCandidates[0].Value.String())
+
+ // An empty name is not valid.
+ resolved, err = Resolve(sys, "")
+ require.Error(t, err)
+ require.Nil(t, resolved)
+
+ // Invalid input.
+ resolved, err = Resolve(sys, "Invalid#$")
+ require.Error(t, err)
+ require.Nil(t, resolved)
+
+ // Fully-qualified input will be returned as is.
+ resolved, err = Resolve(sys, "quay.io/repo/fedora")
+ require.NoError(t, err)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, 1)
+ assert.Equal(t, "quay.io/repo/fedora:latest", resolved.PullCandidates[0].Value.String())
+ assert.False(t, resolved.PullCandidates[0].record)
+
+ resolved, err = Resolve(sys, "localhost/repo/fedora:sometag")
+ require.NoError(t, err)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, 1)
+ assert.Equal(t, "localhost/repo/fedora:sometag", resolved.PullCandidates[0].Value.String())
+ assert.False(t, resolved.PullCandidates[0].record)
+
+ // Now test removal.
+
+ // Stored in aliases.conf, so we can remove it.
+ removeAlias(t, sys, "added1", false, false)
+ removeAlias(t, sys, "added2", false, false)
+ removeAlias(t, sys, "added3", false, false)
+ removeAlias(t, sys, "added2:tag", true, false)
+ removeAlias(t, sys, "added3@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", true, false)
+
+ // Doesn't exist -> error.
+ removeAlias(t, sys, "added1", true, false)
+ removeAlias(t, sys, "added2", true, false)
+ removeAlias(t, sys, "added3", true, false)
+
+ // Cannot remove entries from registries.conf files -> error.
+ removeAlias(t, sys, "docker", true, false)
+ removeAlias(t, sys, "docker", true, false)
+ removeAlias(t, sys, "docker", true, false)
+}
+
+func TestResolveWithVaryingShortNameModes(t *testing.T) {
+ tmp, err := os.CreateTemp("", "aliases.conf")
+ require.NoError(t, err)
+ defer os.Remove(tmp.Name())
+
+ tests := []struct {
+ confPath string
+ mode types.ShortNameMode
+ name string
+ mustFail bool
+ numAliases int
+ }{
+ // Invalid -> error
+ {"testdata/no-reg.conf", types.ShortNameModeInvalid, "repo/image", true, 0},
+ {"testdata/one-reg.conf", types.ShortNameModeInvalid, "repo/image", true, 0},
+ {"testdata/two-reg.conf", types.ShortNameModeInvalid, "repo/image", true, 0},
+ // Permissive + match -> return alias
+ {"testdata/no-reg.conf", types.ShortNameModePermissive, "repo/image", false, 1},
+ {"testdata/one-reg.conf", types.ShortNameModePermissive, "repo/image", false, 1},
+ {"testdata/two-reg.conf", types.ShortNameModePermissive, "repo/image", false, 1},
+ // Permissive + no match -> search (no tty)
+ {"testdata/no-reg.conf", types.ShortNameModePermissive, "doesnotexist", true, 0},
+ {"testdata/one-reg.conf", types.ShortNameModePermissive, "doesnotexist", false, 1},
+ {"testdata/two-reg.conf", types.ShortNameModePermissive, "doesnotexist", false, 2},
+ // Disabled + match -> return alias
+ {"testdata/no-reg.conf", types.ShortNameModeDisabled, "repo/image", false, 1},
+ {"testdata/one-reg.conf", types.ShortNameModeDisabled, "repo/image", false, 1},
+ {"testdata/two-reg.conf", types.ShortNameModeDisabled, "repo/image", false, 1},
+ // Disabled + no match -> search
+ {"testdata/no-reg.conf", types.ShortNameModeDisabled, "doesnotexist", true, 0},
+ {"testdata/one-reg.conf", types.ShortNameModeDisabled, "doesnotexist", false, 1},
+ {"testdata/two-reg.conf", types.ShortNameModeDisabled, "doesnotexist", false, 2},
+ // Enforcing + match -> return alias
+ {"testdata/no-reg.conf", types.ShortNameModeEnforcing, "repo/image", false, 1},
+ {"testdata/one-reg.conf", types.ShortNameModeEnforcing, "repo/image", false, 1},
+ {"testdata/two-reg.conf", types.ShortNameModeEnforcing, "repo/image", false, 1},
+ // Enforcing + no match -> error if search regs > 1 and no tty
+ {"testdata/no-reg.conf", types.ShortNameModeEnforcing, "doesnotexist", true, 0},
+ {"testdata/one-reg.conf", types.ShortNameModeEnforcing, "doesnotexist", false, 1},
+ {"testdata/two-reg.conf", types.ShortNameModeEnforcing, "doesnotexist", true, 0},
+ }
+
+ for _, test := range tests {
+ sys := &types.SystemContext{
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ UserShortNameAliasConfPath: tmp.Name(),
+ // From test
+ SystemRegistriesConfPath: test.confPath,
+ ShortNameMode: &test.mode,
+ }
+
+ _, err := sysregistriesv2.TryUpdatingCache(sys)
+ require.NoError(t, err)
+
+ resolved, err := Resolve(sys, test.name)
+ if test.mustFail {
+ require.Error(t, err, "%v", test)
+ continue
+ }
+ require.NoError(t, err, "%v", test)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, test.numAliases, "%v", test)
+ }
+}
+
+func TestResolveAndRecord(t *testing.T) {
+ tmp, err := os.CreateTemp("", "aliases.conf")
+ require.NoError(t, err)
+ defer os.Remove(tmp.Name())
+
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/two-reg.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ UserShortNameAliasConfPath: tmp.Name(),
+ }
+
+ _, err = sysregistriesv2.TryUpdatingCache(sys)
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ expected []string
+ }{
+ // No alias -> USRs
+ {"foo", []string{"quay.io/foo:latest", "registry.com/foo:latest"}},
+ {"foo:tag", []string{"quay.io/foo:tag", "registry.com/foo:tag"}},
+ {"foo@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", []string{"quay.io/foo@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", "registry.com/foo@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a"}},
+ {"repo/foo", []string{"quay.io/repo/foo:latest", "registry.com/repo/foo:latest"}},
+ {"repo/foo:tag", []string{"quay.io/repo/foo:tag", "registry.com/repo/foo:tag"}},
+ {"repo/foo@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", []string{"quay.io/repo/foo@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", "registry.com/repo/foo@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a"}},
+ // Alias
+ {"repo/image", []string{"quay.io/repo/image:latest"}},
+ {"repo/image:tag", []string{"quay.io/repo/image:tag"}},
+ {"repo/image@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a", []string{"quay.io/repo/image@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a"}},
+ }
+ for _, test := range tests {
+ resolved, err := Resolve(sys, test.name)
+ require.NoError(t, err, "%v", test)
+ require.NotNil(t, resolved)
+ require.Len(t, resolved.PullCandidates, len(test.expected), "%v", test)
+
+ for i, candidate := range resolved.PullCandidates {
+ require.Equal(t, test.expected[i], candidate.Value.String(), "%v", test)
+
+ require.False(t, candidate.record, "%v", test)
+ candidate.record = true // make sure we can actually record
+
+ // Record the alias, look it up another time and make
+ // sure there's only one match (i.e., the new alias)
+ // and that it has the expected value.
+ require.NoError(t, candidate.Record())
+
+ newResolved, err := Resolve(sys, test.name)
+ require.NoError(t, err, "%v", test)
+ require.Len(t, newResolved.PullCandidates, 1, "%v", test)
+ require.Equal(t, candidate.Value.String(), newResolved.PullCandidates[0].Value.String(), "%v", test)
+
+ // Now remove the alias again.
+ removeAlias(t, sys, test.name, false, true)
+
+ // Now set recording to false and try recording again.
+ candidate.record = false
+ require.NoError(t, candidate.Record())
+ removeAlias(t, sys, test.name, true, true) // must error out now
+ }
+ }
+}
+
+func TestResolveLocally(t *testing.T) {
+ tmp, err := os.CreateTemp("", "aliases.conf")
+ require.NoError(t, err)
+ defer os.Remove(tmp.Name())
+
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/two-reg.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ UserShortNameAliasConfPath: tmp.Name(),
+ }
+ sysResolveToDockerHub := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/two-reg.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ UserShortNameAliasConfPath: tmp.Name(),
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub: true,
+ }
+
+ digest := "@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a"
+
+ tests := []struct {
+ input string
+ expectedSys []string
+ expectedSysResolveToDockerHub string
+ }{
+ { // alias match
+ "repo/image",
+ []string{"quay.io/repo/image:latest", "localhost/repo/image:latest", "quay.io/repo/image:latest", "registry.com/repo/image:latest"},
+ "docker.io/repo/image:latest",
+ },
+ { // no alias match
+ "foo",
+ []string{"localhost/foo:latest", "quay.io/foo:latest", "registry.com/foo:latest"},
+ "docker.io/library/foo:latest",
+ },
+ { // no alias match tagged
+ "foo:tag",
+ []string{"localhost/foo:tag", "quay.io/foo:tag", "registry.com/foo:tag"},
+ "docker.io/library/foo:tag",
+ },
+ { // no alias match digested
+ "foo" + digest,
+ []string{"localhost/foo" + digest, "quay.io/foo" + digest, "registry.com/foo" + digest},
+ "docker.io/library/foo" + digest,
+ },
+ { // localhost
+ "localhost/foo",
+ []string{"localhost/foo:latest"},
+ "localhost/foo:latest",
+ },
+ { // localhost tagged
+ "localhost/foo:tag",
+ []string{"localhost/foo:tag"},
+ "localhost/foo:tag",
+ },
+ { // localhost digested
+ "localhost/foo" + digest,
+ []string{"localhost/foo" + digest},
+ "localhost/foo" + digest,
+ },
+ { // non-localhost FQN + digest
+ "registry.com/repo/image" + digest,
+ []string{"registry.com/repo/image" + digest},
+ "registry.com/repo/image" + digest,
+ },
+ }
+
+ for _, test := range tests {
+ aliases, err := ResolveLocally(sys, test.input)
+ require.NoError(t, err)
+ require.Len(t, aliases, len(test.expectedSys))
+ for i := range aliases {
+ assert.Equal(t, test.expectedSys[i], aliases[i].String())
+ }
+
+ // Another run enforcing resolving to Docker Hub.
+ aliases, err = ResolveLocally(sysResolveToDockerHub, test.input)
+ require.NoError(t, err)
+ require.Len(t, aliases, 1)
+ assert.Equal(t, test.expectedSysResolveToDockerHub, aliases[0].String())
+ }
+}
diff --git a/pkg/shortnames/testdata/aliases.conf b/pkg/shortnames/testdata/aliases.conf
new file mode 100644
index 0000000..cb05b27
--- /dev/null
+++ b/pkg/shortnames/testdata/aliases.conf
@@ -0,0 +1,7 @@
+short-name-mode="enforcing"
+
+[aliases]
+docker="docker.io/library/foo"
+"quay/foo"="quay.io/library/foo"
+example="example.com/library/foo"
+empty=""
diff --git a/pkg/shortnames/testdata/no-reg.conf b/pkg/shortnames/testdata/no-reg.conf
new file mode 100644
index 0000000..968f7ec
--- /dev/null
+++ b/pkg/shortnames/testdata/no-reg.conf
@@ -0,0 +1,2 @@
+[aliases]
+"repo/image"="quay.io/repo/image"
diff --git a/pkg/shortnames/testdata/one-reg.conf b/pkg/shortnames/testdata/one-reg.conf
new file mode 100644
index 0000000..e9dfab2
--- /dev/null
+++ b/pkg/shortnames/testdata/one-reg.conf
@@ -0,0 +1,4 @@
+unqualified-search-registries=["quay.io"]
+
+[aliases]
+"repo/image"="quay.io/repo/image"
diff --git a/pkg/shortnames/testdata/registries.conf.d/config-1.conf b/pkg/shortnames/testdata/registries.conf.d/config-1.conf
new file mode 100644
index 0000000..f02e618
--- /dev/null
+++ b/pkg/shortnames/testdata/registries.conf.d/config-1.conf
@@ -0,0 +1,9 @@
+unqualified-search-registries = ["example-overwrite.com"]
+
+[[registry]]
+location = "1.com"
+
+[aliases]
+docker="docker.io/library/config1"
+config1="config1.com/image"
+barz="barz.com/image/config1"
diff --git a/pkg/shortnames/testdata/registries.conf.d/config-2.conf b/pkg/shortnames/testdata/registries.conf.d/config-2.conf
new file mode 100644
index 0000000..7ec82c7
--- /dev/null
+++ b/pkg/shortnames/testdata/registries.conf.d/config-2.conf
@@ -0,0 +1,14 @@
+short-name-mode="permissive"
+
+[[registry]]
+location = "2.com"
+
+[[registry]]
+location = "base.com"
+blocked = true
+
+[aliases]
+config2="config2.com/image"
+barz="barz.com/config2"
+added3="xxx.com/image"
+example=""
diff --git a/pkg/shortnames/testdata/registries.conf.d/config-3.conf b/pkg/shortnames/testdata/registries.conf.d/config-3.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg/shortnames/testdata/registries.conf.d/config-3.conf
diff --git a/pkg/shortnames/testdata/registries.conf.d/config-3.ignore b/pkg/shortnames/testdata/registries.conf.d/config-3.ignore
new file mode 100644
index 0000000..65866fd
--- /dev/null
+++ b/pkg/shortnames/testdata/registries.conf.d/config-3.ignore
@@ -0,0 +1,7 @@
+unqualified-search-registries = ["ignore-example-overwrite.com"]
+
+[[registry]]
+location = "ignore-me-because-i-have-a-wrong-suffix.com"
+
+[aliases]
+ignore="me because i have a wrong suffix"
diff --git a/pkg/shortnames/testdata/two-reg.conf b/pkg/shortnames/testdata/two-reg.conf
new file mode 100644
index 0000000..2ed7829
--- /dev/null
+++ b/pkg/shortnames/testdata/two-reg.conf
@@ -0,0 +1,4 @@
+unqualified-search-registries=["quay.io", "registry.com"]
+
+[aliases]
+"repo/image"="quay.io/repo/image"
diff --git a/pkg/strslice/README.md b/pkg/strslice/README.md
new file mode 100644
index 0000000..ae6097e
--- /dev/null
+++ b/pkg/strslice/README.md
@@ -0,0 +1 @@
+This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
diff --git a/pkg/strslice/strslice.go b/pkg/strslice/strslice.go
new file mode 100644
index 0000000..bad493f
--- /dev/null
+++ b/pkg/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
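+
+// Illustrative example (editor's note, not part of the upstream file): both a
+// JSON string and a JSON array decode into a StrSlice.
+//
+//    var cmd StrSlice
+//    _ = json.Unmarshal([]byte(`"echo"`), &cmd)                    // cmd == StrSlice{"echo"}
+//    _ = json.Unmarshal([]byte(`["/bin/sh", "-c", "echo"]`), &cmd) // cmd == StrSlice{"/bin/sh", "-c", "echo"}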
diff --git a/pkg/strslice/strslice_test.go b/pkg/strslice/strslice_test.go
new file mode 100644
index 0000000..0ef5f4b
--- /dev/null
+++ b/pkg/strslice/strslice_test.go
@@ -0,0 +1,86 @@
+package strslice
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+func TestStrSliceMarshalJSON(t *testing.T) {
+ for _, testcase := range []struct {
+ input StrSlice
+ expected string
+ }{
+ // MADNESS(stevvooe): No clue why nil would be "" but empty would be
+ // "null". Had to make a change here that may affect compatibility.
+ {input: nil, expected: "null"},
+ {StrSlice{}, "[]"},
+ {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`},
+ } {
+ data, err := json.Marshal(testcase.input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(data) != testcase.expected {
+ t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data))
+ }
+ }
+}
+
+func TestStrSliceUnmarshalJSON(t *testing.T) {
+ parts := map[string][]string{
+ "": {"default", "values"},
+ "[]": {},
+ `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
+ }
+ for json, expectedParts := range parts {
+ strs := StrSlice{"default", "values"}
+ if err := strs.UnmarshalJSON([]byte(json)); err != nil {
+ t.Fatal(err)
+ }
+
+ actualParts := []string(strs)
+ if !reflect.DeepEqual(actualParts, expectedParts) {
+ t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts)
+ }
+
+ }
+}
+
+func TestStrSliceUnmarshalString(t *testing.T) {
+ var e StrSlice
+ echo, err := json.Marshal("echo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := json.Unmarshal(echo, &e); err != nil {
+ t.Fatal(err)
+ }
+
+ if len(e) != 1 {
+ t.Fatalf("expected 1 element after unmarshal: %q", e)
+ }
+
+ if e[0] != "echo" {
+ t.Fatalf("expected `echo`, got: %q", e[0])
+ }
+}
+
+func TestStrSliceUnmarshalSlice(t *testing.T) {
+ var e StrSlice
+ echo, err := json.Marshal([]string{"echo"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := json.Unmarshal(echo, &e); err != nil {
+ t.Fatal(err)
+ }
+
+ if len(e) != 1 {
+ t.Fatalf("expected 1 element after unmarshal: %q", e)
+ }
+
+ if e[0] != "echo" {
+ t.Fatalf("expected `echo`, got: %q", e[0])
+ }
+}
diff --git a/pkg/sysregistriesv2/paths_common.go b/pkg/sysregistriesv2/paths_common.go
new file mode 100644
index 0000000..07fe502
--- /dev/null
+++ b/pkg/sysregistriesv2/paths_common.go
@@ -0,0 +1,12 @@
+//go:build !freebsd
+// +build !freebsd
+
+package sysregistriesv2
+
+// builtinRegistriesConfPath is the path to the registry configuration file.
+// DO NOT change this, instead see systemRegistriesConfPath above.
+const builtinRegistriesConfPath = "/etc/containers/registries.conf"
+
+// builtinRegistriesConfDirPath is the path to the registry configuration directory.
+// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
+const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
diff --git a/pkg/sysregistriesv2/paths_freebsd.go b/pkg/sysregistriesv2/paths_freebsd.go
new file mode 100644
index 0000000..741b99f
--- /dev/null
+++ b/pkg/sysregistriesv2/paths_freebsd.go
@@ -0,0 +1,12 @@
+//go:build freebsd
+// +build freebsd
+
+package sysregistriesv2
+
+// builtinRegistriesConfPath is the path to the registry configuration file.
+// DO NOT change this, instead see systemRegistriesConfPath above.
+const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf"
+
+// builtinRegistriesConfDirPath is the path to the registry configuration directory.
+// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
+const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d"
diff --git a/pkg/sysregistriesv2/shortnames.go b/pkg/sysregistriesv2/shortnames.go
new file mode 100644
index 0000000..3a11542
--- /dev/null
+++ b/pkg/sysregistriesv2/shortnames.go
@@ -0,0 +1,350 @@
+package sysregistriesv2
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "github.com/BurntSushi/toml"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/rootless"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/maps"
+)
+
+// defaultShortNameMode is the default mode of registries.conf files if the
+// corresponding field is left empty.
+const defaultShortNameMode = types.ShortNameModePermissive
+
+// userShortNamesFile is the user-specific config file to store aliases.
+var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf")
+
+// shortNameAliasesConfPath returns the path to the machine-generated
+// short-name-aliases.conf file.
+func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) {
+ if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 {
+ return ctx.UserShortNameAliasConfPath, nil
+ }
+
+ if rootless.GetRootlessEUID() == 0 {
+ // Root user or in a non-conforming user NS
+ return filepath.Join("/var/cache", userShortNamesFile), nil
+ }
+
+ // Rootless user
+ cacheRoot, err := homedir.GetCacheHome()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(cacheRoot, userShortNamesFile), nil
+}
+
+// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the
+// software-maintained `userShortNamesFile`.
+type shortNameAliasConf struct {
+ // A map for aliasing short names to their fully-qualified image
+ // reference counterparts.
+ // Note that Aliases is niled after being loaded from a file.
+ Aliases map[string]string `toml:"aliases"`
+
+ // If you add any field, make sure to update nonempty() below.
+}
+
+// nonempty returns true if config contains at least one configuration entry.
+func (c *shortNameAliasConf) nonempty() bool {
+ copy := *c // A shallow copy
+ if copy.Aliases != nil && len(copy.Aliases) == 0 {
+ copy.Aliases = nil
+ }
+ return !reflect.DeepEqual(copy, shortNameAliasConf{})
+}
+
+// alias combines the parsed value of an alias with the config file it has been
+// specified in. The config file is crucial for an improved user experience
+// such that users are able to resolve potential pull errors.
+type alias struct {
+ // The parsed value of an alias. May be nil if set to "" in a config.
+ value reference.Named
+ // The config file the alias originates from.
+ configOrigin string
+}
+
+// shortNameAliasCache is the result of parsing shortNameAliasConf,
+// pre-processed for faster usage.
+type shortNameAliasCache struct {
+ // Note that an alias value may be nil iff it's set as an empty string
+ // in the config.
+ namedAliases map[string]alias
+}
+
+// ResolveShortNameAlias performs an alias resolution of the specified name.
+// The user-specific short-name-aliases.conf has precedence over aliases in the
+// assembled registries.conf. It returns the possibly resolved alias or nil, a
+// human-readable description of the config where the alias is specified, and
+// an error. The origin of the config file is crucial for an improved user
+// experience such that users are able to resolve potential pull errors.
+// Almost all callers should use pkg/shortnames instead.
+//
+// Note that it’s the caller’s responsibility to pass only a repository
+// (reference.IsNameOnly) as the short name.
+func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) {
+ if err := validateShortName(name); err != nil {
+ return nil, "", err
+ }
+ confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // Acquire the lock as a reader to allow multiple goroutines in the
+ // same process to read simultaneously.
+ lock.RLock()
+ defer lock.Unlock()
+
+ _, aliasCache, err := loadShortNameAliasConf(confPath)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // First look up the short-name-aliases.conf. Note that a value may be
+ // nil iff it's set as an empty string in the config.
+ alias, resolved := aliasCache.namedAliases[name]
+ if resolved {
+ return alias.value, alias.configOrigin, nil
+ }
+
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, "", err
+ }
+ alias, resolved = config.aliasCache.namedAliases[name]
+ if resolved {
+ return alias.value, alias.configOrigin, nil
+ }
+ return nil, "", nil
+}
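+
+// Illustrative usage sketch (editor's note, not part of the upstream file):
+// the user-specific short-name-aliases.conf takes precedence over aliases in
+// registries.conf; the returned origin names the file the value came from.
+// sysCtx is a hypothetical *types.SystemContext.
+//
+//    named, origin, err := ResolveShortNameAlias(sysCtx, "fedora")
+//    if err == nil && named != nil {
+//        fmt.Printf("%q resolves to %q (from %s)\n", "fedora", named.String(), origin)
+//    }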
+
+// editShortNameAlias loads the aliases.conf file and changes it. If value is
+// set, it adds the name-value pair as a new alias. Otherwise, it will remove
+// name from the config.
+func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error {
+ if err := validateShortName(name); err != nil {
+ return err
+ }
+ if value != nil {
+ if _, err := parseShortNameValue(*value); err != nil {
+ return err
+ }
+ }
+
+ confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Acquire the lock as a writer to prevent data corruption.
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Load the short-name-alias.conf, apply the addition or removal, and
+ // write it back to the file.
+ conf, _, err := loadShortNameAliasConf(confPath)
+ if err != nil {
+ return err
+ }
+
+ if conf.Aliases == nil { // Ensure we have a map to update.
+ conf.Aliases = make(map[string]string)
+ }
+ if value != nil {
+ conf.Aliases[name] = *value
+ } else {
+ // If the name does not exist, return an error.
+ if _, exists := conf.Aliases[name]; !exists {
+ return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
+ }
+
+ delete(conf.Aliases, name)
+ }
+
+ f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ encoder := toml.NewEncoder(f)
+ return encoder.Encode(conf)
+}
+
+// AddShortNameAlias adds the specified name-value pair as a new alias to the
+// user-specific aliases.conf. It may override an existing alias for `name`.
+//
+// Note that it’s the caller’s responsibility to pass only a repository
+// (reference.IsNameOnly) as the short name.
+func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error {
+ return editShortNameAlias(ctx, name, &value)
+}
+
+// RemoveShortNameAlias clears the alias for the specified name. It returns an
+// error if the name does not exist in the machine-generated
+// short-name-alias.conf. In that case, the alias must be specified in one of
+// the registries.conf files, which is the users' responsibility.
+//
+// Note that it’s the caller’s responsibility to pass only a repository
+// (reference.IsNameOnly) as the short name.
+func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
+ return editShortNameAlias(ctx, name, nil)
+}
+
+// parseShortNameValue parses the specified alias into a reference.Named. The alias is
+// expected to not be tagged or carry a digest and *must* include a
+// domain/registry.
+//
+// Note that the returned reference is always normalized.
+func parseShortNameValue(alias string) (reference.Named, error) {
+ ref, err := reference.Parse(alias)
+ if err != nil {
+ return nil, fmt.Errorf("parsing alias %q: %w", alias, err)
+ }
+
+ if _, ok := ref.(reference.Digested); ok {
+ return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias)
+ }
+
+ if _, ok := ref.(reference.Tagged); ok {
+ return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias)
+ }
+
+ named, ok := ref.(reference.Named)
+ if !ok {
+ return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
+ }
+
+ registry := reference.Domain(named)
+ if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
+ return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
+ }
+
+ // A final parse to make sure that docker.io references are correctly
+ // normalized (e.g., docker.io/alpine to docker.io/library/alpine).
+ named, err = reference.ParseNormalizedNamed(alias)
+ return named, err
+}
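+
+// Illustrative examples (editor's note, not part of the upstream file),
+// mirroring the cases exercised in shortnames_test.go:
+//
+//    parseShortNameValue("docker.io/library/fedora") // valid
+//    parseShortNameValue("localhost:5000/fedora")    // valid
+//    parseShortNameValue("fedora")                   // error: registry missing
+//    parseShortNameValue("docker.io/fedora:latest")  // error: tag not allowed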
+
+// validateShortName parses the specified `name` of an alias (i.e., the left-hand
+// side) and checks if it's a short name and does not include a tag or digest.
+func validateShortName(name string) error {
+ repo, err := reference.Parse(name)
+ if err != nil {
+ return fmt.Errorf("cannot parse short name: %q: %w", name, err)
+ }
+
+ if _, ok := repo.(reference.Digested); ok {
+ return fmt.Errorf("invalid short name %q: must not contain digest", name)
+ }
+
+ if _, ok := repo.(reference.Tagged); ok {
+ return fmt.Errorf("invalid short name %q: must not contain tag", name)
+ }
+
+ named, ok := repo.(reference.Named)
+ if !ok {
+ return fmt.Errorf("invalid short name %q: no name", name)
+ }
+
+ registry := reference.Domain(named)
+ if strings.ContainsAny(registry, ".:") || registry == "localhost" {
+ return fmt.Errorf("invalid short name %q: must not contain registry", name)
+ }
+ return nil
+}
+
+// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal
+// representation.
+func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) {
+ res := shortNameAliasCache{
+ namedAliases: make(map[string]alias),
+ }
+ errs := []error{}
+ for name, value := range conf.Aliases {
+ if err := validateShortName(name); err != nil {
+ errs = append(errs, err)
+ }
+
+ // Empty right-hand side values in config files allow resetting an
+ // alias from a previously loaded config. This way, drop-in config
+ // files from registries.conf.d can reset potentially misconfigured
+ // aliases.
+ if value == "" {
+ res.namedAliases[name] = alias{nil, path}
+ continue
+ }
+
+ named, err := parseShortNameValue(value)
+ if err != nil {
+ // We want to report *all* malformed entries to avoid a
+ // whack-a-mole for the user.
+ errs = append(errs, err)
+ } else {
+ res.namedAliases[name] = alias{named, path}
+ }
+ }
+ if len(errs) > 0 {
+ err := errs[0]
+ for i := 1; i < len(errs); i++ {
+ err = fmt.Errorf("%v\n: %w", errs[i], err)
+ }
+ return nil, err
+ }
+ return &res, nil
+}
+
+// updateWithConfigurationFrom updates c with configuration from updates.
+// In case of conflict, updates is preferred.
+func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
+ maps.Copy(c.namedAliases, updates.namedAliases)
+}
+
+func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
+ conf := shortNameAliasConf{}
+
+ meta, err := toml.DecodeFile(confPath, &conf)
+ if err != nil && !os.IsNotExist(err) {
+ // It's okay if the config doesn't exist. Other errors are not.
+ return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
+ }
+ if keys := meta.Undecoded(); len(keys) > 0 {
+ logrus.Debugf("Failed to decode keys %q from %q", keys, confPath)
+ }
+
+ // Even if we don’t always need the cache, building it validates the machine-generated config. The
+ // file could still be corrupted by another process or user.
+ cache, err := newShortNameAliasCache(confPath, &conf)
+ if err != nil {
+ return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err)
+ }
+
+ return &conf, cache, nil
+}
+
+func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, *lockfile.LockFile, error) {
+ shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx)
+ if err != nil {
+ return "", nil, err
+ }
+ // Make sure the parent directory of the file exists.
+ if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil {
+ return "", nil, err
+ }
+
+ lockPath := shortNameAliasesConfPath + ".lock"
+ locker, err := lockfile.GetLockFile(lockPath)
+ return shortNameAliasesConfPath, locker, err
+}
diff --git a/pkg/sysregistriesv2/shortnames_test.go b/pkg/sysregistriesv2/shortnames_test.go
new file mode 100644
index 0000000..7f29544
--- /dev/null
+++ b/pkg/sysregistriesv2/shortnames_test.go
@@ -0,0 +1,298 @@
+package sysregistriesv2
+
+import (
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestShortNameAliasConfNonempty(t *testing.T) {
+ for _, c := range []shortNameAliasConf{
+ {},
+ {Aliases: map[string]string{}},
+ } {
+ copy := c // A shallow copy
+ res := copy.nonempty()
+ assert.False(t, res, c)
+ assert.Equal(t, c, copy, c) // Ensure the method did not change the original value
+ }
+
+ res := (&shortNameAliasConf{}).nonempty()
+ assert.False(t, res)
+ for _, c := range []shortNameAliasConf{
+ {Aliases: map[string]string{"a": "example.com/b"}},
+ } {
+ copy := c // A shallow copy
+ res := copy.nonempty()
+ assert.True(t, res, c)
+ assert.Equal(t, c, copy, c) // Ensure the method did not change the original value
+ }
+}
+
+func TestParseShortNameValue(t *testing.T) {
+ tests := []struct {
+ input string
+ valid bool
+ }{
+ // VALID INPUT
+ {"docker.io/library/fedora", true},
+ {"localhost/fedora", true},
+ {"localhost:5000/fedora", true},
+ {"localhost:5000/namespace/fedora", true},
+ // INVALID INPUT
+ {"docker.io/library/fedora:latest", false}, // tag
+ {"docker.io/library/fedora@sha256:b87dd5f837112a9e1e9882963a6406387597698268c0ad371b187151a5dfe6bf", false}, // digest
+ {"fedora", false}, // short name
+ {"fedora:latest", false}, // short name + tag
+ {"library/fedora", false}, // no registry
+ {"library/fedora:latest", false}, // no registry + tag
+ {"$$4455%%", false}, // garbage
+ {"docker://foo", false}, // transports are not supported
+ {"docker-archive://foo", false}, // transports are not supported
+ {"", false}, // empty
+ }
+
+ for _, test := range tests {
+ named, err := parseShortNameValue(test.input)
+ if test.valid {
+ require.NoError(t, err, "%q should be a valid alias", test.input)
+ assert.NotNil(t, named)
+ assert.Equal(t, test.input, named.String())
+ } else {
+ require.Error(t, err, "%q should be an invalid alias", test.input)
+ assert.Nil(t, named)
+ }
+ }
+
+ // Now make sure that docker.io references are normalized.
+ named, err := parseShortNameValue("docker.io/fedora")
+ require.NoError(t, err)
+ assert.NotNil(t, named)
+ assert.Equal(t, "docker.io/library/fedora", named.String())
+
+}
+
+func TestValidateShortName(t *testing.T) {
+ tests := []struct {
+ input string
+ valid bool
+ }{
+ // VALID INPUT
+ {"library/fedora", true},
+ {"fedora", true},
+ {"1234567489", true},
+ // INVALID INPUT
+ {"docker.io/library/fedora:latest", false},
+ {"docker.io/library/fedora@sha256:b87dd5f837112a9e1e9882963a6406387597698268c0ad371b187151a5dfe6bf", false}, // digest
+ {"fedora:latest", false},
+ {"library/fedora:latest", false},
+ {"$$4455%%", false},
+ {"docker://foo", false},
+ {"docker-archive://foo", false},
+ {"", false},
+ }
+
+ for _, test := range tests {
+ err := validateShortName(test.input)
+ if test.valid {
+ require.NoError(t, err, "%q should be a valid alias", test.input)
+ } else {
+ require.Error(t, err, "%q should be an invalid alias", test.input)
+ }
+ }
+}
+
+func TestResolveShortNameAlias(t *testing.T) {
+ tmp, err := os.CreateTemp("", "aliases.conf")
+ require.NoError(t, err)
+ defer os.Remove(tmp.Name())
+
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/aliases.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ UserShortNameAliasConfPath: tmp.Name(),
+ }
+
+ InvalidateCache()
+ conf, err := tryUpdatingCache(sys, newConfigWrapper(sys))
+ require.NoError(t, err)
+ assert.Len(t, conf.aliasCache.namedAliases, 4)
+ assert.Len(t, conf.partialV2.Aliases, 0) // This is an implementation detail, not an API guarantee.
+
+ aliases := []struct {
+ name, value string
+ }{
+ {
+ "docker",
+ "docker.io/library/foo",
+ },
+ {
+ "quay/foo",
+ "quay.io/library/foo",
+ },
+ {
+ "example",
+ "example.com/library/foo",
+ },
+ }
+
+ for _, alias := range aliases {
+ value, path, err := ResolveShortNameAlias(sys, alias.name)
+ require.NoError(t, err)
+ require.NotNil(t, value)
+ assert.Equal(t, alias.value, value.String())
+ assert.Equal(t, "testdata/aliases.conf", path)
+ }
+
+ // Non-existent alias.
+ value, path, err := ResolveShortNameAlias(sys, "idonotexist")
+ require.NoError(t, err)
+ assert.Nil(t, value)
+ assert.Equal(t, "", path)
+
+ // Empty right-hand value (special case) -> does not resolve.
+ value, path, err = ResolveShortNameAlias(sys, "empty")
+ require.NoError(t, err)
+ assert.Nil(t, value)
+ assert.Equal(t, "testdata/aliases.conf", path)
+}
+
+func TestAliasesWithDropInConfigs(t *testing.T) {
+ tmp, err := os.CreateTemp("", "aliases.conf")
+ require.NoError(t, err)
+ defer os.Remove(tmp.Name())
+
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/aliases.conf",
+ SystemRegistriesConfDirPath: "testdata/registries.conf.d",
+ UserShortNameAliasConfPath: tmp.Name(),
+ }
+
+ InvalidateCache()
+ conf, err := tryUpdatingCache(sys, newConfigWrapper(sys))
+ require.NoError(t, err)
+ assert.Len(t, conf.aliasCache.namedAliases, 8)
+ assert.Len(t, conf.partialV2.Aliases, 0) // This is an implementation detail, not an API guarantee.
+
+ aliases := []struct {
+ name, value, config string
+ }{
+ {
+ "docker",
+ "docker.io/library/config1",
+ "testdata/registries.conf.d/config-1.conf",
+ },
+ {
+ "quay/foo",
+ "quay.io/library/foo",
+ "testdata/aliases.conf",
+ },
+ {
+ "config1",
+ "config1.com/image", // from config1
+ "testdata/registries.conf.d/config-1.conf",
+ },
+ {
+ "barz",
+ "barz.com/config2", // from config1, overridden by config2
+ "testdata/registries.conf.d/config-2.conf",
+ },
+ {
+ "config2",
+ "config2.com/image", // from config2
+ "testdata/registries.conf.d/config-2.conf",
+ },
+ {
+ "added1",
+ "aliases.conf/added1", // from AddShortNameAlias
+ tmp.Name(),
+ },
+ {
+ "added2",
+ "aliases.conf/added2", // from AddShortNameAlias
+ tmp.Name(),
+ },
+ {
+ "added3",
+ "aliases.conf/added3", // from config2, overridden by AddShortNameAlias
+ tmp.Name(),
+ },
+ }
+
+ require.NoError(t, AddShortNameAlias(sys, "added1", "aliases.conf/added1"))
+ require.NoError(t, AddShortNameAlias(sys, "added2", "aliases.conf/added2"))
+ require.NoError(t, AddShortNameAlias(sys, "added3", "aliases.conf/added3"))
+
+ for _, alias := range aliases {
+ value, path, err := ResolveShortNameAlias(sys, alias.name)
+ require.NoError(t, err)
+ require.NotNil(t, value, "%v", alias)
+ assert.Equal(t, alias.value, value.String())
+ assert.Equal(t, alias.config, path)
+ }
+
+ value, path, err := ResolveShortNameAlias(sys, "i/do/no/exist")
+ require.NoError(t, err)
+ assert.Nil(t, value)
+ assert.Equal(t, "", path)
+
+ // Empty right-hand value (special case) -> does not resolve.
+ value, path, err = ResolveShortNameAlias(sys, "empty") // from aliases.conf, overridden by config2
+ require.NoError(t, err)
+ assert.Nil(t, value)
+ assert.Equal(t, "testdata/aliases.conf", path)
+
+ mode, err := GetShortNameMode(sys)
+ require.NoError(t, err)
+ assert.Equal(t, types.ShortNameModePermissive, mode) // from aliases.conf, overridden by config2
+
+ // Now remove the aliases from the machine config.
+ require.NoError(t, RemoveShortNameAlias(sys, "added1"))
+ require.NoError(t, RemoveShortNameAlias(sys, "added2"))
+ require.NoError(t, RemoveShortNameAlias(sys, "added3"))
+
+ // Make sure that 1 and 2 are gone.
+ for _, alias := range []string{"added1", "added2"} {
+ value, path, err := ResolveShortNameAlias(sys, alias)
+ require.NoError(t, err)
+ assert.Nil(t, value)
+ assert.Equal(t, "", path)
+ }
+
+ // 3 is still present in config2
+ value, path, err = ResolveShortNameAlias(sys, "added3")
+ require.NoError(t, err)
+ require.NotNil(t, value)
+ assert.Equal(t, "xxx.com/image", value.String())
+ assert.Equal(t, "testdata/registries.conf.d/config-2.conf", path)
+
+ require.Error(t, RemoveShortNameAlias(sys, "added3")) // we cannot remove it from config2
+}
+
+func TestInvalidAliases(t *testing.T) {
+ tmp, err := os.CreateTemp("", "aliases.conf")
+ require.NoError(t, err)
+ defer os.Remove(tmp.Name())
+
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/invalid-aliases.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ UserShortNameAliasConfPath: tmp.Name(),
+ }
+
+ InvalidateCache()
+ _, err = TryUpdatingCache(sys)
+ require.Error(t, err)
+
+ // We validate the alias value before loading existing configuration,
+ // so this tests the validation although the pre-existing configuration
+ // is invalid.
+ assert.Error(t, AddShortNameAlias(sys, "added1", "aliases"))
+ assert.Error(t, AddShortNameAlias(sys, "added2", "aliases.conf"))
+ assert.Error(t, AddShortNameAlias(sys, "added3", ""))
+ assert.Error(t, AddShortNameAlias(sys, "added3", " "))
+ assert.Error(t, AddShortNameAlias(sys, "added3", "$$$"))
+}
diff --git a/pkg/sysregistriesv2/system_registries_v2.go b/pkg/sysregistriesv2/system_registries_v2.go
new file mode 100644
index 0000000..f45fd9d
--- /dev/null
+++ b/pkg/sysregistriesv2/system_registries_v2.go
@@ -0,0 +1,1056 @@
+package sysregistriesv2
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/BurntSushi/toml"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/containers/storage/pkg/regexp"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/maps"
+)
+
+// systemRegistriesConfPath is the path to the system-wide registry
+// configuration file and is used to add/subtract potential registries for
+// obtaining images. You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/pkg/sysregistriesv2.systemRegistriesConfPath=$your_path'
+var systemRegistriesConfPath = builtinRegistriesConfPath
+
+// systemRegistriesConfDirPath is the path to the system-wide registry
+// configuration directory and is used to add/subtract potential registries for
+// obtaining images. You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/pkg/sysregistriesv2.systemRegistriesConfDirPath=$your_path'
+var systemRegistriesConfDirPath = builtinRegistriesConfDirPath
+
+// AuthenticationFileHelper is a special key for credential helpers indicating
+// the usage of consulting containers-auth.json files instead of a credential
+// helper.
+const AuthenticationFileHelper = "containers-auth.json"
+
+const (
+ // configuration values for "pull-from-mirror"
+ // mirrors will be used for both digest pulls and tag pulls
+ MirrorAll = "all"
+ // mirrors will only be used for digest pulls
+ MirrorByDigestOnly = "digest-only"
+ // mirrors will only be used for tag pulls
+ MirrorByTagOnly = "tag-only"
+)
+
+// Endpoint describes a remote location of a registry.
+type Endpoint struct {
+ // The endpoint's remote location. Can be empty iff Prefix contains a
+ // wildcard in the format "*.example.com" for subdomain matching.
+ // Please refer to FindRegistry / PullSourcesFromReference instead
+ // of accessing/interpreting `Location` directly.
+ Location string `toml:"location,omitempty"`
+ // If true, certs verification will be skipped and HTTP (non-TLS)
+ // connections will be allowed.
+ Insecure bool `toml:"insecure,omitempty"`
+ // PullFromMirror is used for adding restrictions to image pull through the mirror.
+ // Set to "all", "digest-only", or "tag-only".
+ // If "digest-only", mirrors will only be used for digest pulls. Pulling images by
+ // tag can potentially yield different images, depending on which endpoint
+ // we pull from. Restricting mirrors to pulls by digest avoids that issue.
+ // If "tag-only", mirrors will only be used for tag pulls. For a more up-to-date and expensive mirror
+ // that it is less likely to be out of sync if tags move, it should not be unnecessarily
+ // used for digest references.
+ // Default is "all" (or left empty), mirrors will be used for both digest pulls and tag pulls unless the mirror-by-digest-only is set for the primary registry.
+ // This can only be set in a registry's Mirror field, not in the registry's primary Endpoint.
+ // This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry.
+ PullFromMirror string `toml:"pull-from-mirror,omitempty"`
+}
+
+// userRegistriesFile is the path to the per user registry configuration file.
+var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf")
+
+// userRegistriesDir is the path to the per user registry configuration directory.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d")
+
+// rewriteReference substitutes the provided `prefix` in `ref` with the
+// endpoint's `Location` and creates a new named reference from the result.
+// The function errors if the newly created reference is not parsable.
+func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
+ refString := ref.String()
+ var newNamedRef string
+ // refMatchingPrefix returns the length of the match. Everything that
+ // follows the match gets appended to the registry's location.
+ prefixLen := refMatchingPrefix(refString, prefix)
+ if prefixLen == -1 {
+ return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
+ }
+ // In the case of an empty `location` field, simply return the original
+ // input ref as-is.
+ //
+ // FIXME: already validated in postProcessRegistries, so check can probably
+ // be dropped.
+ // https://github.com/containers/image/pull/1191#discussion_r610621608
+ if e.Location == "" {
+ if !strings.HasPrefix(prefix, "*.") {
+ return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
+ }
+ return ref, nil
+ }
+ newNamedRef = e.Location + refString[prefixLen:]
+ newParsedRef, err := reference.ParseNamed(newNamedRef)
+ if err != nil {
+ return nil, fmt.Errorf("rewriting reference: %w", err)
+ }
+
+ return newParsedRef, nil
+}
+
+// Registry represents a registry.
+type Registry struct {
+ // Prefix is used for matching images, and to translate one namespace to
+ // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
+ // and we pull from "example.com/bar/myimage:latest", the image will
+ // effectively be pulled from "example.com/foo/bar/myimage:latest".
+ // If no Prefix is specified, it defaults to the specified location.
+ // Prefix can also be in the format: "*.example.com" for matching
+ // subdomains. The wildcard must be at the beginning and the prefix must
+ // not contain any namespaces or special characters: "/", "@" or ":".
+ // Please refer to FindRegistry / PullSourcesFromReference instead
+ // of accessing/interpreting `Prefix` directly.
+ Prefix string `toml:"prefix"`
+ // A registry is an Endpoint too
+ Endpoint
+ // The registry's mirrors.
+ Mirrors []Endpoint `toml:"mirror,omitempty"`
+ // If true, pulling from the registry will be blocked.
+ Blocked bool `toml:"blocked,omitempty"`
+ // If true, mirrors will only be used for digest pulls. Pulling images by
+ // tag can potentially yield different images, depending on which endpoint
+ // we pull from. Restricting mirrors to pulls by digest avoids that issue.
+ MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
+}
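+
+// The following is an illustrative sketch, not part of the upstream file:
+// a [[registry]] entry as it might appear in registries.conf, combining
+// Prefix, Location and a mirror that is restricted to digest pulls. The
+// hostnames are made up for illustration.
+//
+//	[[registry]]
+//	prefix = "example.com/bar"
+//	location = "example.com/foo/bar"
+//
+//	[[registry.mirror]]
+//	location = "mirror.example.net/bar"
+//	pull-from-mirror = "digest-only"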
+
+// PullSource consists of an Endpoint and a Reference. Note that the reference is
+// rewritten according to the registry's Prefix and the Endpoint's Location.
+type PullSource struct {
+ Endpoint Endpoint
+ Reference reference.Named
+}
+
+// PullSourcesFromReference returns a slice of PullSources based on the passed
+// reference.
+func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
+ var endpoints []Endpoint
+ _, isDigested := ref.(reference.Canonical)
+ if r.MirrorByDigestOnly {
+ // Only use mirrors when the reference is a digested one.
+ if isDigested {
+ endpoints = append(endpoints, r.Mirrors...)
+ }
+ } else {
+ for _, mirror := range r.Mirrors {
+ // skip the mirror if a per-mirror setting exists but the reference does not match the restriction
+ switch mirror.PullFromMirror {
+ case MirrorByDigestOnly:
+ if !isDigested {
+ continue
+ }
+ case MirrorByTagOnly:
+ if isDigested {
+ continue
+ }
+ }
+ endpoints = append(endpoints, mirror)
+ }
+ }
+ endpoints = append(endpoints, r.Endpoint)
+
+ sources := []PullSource{}
+ for _, ep := range endpoints {
+ rewritten, err := ep.rewriteReference(ref, r.Prefix)
+ if err != nil {
+ return nil, err
+ }
+ sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten})
+ }
+
+ return sources, nil
+}
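+
+// The following is an illustrative sketch, not part of the upstream file:
+// how a consumer might combine FindRegistry and PullSourcesFromReference
+// to enumerate the endpoints to try for a pull. candidatePullSources is a
+// hypothetical helper; error handling is abbreviated for illustration.
+//
+//	func candidatePullSources(sys *types.SystemContext, ref reference.Named) ([]PullSource, error) {
+//		reg, err := FindRegistry(sys, ref.String())
+//		if err != nil {
+//			return nil, err
+//		}
+//		if reg == nil {
+//			// No [[registry]] entry matched; pull using the reference as-is.
+//			return []PullSource{{Reference: ref}}, nil
+//		}
+//		return reg.PullSourcesFromReference(ref)
+//	}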
+
+// V1TOMLregistries is for backwards compatibility to sysregistries v1
+type V1TOMLregistries struct {
+ Registries []string `toml:"registries"`
+}
+
+// V1TOMLConfig is for backwards compatibility to sysregistries v1
+type V1TOMLConfig struct {
+ Search V1TOMLregistries `toml:"search"`
+ Insecure V1TOMLregistries `toml:"insecure"`
+ Block V1TOMLregistries `toml:"block"`
+}
+
+// V1RegistriesConf is the sysregistries v1 configuration format.
+type V1RegistriesConf struct {
+ V1TOMLConfig `toml:"registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+// Empty arrays are treated as missing entries.
+func (config *V1RegistriesConf) Nonempty() bool {
+ copy := *config // A shallow copy
+ if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 {
+ copy.V1TOMLConfig.Search.Registries = nil
+ }
+ if copy.V1TOMLConfig.Insecure.Registries != nil && len(copy.V1TOMLConfig.Insecure.Registries) == 0 {
+ copy.V1TOMLConfig.Insecure.Registries = nil
+ }
+ if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 {
+ copy.V1TOMLConfig.Block.Registries = nil
+ }
+ return copy.hasSetField()
+}
+
+// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
+// is not modified while unmarshaling (in our case it remains nil), while an [] is unmarshaled
+// as a non-nil []string{}.
+func (config *V1RegistriesConf) hasSetField() bool {
+ return !reflect.DeepEqual(*config, V1RegistriesConf{})
+}
+
+// V2RegistriesConf is the sysregistries v2 configuration format.
+type V2RegistriesConf struct {
+ Registries []Registry `toml:"registry"`
+ // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
+ UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
+ // An array of global credential helpers to use for authentication
+ // (e.g., ["pass", "secretservice"]). The helpers are consulted in the
+ // specified order. Note that "containers-auth.json" is a reserved
+ // value for consulting auth files as specified in
+ // containers-auth.json(5).
+ //
+ // If empty, CredentialHelpers defaults to ["containers-auth.json"].
+ CredentialHelpers []string `toml:"credential-helpers"`
+
+ // ShortNameMode defines how short-name resolution should be handled by
+ // _consumers_ of this package. Depending on the mode, the user should
+ // be prompted with a choice of using one of the unqualified-search
+ // registries when referring to a short name.
+ //
+ // Valid modes are:
+ //  * "permissive": prompt if stdout is a TTY, otherwise
+ //    use all unqualified-search registries
+ //  * "enforcing": always prompt and error if stdout is not a TTY
+ //  * "disabled": do not prompt and potentially use all
+ //    unqualified-search registries
+ ShortNameMode string `toml:"short-name-mode"`
+
+ shortNameAliasConf
+
+ // If you add any field, make sure to update Nonempty() below.
+}
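+
+// The following is an illustrative sketch, not part of the upstream file:
+// top-level registries.conf settings corresponding to the V2RegistriesConf
+// fields above. The concrete values are made up for illustration.
+//
+//	unqualified-search-registries = ["registry.example.org", "docker.io"]
+//	credential-helpers = ["containers-auth.json"]
+//	short-name-mode = "enforcing"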
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V2RegistriesConf) Nonempty() bool {
+ copy := *config // A shallow copy
+ if copy.Registries != nil && len(copy.Registries) == 0 {
+ copy.Registries = nil
+ }
+ if copy.UnqualifiedSearchRegistries != nil && len(copy.UnqualifiedSearchRegistries) == 0 {
+ copy.UnqualifiedSearchRegistries = nil
+ }
+ if copy.CredentialHelpers != nil && len(copy.CredentialHelpers) == 0 {
+ copy.CredentialHelpers = nil
+ }
+ if !copy.shortNameAliasConf.nonempty() {
+ copy.shortNameAliasConf = shortNameAliasConf{}
+ }
+ return copy.hasSetField()
+}
+
+// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
+// is not modified while unmarshaling (in our case it remains nil), while an [] is unmarshaled
+// as a non-nil []string{}.
+func (config *V2RegistriesConf) hasSetField() bool {
+ return !reflect.DeepEqual(*config, V2RegistriesConf{})
+}
+
+// parsedConfig is the result of parsing, and possibly merging, configuration files;
+// it is the boundary between the process of reading+ingesting the files, and
+// later interpreting the configuration based on caller’s requests.
+type parsedConfig struct {
+ // NOTE: Update also parsedConfig.updateWithConfigurationFrom!
+
+ // partialV2 must continue to exist to maintain the return value of TryUpdatingCache
+ // for compatibility with existing callers.
+ // We store the authoritative Registries and UnqualifiedSearchRegistries values there as well.
+ partialV2 V2RegistriesConf
+ // Absolute path to the configuration file that set the UnqualifiedSearchRegistries.
+ unqualifiedSearchRegistriesOrigin string
+ // Result of parsing of partialV2.ShortNameMode.
+ // NOTE: May be ShortNameModeInvalid to represent ShortNameMode == "" in intermediate values;
+ // the full configuration in configCache / getConfig() always contains a valid value.
+ shortNameMode types.ShortNameMode
+ aliasCache *shortNameAliasCache
+}
+
+// InvalidRegistries represents an invalid registry configuration. An example
+// is when "registry.com" is defined multiple times in the configuration but
+// with conflicting security settings.
+type InvalidRegistries struct {
+ s string
+}
+
+// Error returns the error string.
+func (e *InvalidRegistries) Error() string {
+ return e.s
+}
+
+// parseLocation parses the input string, performs some sanity checks and returns
+// the sanitized input string. An error is returned if the input string is
+// empty or if it contains an "http{s,}://" prefix.
+func parseLocation(input string) (string, error) {
+ trimmed := strings.TrimRight(input, "/")
+
+ // FIXME: This check needs to exist but fails for empty Location field with
+ // wildcarded prefix. Removal of this check "only" allows invalid input in,
+ // and does not prevent correct operation.
+ // https://github.com/containers/image/pull/1191#discussion_r610122617
+ //
+ // if trimmed == "" {
+ // return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
+ // }
+ //
+
+ if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
+ msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
+ return "", &InvalidRegistries{s: msg}
+ }
+
+ return trimmed, nil
+}
+
+// ConvertToV2 returns a v2 config corresponding to a v1 one.
+func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
+ regMap := make(map[string]*Registry)
+ // The order of the registries is not really important, but make it deterministic (the same for the same config file)
+ // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
+ registryOrder := []string{}
+
+ getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object
+ var err error
+ location, err = parseLocation(location)
+ if err != nil {
+ return nil, err
+ }
+ reg, exists := regMap[location]
+ if !exists {
+ reg = &Registry{
+ Endpoint: Endpoint{Location: location},
+ Mirrors: []Endpoint{},
+ Prefix: location,
+ }
+ regMap[location] = reg
+ registryOrder = append(registryOrder, location)
+ }
+ return reg, nil
+ }
+
+ for _, blocked := range config.V1TOMLConfig.Block.Registries {
+ reg, err := getRegistry(blocked)
+ if err != nil {
+ return nil, err
+ }
+ reg.Blocked = true
+ }
+ for _, insecure := range config.V1TOMLConfig.Insecure.Registries {
+ reg, err := getRegistry(insecure)
+ if err != nil {
+ return nil, err
+ }
+ reg.Insecure = true
+ }
+
+ res := &V2RegistriesConf{
+ UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries,
+ }
+ for _, location := range registryOrder {
+ reg := regMap[location]
+ res.Registries = append(res.Registries, *reg)
+ }
+ return res, nil
+}
+
+// anchoredDomainRegexp is an internal implementation detail of postProcessRegistries, defining the valid values of elements of UnqualifiedSearchRegistries.
+var anchoredDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
+
+// postProcessRegistries checks the consistency of all the configuration, looks for conflicts,
+// and normalizes the configuration (e.g., sets the Prefix to Location if not set).
+func (config *V2RegistriesConf) postProcessRegistries() error {
+ regMap := make(map[string][]*Registry)
+
+ for i := range config.Registries {
+ reg := &config.Registries[i]
+ // make sure Location and Prefix are valid
+ var err error
+ reg.Location, err = parseLocation(reg.Location)
+ if err != nil {
+ return err
+ }
+
+ if reg.Prefix == "" {
+ if reg.Location == "" {
+ return &InvalidRegistries{s: "invalid condition: both location and prefix are unset"}
+ }
+ reg.Prefix = reg.Location
+ } else {
+ reg.Prefix, err = parseLocation(reg.Prefix)
+ if err != nil {
+ return err
+ }
+ // FIXME: allow config authors to always use Prefix.
+ // https://github.com/containers/image/pull/1191#discussion_r610622495
+ if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" {
+ return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"}
+ }
+ }
+
+ // validate that mirror-usage settings do not apply to the primary registry
+ if reg.PullFromMirror != "" {
+ return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
+ }
+ // make sure mirrors are valid
+ for _, mir := range reg.Mirrors {
+ mir.Location, err = parseLocation(mir.Location)
+ if err != nil {
+ return err
+ }
+
+ //FIXME: unqualifiedSearchRegistries now also accepts empty values
+ //and shouldn't
+ // https://github.com/containers/image/pull/1191#discussion_r610623216
+ if mir.Location == "" {
+ return &InvalidRegistries{s: "invalid condition: mirror location is unset"}
+ }
+
+ if reg.MirrorByDigestOnly && mir.PullFromMirror != "" {
+ return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", reg.Prefix, mir.Location)}
+ }
+ if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll &&
+ mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly {
+ return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)}
+ }
+ }
+ if reg.Location == "" {
+ regMap[reg.Prefix] = append(regMap[reg.Prefix], reg)
+ } else {
+ regMap[reg.Location] = append(regMap[reg.Location], reg)
+ }
+ }
+
+ // Given a registry can be mentioned multiple times (e.g., to have
+ // multiple prefixes backed by different mirrors), we need to make sure
+ // there are no conflicts among them.
+ //
+ // Note: we need to iterate over the registries array to ensure a
+ // deterministic behavior which is not guaranteed by maps.
+ for _, reg := range config.Registries {
+ var others []*Registry
+ var ok bool
+ if reg.Location == "" {
+ others, ok = regMap[reg.Prefix]
+ } else {
+ others, ok = regMap[reg.Location]
+ }
+ if !ok {
+ return fmt.Errorf("Internal error in V2RegistriesConf.PostProcess: entry in regMap is missing")
+ }
+ for _, other := range others {
+ if reg.Insecure != other.Insecure {
+ msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
+ return &InvalidRegistries{s: msg}
+ }
+
+ if reg.Blocked != other.Blocked {
+ msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
+ return &InvalidRegistries{s: msg}
+ }
+ }
+ }
+
+ for i := range config.UnqualifiedSearchRegistries {
+ registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
+ if err != nil {
+ return err
+ }
+ if !anchoredDomainRegexp.MatchString(registry) {
+ return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
+ }
+ config.UnqualifiedSearchRegistries[i] = registry
+ }
+
+ // Registries are ordered and the first longest prefix always wins,
+ // rendering later items with the same prefix non-existent. We cannot error
+ // out anymore as this might break existing users, so let's just ignore them
+ // to guarantee that the same prefix exists only once.
+ //
+ // As a side effect of parsedConfig.updateWithConfigurationFrom, the Registries slice
+ // is always sorted. To be consistent in situations where it is not called (no drop-ins),
+ // sort it here as well.
+ prefixes := []string{}
+ uniqueRegistries := make(map[string]Registry)
+ for i := range config.Registries {
+ // TODO: should we warn if we see the same prefix being used multiple times?
+ prefix := config.Registries[i].Prefix
+ if _, exists := uniqueRegistries[prefix]; !exists {
+ uniqueRegistries[prefix] = config.Registries[i]
+ prefixes = append(prefixes, prefix)
+ }
+ }
+ sort.Strings(prefixes)
+ config.Registries = []Registry{}
+ for _, prefix := range prefixes {
+ config.Registries = append(config.Registries, uniqueRegistries[prefix])
+ }
+
+ return nil
+}
+
+// ConfigPath returns the path to the system-wide registry configuration file.
+// Deprecated: This API implies configuration is read from files, and that there is only one.
+// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
+func ConfigPath(ctx *types.SystemContext) string {
+ return newConfigWrapper(ctx).configPath
+}
+
+// ConfigDirPath returns the path to the directory for drop-in
+// registry configuration files.
+// Deprecated: This API implies configuration is read from directories, and that there is only one.
+// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
+func ConfigDirPath(ctx *types.SystemContext) string {
+ configWrapper := newConfigWrapper(ctx)
+ if configWrapper.userConfigDirPath != "" {
+ return configWrapper.userConfigDirPath
+ }
+ return configWrapper.configDirPath
+}
+
+// configWrapper is used to store the paths from ConfigPath and ConfigDirPath
+// and acts as a key to the internal cache.
+type configWrapper struct {
+ // path to the registries.conf file
+ configPath string
+ // path to system-wide registries.conf.d directory, or "" if not used
+ configDirPath string
+ // path to user specified registries.conf.d directory, or "" if not used
+ userConfigDirPath string
+}
+
+// newConfigWrapper returns a configWrapper for the specified SystemContext.
+func newConfigWrapper(ctx *types.SystemContext) configWrapper {
+ return newConfigWrapperWithHomeDir(ctx, homedir.Get())
+}
+
+// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper,
+// it exists only to allow testing it with an artificial home directory.
+func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper {
+ var wrapper configWrapper
+ userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile)
+ userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
+
+ // decide configPath using per-user path or system file
+ if ctx != nil && ctx.SystemRegistriesConfPath != "" {
+ wrapper.configPath = ctx.SystemRegistriesConfPath
+ } else if _, err := os.Stat(userRegistriesFilePath); err == nil {
+ // per-user registries.conf exists, not reading system dir
+ // return config dirs from ctx or per-user one
+ wrapper.configPath = userRegistriesFilePath
+ if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
+ wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
+ } else {
+ wrapper.userConfigDirPath = userRegistriesDirPath
+ }
+
+ return wrapper
+ } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+ wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
+ } else {
+ wrapper.configPath = systemRegistriesConfPath
+ }
+
+ // potentially use both system and per-user dirs if not using per-user config file
+ if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
+ // dir explicitly chosen: use only that one
+ wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
+ } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+ wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
+ wrapper.userConfigDirPath = userRegistriesDirPath
+ } else {
+ wrapper.configDirPath = systemRegistriesConfDirPath
+ wrapper.userConfigDirPath = userRegistriesDirPath
+ }
+
+ return wrapper
+}
+
+// ConfigurationSourceDescription returns a string containing the paths of registries.conf and registries.conf.d
+func ConfigurationSourceDescription(ctx *types.SystemContext) string {
+ wrapper := newConfigWrapper(ctx)
+ configSources := []string{wrapper.configPath}
+ if wrapper.configDirPath != "" {
+ configSources = append(configSources, wrapper.configDirPath)
+ }
+ if wrapper.userConfigDirPath != "" {
+ configSources = append(configSources, wrapper.userConfigDirPath)
+ }
+ return strings.Join(configSources, ", ")
+}
+
+// configMutex is used to synchronize concurrent accesses to configCache.
+var configMutex = sync.Mutex{}
+
+// configCache caches already loaded configs with config paths as keys and is
+// used to avoid redundantly parsing configs. Concurrent accesses to the cache
+// are synchronized via configMutex.
+var configCache = make(map[configWrapper]*parsedConfig)
+
+// InvalidateCache invalidates the registry cache. This function is meant to be
+// used for long-running processes that need to reload potential changes made to
+// the cached registry config files.
+func InvalidateCache() {
+ configMutex.Lock()
+ defer configMutex.Unlock()
+ configCache = make(map[configWrapper]*parsedConfig)
+}
+
+// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
+func getConfig(ctx *types.SystemContext) (*parsedConfig, error) {
+ wrapper := newConfigWrapper(ctx)
+ configMutex.Lock()
+ if config, inCache := configCache[wrapper]; inCache {
+ configMutex.Unlock()
+ return config, nil
+ }
+ configMutex.Unlock()
+
+ return tryUpdatingCache(ctx, wrapper)
+}
+
+// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d
+// directory.
+func dropInConfigs(wrapper configWrapper) ([]string, error) {
+ var (
+ configs []string
+ dirPaths []string
+ )
+ if wrapper.configDirPath != "" {
+ dirPaths = append(dirPaths, wrapper.configDirPath)
+ }
+ if wrapper.userConfigDirPath != "" {
+ dirPaths = append(dirPaths, wrapper.userConfigDirPath)
+ }
+ for _, dirPath := range dirPaths {
+ err := filepath.WalkDir(dirPath,
+ // WalkFunc to read additional configs
+ func(path string, d fs.DirEntry, err error) error {
+ switch {
+ case err != nil:
+ // return error (could be a permission problem)
+ return err
+ case d == nil:
+ // this should only happen when err != nil but let's be sure
+ return nil
+ case d.IsDir():
+ if path != dirPath {
+ // make sure to not recurse into sub-directories
+ return filepath.SkipDir
+ }
+ // ignore directories
+ return nil
+ default:
+ // only add *.conf files
+ if strings.HasSuffix(path, ".conf") {
+ configs = append(configs, path)
+ }
+ return nil
+ }
+ },
+ )
+
+ if err != nil && !os.IsNotExist(err) {
+ // Ignore IsNotExist errors: most systems won't have a registries.conf.d
+ // directory.
+ return nil, fmt.Errorf("reading registries.conf.d: %w", err)
+ }
+ }
+
+ return configs, nil
+}
+
+// TryUpdatingCache loads the configuration from the provided `SystemContext`
+// without using the internal cache. On success, the loaded configuration will
+// be added into the internal registry cache.
+// It returns the resulting configuration; this is DEPRECATED and may not correctly
+// reflect any future data handled by this package.
+func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) {
+ config, err := tryUpdatingCache(ctx, newConfigWrapper(ctx))
+ if err != nil {
+ return nil, err
+ }
+ return &config.partialV2, err
+}
+
+// tryUpdatingCache implements TryUpdatingCache with an additional configWrapper
+// argument to avoid redundantly calculating the config paths.
+func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedConfig, error) {
+ configMutex.Lock()
+ defer configMutex.Unlock()
+
+ // load the config
+ config, err := loadConfigFile(wrapper.configPath, false)
+ if err != nil {
+ // Continue with an empty []Registry if we use the default config, which
+ // implies that the config path of the SystemContext isn't set.
+ //
+ // Note: if ctx.SystemRegistriesConfPath points to the default config,
+ // we will still return an error.
+ if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
+ config = &parsedConfig{}
+ config.partialV2 = V2RegistriesConf{Registries: []Registry{}}
+ config.aliasCache, err = newShortNameAliasCache("", &shortNameAliasConf{})
+ if err != nil {
+ return nil, err // Should never happen
+ }
+ } else {
+ return nil, fmt.Errorf("loading registries configuration %q: %w", wrapper.configPath, err)
+ }
+ }
+
+ // Load the configs from the conf directory path.
+ dinConfigs, err := dropInConfigs(wrapper)
+ if err != nil {
+ return nil, err
+ }
+ for _, path := range dinConfigs {
+ // Enforce v2 format for drop-in-configs.
+ dropIn, err := loadConfigFile(path, true)
+ if err != nil {
+ return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
+ }
+ config.updateWithConfigurationFrom(dropIn)
+ }
+
+ if config.shortNameMode == types.ShortNameModeInvalid {
+ config.shortNameMode = defaultShortNameMode
+ }
+
+ if len(config.partialV2.CredentialHelpers) == 0 {
+ config.partialV2.CredentialHelpers = []string{AuthenticationFileHelper}
+ }
+
+ // populate the cache
+ configCache[wrapper] = config
+ return config, nil
+}
+
+// GetRegistries has been deprecated. Use FindRegistry instead.
+//
+// GetRegistries loads and returns the registries specified in the config.
+// Note the parsed content of registry config files is cached. For reloading,
+// use `InvalidateCache` and re-call `GetRegistries`.
+func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return config.partialV2.Registries, nil
+}
+
+// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
+// for unqualified image search, in the returned order.
+func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+ registries, _, err := UnqualifiedSearchRegistriesWithOrigin(ctx)
+ return registries, err
+}
+
+// UnqualifiedSearchRegistriesWithOrigin returns a list of host[:port] entries
+// to try for unqualified image search, in the returned order. It also returns
+// a human-readable description of where these entries are specified (e.g., a
+// registries.conf file).
+func UnqualifiedSearchRegistriesWithOrigin(ctx *types.SystemContext) ([]string, string, error) {
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, "", err
+ }
+ return config.partialV2.UnqualifiedSearchRegistries, config.unqualifiedSearchRegistriesOrigin, nil
+}
+
+// parseShortNameMode translates the string into well-typed
+// types.ShortNameMode.
+func parseShortNameMode(mode string) (types.ShortNameMode, error) {
+ switch mode {
+ case "disabled":
+ return types.ShortNameModeDisabled, nil
+ case "enforcing":
+ return types.ShortNameModeEnforcing, nil
+ case "permissive":
+ return types.ShortNameModePermissive, nil
+ default:
+ return types.ShortNameModeInvalid, fmt.Errorf("invalid short-name mode: %q", mode)
+ }
+}
+
+// GetShortNameMode returns the configured types.ShortNameMode.
+func GetShortNameMode(ctx *types.SystemContext) (types.ShortNameMode, error) {
+ if ctx != nil && ctx.ShortNameMode != nil {
+ return *ctx.ShortNameMode, nil
+ }
+ config, err := getConfig(ctx)
+ if err != nil {
+ return -1, err
+ }
+ return config.shortNameMode, err
+}
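+
+// The following is an illustrative sketch, not part of the upstream file:
+// how a CLI consumer might branch on the configured short-name mode. The
+// comments in the switch stand in for the consumer's own prompting logic.
+//
+//	mode, err := GetShortNameMode(sys)
+//	if err != nil {
+//		return err
+//	}
+//	switch mode {
+//	case types.ShortNameModeDisabled:
+//		// try all unqualified-search registries without prompting
+//	case types.ShortNameModeEnforcing:
+//		// prompt if stdout is a TTY, otherwise fail
+//	case types.ShortNameModePermissive:
+//		// prompt if stdout is a TTY, otherwise try all unqualified-search registries
+//	}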
+
+// CredentialHelpers returns the global top-level credential helpers.
+func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
+ config, err := getConfig(sys)
+ if err != nil {
+ return nil, err
+ }
+ return config.partialV2.CredentialHelpers, nil
+}
+
+// refMatchingSubdomainPrefix returns the length of the matched registry domain within ref
+// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value containing wildcarded subdomains in the
+// format: *.example.com. Wildcards are only accepted at the beginning, so
+// other formats like example.*.com will not work. Wildcarded prefixes also
+// cannot contain port numbers or namespaces in them.
+func refMatchingSubdomainPrefix(ref, prefix string) int {
+ index := strings.Index(ref, prefix[1:])
+ if index == -1 {
+ return -1
+ }
+ if strings.Contains(ref[:index], "/") {
+ return -1
+ }
+ index += len(prefix[1:])
+ if index == len(ref) {
+ return index
+ }
+ switch ref[index] {
+ case ':', '/', '@':
+ return index
+ default:
+ return -1
+ }
+}
+
+// refMatchingPrefix returns the length of the prefix iff ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value.
+// (This is split from the caller primarily to make testing easier.)
+func refMatchingPrefix(ref, prefix string) int {
+ switch {
+ case strings.HasPrefix(prefix, "*."):
+ return refMatchingSubdomainPrefix(ref, prefix)
+ case len(ref) < len(prefix):
+ return -1
+ case len(ref) == len(prefix):
+ if ref == prefix {
+ return len(prefix)
+ }
+ return -1
+ case len(ref) > len(prefix):
+ if !strings.HasPrefix(ref, prefix) {
+ return -1
+ }
+ c := ref[len(prefix)]
+ // This allows "example.com:5000" to match "example.com",
+ // which is unintended; that will get fixed eventually, DON'T RELY
+ // ON THE CURRENT BEHAVIOR.
+ if c == ':' || c == '/' || c == '@' {
+ return len(prefix)
+ }
+ return -1
+ default:
+ panic("Internal error: impossible comparison outcome")
+ }
+}
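+
+// The following is an illustrative sketch, not part of the upstream file:
+// a few example outcomes of refMatchingPrefix and refMatchingSubdomainPrefix,
+// mirroring the semantics described above (see also the tests).
+//
+//	refMatchingPrefix("docker.io/library/busybox", "docker.io/library") // == len("docker.io/library")
+//	refMatchingPrefix("docker.io/libraryy", "docker.io/library")        // == -1 ("y" is not a "/", ":" or "@" boundary)
+//	refMatchingPrefix("sub.example.com/foo", "*.example.com")           // == len("sub.example.com")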
+
+// FindRegistry returns the Registry with the longest prefix for ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!).
+// If no Registry prefixes the image, nil is returned.
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return findRegistryWithParsedConfig(config, ref)
+}
+
+// findRegistryWithParsedConfig implements `FindRegistry` with a pre-loaded
+// parseConfig.
+func findRegistryWithParsedConfig(config *parsedConfig, ref string) (*Registry, error) {
+ reg := Registry{}
+ prefixLen := 0
+ for _, r := range config.partialV2.Registries {
+ if refMatchingPrefix(ref, r.Prefix) != -1 {
+ length := len(r.Prefix)
+ if length > prefixLen {
+ reg = r
+ prefixLen = length
+ }
+ }
+ }
+ if prefixLen != 0 {
+ return &reg, nil
+ }
+ return nil, nil
+}
+
+// loadConfigFile loads and unmarshals a single config file.
+// Use forceV2 if the config must be in the v2 format.
+func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
+ logrus.Debugf("Loading registries configuration %q", path)
+
+ // tomlConfig allows us to unmarshal either V1 or V2 simultaneously.
+ type tomlConfig struct {
+ V2RegistriesConf
+ V1RegistriesConf // for backwards compatibility with sysregistries v1
+ }
+
+ // Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
+ var combinedTOML tomlConfig
+ meta, err := toml.DecodeFile(path, &combinedTOML)
+ if err != nil {
+ return nil, err
+ }
+ if keys := meta.Undecoded(); len(keys) > 0 {
+ logrus.Debugf("Failed to decode keys %q from %q", keys, path)
+ }
+
+ if combinedTOML.V1RegistriesConf.hasSetField() {
+ // Enforce the v2 format if requested.
+ if forceV2 {
+ return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
+ }
+
+ // Convert a v1 config into a v2 config.
+ if combinedTOML.V2RegistriesConf.hasSetField() {
+ return nil, &InvalidRegistries{s: fmt.Sprintf("mixing sysregistry v1/v2 is not supported: %#v", combinedTOML)}
+ }
+ converted, err := combinedTOML.V1RegistriesConf.ConvertToV2()
+ if err != nil {
+ return nil, err
+ }
+ combinedTOML.V1RegistriesConf = V1RegistriesConf{}
+ combinedTOML.V2RegistriesConf = *converted
+ }
+
+ res := parsedConfig{partialV2: combinedTOML.V2RegistriesConf}
+
+ // Post process registries, set the correct prefixes, sanity checks, etc.
+ if err := res.partialV2.postProcessRegistries(); err != nil {
+ return nil, err
+ }
+
+ res.unqualifiedSearchRegistriesOrigin = path
+
+ if len(res.partialV2.ShortNameMode) > 0 {
+ mode, err := parseShortNameMode(res.partialV2.ShortNameMode)
+ if err != nil {
+ return nil, err
+ }
+ res.shortNameMode = mode
+ } else {
+ res.shortNameMode = types.ShortNameModeInvalid
+ }
+
+ // Valid wildcarded prefixes must be in the format: *.example.com
+ // FIXME: Move to postProcessRegistries
+ // https://github.com/containers/image/pull/1191#discussion_r610623829
+ for i := range res.partialV2.Registries {
+ prefix := res.partialV2.Registries[i].Prefix
+ if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") {
+ msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix)
+ return nil, &InvalidRegistries{s: msg}
+ }
+ }
+
+ // Parse and validate short-name aliases.
+ cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
+ if err != nil {
+ return nil, fmt.Errorf("validating short-name aliases: %w", err)
+ }
+ res.aliasCache = cache
+ // Clear res.partialV2.shortNameAliasConf to make it available for garbage collection and
+ // reduce memory consumption. We're consulting aliasCache for lookups.
+ res.partialV2.shortNameAliasConf = shortNameAliasConf{}
+
+ return &res, nil
+}
+
+// updateWithConfigurationFrom updates c with configuration from updates.
+//
+// Fields present in updates will typically replace already set fields in c.
+// The [[registry]] and alias tables are merged.
+func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
+ // == Merge Registries:
+ registryMap := make(map[string]Registry)
+ for i := range c.partialV2.Registries {
+ registryMap[c.partialV2.Registries[i].Prefix] = c.partialV2.Registries[i]
+ }
+ // Merge the freshly loaded registries.
+ for i := range updates.partialV2.Registries {
+ registryMap[updates.partialV2.Registries[i].Prefix] = updates.partialV2.Registries[i]
+ }
+
+ // Go maps have a non-deterministic order when iterating the keys, so
+ // we dump them in a slice and sort it to enforce some order in
+ // Registries slice. Some consumers of c/image (e.g., CRI-O) log the
+ // configuration where a non-deterministic order could easily cause
+ // confusion.
+ prefixes := maps.Keys(registryMap)
+ sort.Strings(prefixes)
+
+ c.partialV2.Registries = []Registry{}
+ for _, prefix := range prefixes {
+ c.partialV2.Registries = append(c.partialV2.Registries, registryMap[prefix])
+ }
+
+ // == Merge UnqualifiedSearchRegistries:
+ // This depends on a subtlety of the behavior of the TOML decoder, where a missing array field
+ // is not modified while unmarshaling (in our case it remains nil), while an [] is unmarshaled
+ // as a non-nil []string{}.
+ if updates.partialV2.UnqualifiedSearchRegistries != nil {
+ c.partialV2.UnqualifiedSearchRegistries = updates.partialV2.UnqualifiedSearchRegistries
+ c.unqualifiedSearchRegistriesOrigin = updates.unqualifiedSearchRegistriesOrigin
+ }
+
+ // == Merge credential helpers:
+ if updates.partialV2.CredentialHelpers != nil {
+ c.partialV2.CredentialHelpers = updates.partialV2.CredentialHelpers
+ }
+
+ // == Merge shortNameMode:
+ // We don’t maintain c.partialV2.ShortNameMode.
+ if updates.shortNameMode != types.ShortNameModeInvalid {
+ c.shortNameMode = updates.shortNameMode
+ }
+
+ // == Merge aliasCache:
+ // We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
+ c.aliasCache.updateWithConfigurationFrom(updates.aliasCache)
+}
diff --git a/pkg/sysregistriesv2/system_registries_v2_test.go b/pkg/sysregistriesv2/system_registries_v2_test.go
new file mode 100644
index 0000000..ca88c6e
--- /dev/null
+++ b/pkg/sysregistriesv2/system_registries_v2_test.go
@@ -0,0 +1,975 @@
+package sysregistriesv2
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var v1RegistriesConfEmptyTestData = []struct {
+ nonempty, hasSetField bool
+ v V1RegistriesConf
+}{
+ {nonempty: false, hasSetField: false, v: V1RegistriesConf{}},
+ {nonempty: false, hasSetField: true, v: V1RegistriesConf{V1TOMLConfig{Search: V1TOMLregistries{Registries: []string{}}}}},
+ {nonempty: false, hasSetField: true, v: V1RegistriesConf{V1TOMLConfig{Insecure: V1TOMLregistries{Registries: []string{}}}}},
+ {nonempty: false, hasSetField: true, v: V1RegistriesConf{V1TOMLConfig{Block: V1TOMLregistries{Registries: []string{}}}}},
+ {nonempty: true, hasSetField: true, v: V1RegistriesConf{V1TOMLConfig{Search: V1TOMLregistries{Registries: []string{"example.com"}}}}},
+ {nonempty: true, hasSetField: true, v: V1RegistriesConf{V1TOMLConfig{Insecure: V1TOMLregistries{Registries: []string{"example.com"}}}}},
+ {nonempty: true, hasSetField: true, v: V1RegistriesConf{V1TOMLConfig{Block: V1TOMLregistries{Registries: []string{"example.com"}}}}},
+}
+
+func TestV1RegistriesConfNonempty(t *testing.T) {
+ for _, c := range v1RegistriesConfEmptyTestData {
+ copy := c.v // A shallow copy
+ res := copy.Nonempty()
+ assert.Equal(t, c.nonempty, res, c.v)
+ assert.Equal(t, c.v, copy, c.v) // Ensure the method did not change the original value
+ }
+}
+
+func TestV1RegistriesConfHasSetField(t *testing.T) {
+ for _, c := range v1RegistriesConfEmptyTestData {
+ copy := c.v // A shallow copy
+ res := copy.hasSetField()
+ assert.Equal(t, c.hasSetField, res, c.v)
+ assert.Equal(t, c.v, copy, c.v) // Ensure the method did not change the original value
+ }
+}
+
+var v2RegistriesConfEmptyTestData = []struct {
+ nonempty, hasSetField bool
+ v V2RegistriesConf
+}{
+ {nonempty: false, hasSetField: false, v: V2RegistriesConf{}},
+ {nonempty: false, hasSetField: true, v: V2RegistriesConf{Registries: []Registry{}}},
+ {nonempty: false, hasSetField: true, v: V2RegistriesConf{UnqualifiedSearchRegistries: []string{}}},
+ {nonempty: false, hasSetField: true, v: V2RegistriesConf{CredentialHelpers: []string{}}},
+ {nonempty: false, hasSetField: true, v: V2RegistriesConf{shortNameAliasConf: shortNameAliasConf{Aliases: map[string]string{}}}},
+ {nonempty: true, hasSetField: true, v: V2RegistriesConf{Registries: []Registry{{Prefix: "example.com"}}}},
+ {nonempty: true, hasSetField: true, v: V2RegistriesConf{UnqualifiedSearchRegistries: []string{"example.com"}}},
+ {nonempty: true, hasSetField: true, v: V2RegistriesConf{CredentialHelpers: []string{"a"}}},
+ {nonempty: true, hasSetField: true, v: V2RegistriesConf{ShortNameMode: "enforcing"}},
+ {nonempty: true, hasSetField: true, v: V2RegistriesConf{shortNameAliasConf: shortNameAliasConf{Aliases: map[string]string{"a": "example.com/b"}}}},
+}
+
+func TestV2RegistriesConfNonempty(t *testing.T) {
+ for _, c := range v2RegistriesConfEmptyTestData {
+ copy := c.v // A shallow copy
+ res := copy.Nonempty()
+ assert.Equal(t, c.nonempty, res, c.v)
+ assert.Equal(t, c.v, copy, c.v) // Ensure the method did not change the original value
+ }
+}
+
+func TestV2RegistriesConfHasSetField(t *testing.T) {
+ for _, c := range v2RegistriesConfEmptyTestData {
+ copy := c.v // A shallow copy
+ res := copy.hasSetField()
+ assert.Equal(t, c.hasSetField, res, c.v)
+ assert.Equal(t, c.v, copy, c.v) // Ensure the method did not change the original value
+ }
+}
+
+func TestParseLocation(t *testing.T) {
+ var err error
+ var location string
+
+ // invalid locations
+ _, err = parseLocation("https://example.com")
+ assert.ErrorContains(t, err, "invalid location 'https://example.com': URI schemes are not supported")
+
+ _, err = parseLocation("john.doe@example.com")
+ assert.Nil(t, err)
+
+ // valid locations
+ location, err = parseLocation("example.com")
+ assert.Nil(t, err)
+ assert.Equal(t, "example.com", location)
+
+ location, err = parseLocation("example.com/") // trailing slashes are stripped
+ assert.Nil(t, err)
+ assert.Equal(t, "example.com", location)
+
+ location, err = parseLocation("example.com//////") // trailing slashes are stripped
+ assert.Nil(t, err)
+ assert.Equal(t, "example.com", location)
+
+ location, err = parseLocation("example.com:5000/with/path")
+ assert.Nil(t, err)
+ assert.Equal(t, "example.com:5000/with/path", location)
+}
+
+func TestEmptyConfig(t *testing.T) {
+ registries, err := GetRegistries(&types.SystemContext{
+ SystemRegistriesConfPath: "testdata/empty.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ })
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(registries))
+
+ // When SystemRegistriesConfPath is not explicitly specified (but RootForImplicitAbsolutePaths might be), a missing file is treated
+ // the same as an empty one, without reporting an error.
+ nonexistentRoot, err := filepath.Abs("testdata/this-does-not-exist")
+ require.NoError(t, err)
+ registries, err = GetRegistries(&types.SystemContext{
+ RootForImplicitAbsolutePaths: nonexistentRoot,
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ })
+ assert.Nil(t, err)
+ assert.Equal(t, 0, len(registries))
+}
+
+func TestMirrors(t *testing.T) {
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/mirrors.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ }
+
+ registries, err := GetRegistries(sys)
+ assert.Nil(t, err)
+ assert.Equal(t, 2, len(registries))
+
+ reg, err := FindRegistry(sys, "registry.com/image:tag")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, 2, len(reg.Mirrors))
+ assert.Equal(t, "mirror-1.registry.com", reg.Mirrors[0].Location)
+ assert.False(t, reg.Mirrors[0].Insecure)
+ assert.Equal(t, "mirror-2.registry.com", reg.Mirrors[1].Location)
+ assert.True(t, reg.Mirrors[1].Insecure)
+}
+
+func TestRefMatchingSubdomainPrefix(t *testing.T) {
+ for _, c := range []struct {
+ ref, prefix string
+ expected int
+ }{
+ // Check for subdomain matches
+ {"docker.io", "*.io", len("docker.io")},
+ {"docker.io/foo", "*.com", -1},
+ {"example.com/foo", "*.co", -1},
+ {"example.com/foo", "*.example.com", -1},
+ //FIXME: Port Number matching needs to be revisited.
+ // https://github.com/containers/image/pull/1191#pullrequestreview-631869416
+ //{"example.com:5000", "*.com", len("example.com")},
+ //{"example.com:5000/foo", "*.com", len("example.com")},
+ //{"sub.example.com:5000/foo", "*.example.com", len("sub.example.com")},
+ //{"example.com:5000/foo/bar", "*.com", len("example.com")},
+ //{"example.com:5000/foo/bar:baz", "*.com", len("example.com")},
+ //{"example.com:5000/foo/bar/bbq:baz", "*.com", len("example.com")},
+ //{"example.com:50000/foo", "*.example.com", -1},
+ {"example.com/foo", "*.com", len("example.com")},
+ {"example.com/foo:bar", "*.com", len("example.com")},
+ {"example.com/foo/bar:baz", "*.com", len("example.com")},
+ {"yet.another.example.com/foo", "**.example.com", -1},
+ {"yet.another.example.com/foo", "***.another.example.com", -1},
+ {"yet.another.example.com/foo", "**********.another.example.com", -1},
+ {"yet.another.example.com/foo/bar", "**********.another.example.com", -1},
+ {"yet.another.example.com/foo/bar", "*.another.example.com", len("yet.another.example.com")},
+ {"another.example.com/namespace.com/foo/bar/bbq:baz", "*.example.com", len("another.example.com")},
+ {"example.net/namespace-ends-in.com/foo/bar/bbq:baz", "*.com", -1},
+ {"another.example.com/namespace.com/foo/bar/bbq:baz", "*.namespace.com", -1},
+ {"sub.example.com/foo/bar", "*.com", len("sub.example.com")},
+ {"sub.example.com/foo/bar", "*.example.com", len("sub.example.com")},
+ {"another.sub.example.com/foo/bar/bbq:baz", "*.example.com", len("another.sub.example.com")},
+ {"another.sub.example.com/foo/bar/bbq:baz", "*.sub.example.com", len("another.sub.example.com")},
+ {"yet.another.example.com/foo/bar@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "*.example.com", len("yet.another.example.com")},
+ {"yet.another.sub.example.com/foo/bar@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "*.sub.example.com", len("yet.another.sub.example.com")},
+ } {
+ refLen := refMatchingSubdomainPrefix(c.ref, c.prefix)
+ assert.Equal(t, c.expected, refLen, fmt.Sprintf("%s vs. %s", c.ref, c.prefix))
+ }
+}
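+
+// A minimal illustration of the wildcard semantics exercised above (a sketch,
+// using rows from the table): a "*.example.com" prefix matches subdomains of
+// example.com but not "example.com" itself, e.g.
+//
+//     refMatchingSubdomainPrefix("sub.example.com/foo/bar", "*.example.com") // len("sub.example.com")
+//     refMatchingSubdomainPrefix("example.com/foo", "*.example.com")         // -1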
+
+func TestRefMatchingPrefix(t *testing.T) {
+ for _, c := range []struct {
+ ref, prefix string
+ expected int
+ }{
+ // Prefix is a reference.Domain() value
+ {"docker.io", "docker.io", len("docker.io")},
+ {"docker.io", "example.com", -1},
+ {"example.com:5000", "example.com:5000", len("example.com:5000")},
+ {"example.com:50000", "example.com:5000", -1},
+ {"example.com:5000", "example.com", len("example.com")}, // FIXME FIXME This is unintended and undocumented, don't rely on this behavior
+ {"example.com/foo", "example.com", len("example.com")},
+ {"example.com/foo/bar", "example.com", len("example.com")},
+ {"example.com/foo/bar:baz", "example.com", len("example.com")},
+ {"example.com/foo/bar@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com", len("example.com")},
+ // Prefix is a reference.Named.Name() value or a repo namespace
+ {"docker.io", "docker.io/library", -1},
+ {"docker.io/library", "docker.io/library", len("docker.io/library")},
+ {"example.com/library", "docker.io/library", -1},
+ {"docker.io/libraryy", "docker.io/library", -1},
+ {"docker.io/library/busybox", "docker.io/library", len("docker.io/library")},
+ {"docker.io", "docker.io/library/busybox", -1},
+ {"docker.io/library/busybox", "docker.io/library/busybox", len("docker.io/library/busybox")},
+ {"example.com/library/busybox", "docker.io/library/busybox", -1},
+ {"docker.io/library/busybox2", "docker.io/library/busybox", -1},
+ // Prefix is a single image
+ {"example.com", "example.com/foo:bar", -1},
+ {"example.com/foo", "example.com/foo:bar", -1},
+ {"example.com/foo:bar", "example.com/foo:bar", len("example.com/foo:bar")},
+ {"example.com/foo:bar2", "example.com/foo:bar", -1},
+ {"example.com", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", -1},
+ {"example.com/foo", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", -1},
+ {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ len("example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")},
+ {"example.com/foo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", -1},
+ // Prefix is invalid, but we shouldn’t crash.
+ // (Note that this is necessary only because loadConfigFile doesn’t reject single-character values outright,
+ // which it, in principle, could; a valid prefix/location must start with a host name, and host names
+ // that could ever match anything contain either a dot or a port number, due to docker.io normalization rules.)
+ {"example.com/foo", "*", -1},
+ } {
+ prefixLen := refMatchingPrefix(c.ref, c.prefix)
+ assert.Equal(t, c.expected, prefixLen, fmt.Sprintf("%s vs. %s", c.ref, c.prefix))
+ }
+}
+
+func TestNewConfigWrapper(t *testing.T) {
+ const nondefaultPath = "/this/is/not/the/default/registries.conf"
+ const variableReference = "$HOME"
+ const rootPrefix = "/root/prefix"
+ tempHome := t.TempDir()
+ var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf")
+ userRegistriesFilePath := filepath.Join(tempHome, userRegistriesFile)
+
+ for _, c := range []struct {
+ sys *types.SystemContext
+ userfilePresent bool
+ expected string
+ }{
+ // The common case
+ {nil, false, systemRegistriesConfPath},
+ // There is a context, but it does not override the path.
+ {&types.SystemContext{}, false, systemRegistriesConfPath},
+ // Path overridden
+ {&types.SystemContext{SystemRegistriesConfPath: nondefaultPath}, false, nondefaultPath},
+ // Root overridden
+ {
+ &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix},
+ false,
+ filepath.Join(rootPrefix, systemRegistriesConfPath),
+ },
+ // Root and path overrides present simultaneously.
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ SystemRegistriesConfPath: nondefaultPath,
+ },
+ false,
+ nondefaultPath,
+ },
+ // User registries file overridden
+ {&types.SystemContext{}, true, userRegistriesFilePath},
+ // Context and user registries file present simultaneously
+ {&types.SystemContext{SystemRegistriesConfPath: nondefaultPath}, true, nondefaultPath},
+ // Root and user registries file overrides present simultaneously.
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ SystemRegistriesConfPath: nondefaultPath,
+ },
+ true,
+ nondefaultPath,
+ },
+ // No environment expansion happens in the overridden paths
+ {&types.SystemContext{SystemRegistriesConfPath: variableReference}, false, variableReference},
+ } {
+ if c.userfilePresent {
+ err := os.MkdirAll(filepath.Dir(userRegistriesFilePath), os.ModePerm)
+ require.NoError(t, err)
+ f, err := os.Create(userRegistriesFilePath)
+ require.NoError(t, err)
+ f.Close()
+ } else {
+ os.Remove(userRegistriesFilePath)
+ }
+ path := newConfigWrapperWithHomeDir(c.sys, tempHome).configPath
+ assert.Equal(t, c.expected, path)
+ }
+}
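+
+// A minimal sketch of how a caller selects a specific configuration file via
+// types.SystemContext (the path below is hypothetical):
+//
+//     sys := &types.SystemContext{SystemRegistriesConfPath: "/path/to/registries.conf"}
+//     registries, err := GetRegistries(sys)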
+
+func TestFindRegistry(t *testing.T) {
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/find-registry.conf",
+ SystemRegistriesConfDirPath: "testdata/registries.conf.d",
+ }
+
+ registries, err := GetRegistries(sys)
+ assert.Nil(t, err)
+ assert.Equal(t, 19, len(registries))
+
+ reg, err := FindRegistry(sys, "simple-prefix.com/foo/bar:latest")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "simple-prefix.com", reg.Prefix)
+ assert.Equal(t, reg.Location, "registry.com:5000")
+
+ // path match
+ reg, err = FindRegistry(sys, "simple-prefix.com/")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+
+ // hostname match
+ reg, err = FindRegistry(sys, "simple-prefix.com")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+
+ // subdomain prefix match
+ reg, err = FindRegistry(sys, "not.so.simple-prefix.com/")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "subdomain-prefix.com", reg.Location)
+
+ reg, err = FindRegistry(sys, "not.quite.simple-prefix.com/")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "subdomain-prefix-2.com", reg.Location)
+
+ reg, err = FindRegistry(sys, "not.quite.simple-prefix.com:5000/with/path/and/beyond:tag")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "subdomain-prefix-2.com", reg.Location)
+
+ // subdomain prefix match for *.not.quite.simple-prefix.com
+ // location field overridden by /registries.conf.d/subdomain-override-1.conf
+ reg, err = FindRegistry(sys, "really.not.quite.simple-prefix.com:5000/with/path/and/beyond:tag")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "subdomain-prefix-1-overridden-by-dropin-location.com", reg.Location)
+
+ // In this case, the override does NOT occur because the dropin
+ // prefix "*.docker.com" does not match.
+ reg, err = FindRegistry(sys, "foo.docker.io:5000/omg/wtf/bbq:foo")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "subdomain-prefix-2.com", reg.Location)
+
+ // subdomain prefix match for *.bar.example.com
+ // location field overridden by /registries.conf.d/subdomain-override-3.conf
+ reg, err = FindRegistry(sys, "foo.bar.example.com:6000/omg/wtf/bbq@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "subdomain-prefix-3-overridden-by-dropin-location.com", reg.Location)
+
+ // This case first matches with prefix = *.docker.io in find-registry.conf but
+ // there's a longer match with *.bar.docker.io which gets used
+ reg, err = FindRegistry(sys, "foo.bar.docker.io:5000/omg/wtf/bbq:foo")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "subdomain-prefix-4.com", reg.Location)
+
+ // This case first matches with prefix = *.example.com in find-registry.conf but
+ // there's a longer match with foo.bar.example.com:5000 which gets used
+ reg, err = FindRegistry(sys, "foo.bar.example.com:5000/omg/wtf/bbq:foo")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "subdomain-prefix-5.com", reg.Location)
+
+ // invalid match
+ reg, err = FindRegistry(sys, "simple-prefix.comx")
+ assert.Nil(t, err)
+ assert.Nil(t, reg)
+
+ reg, err = FindRegistry(sys, "complex-prefix.com:4000/with/path/and/beyond:tag")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "complex-prefix.com:4000/with/path", reg.Prefix)
+ assert.Equal(t, "another-registry.com:5000", reg.Location)
+
+ reg, err = FindRegistry(sys, "no-prefix.com/foo:tag")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "no-prefix.com", reg.Prefix)
+ assert.Equal(t, "no-prefix.com", reg.Location)
+
+ reg, err = FindRegistry(sys, "empty-prefix.com/foo:tag")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.Equal(t, "empty-prefix.com", reg.Prefix)
+ assert.Equal(t, "empty-prefix.com", reg.Location)
+
+ _, err = FindRegistry(&types.SystemContext{SystemRegistriesConfPath: "testdata/this-does-not-exist.conf"}, "example.com")
+ assert.Error(t, err)
+}
+
+func assertRegistryLocationsEqual(t *testing.T, expected []string, regs []Registry) {
+ // verify the expected registries and their order
+ names := []string{}
+ for _, r := range regs {
+ names = append(names, r.Location)
+ }
+ assert.Equal(t, expected, names)
+}
+
+func TestFindUnqualifiedSearchRegistries(t *testing.T) {
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/unqualified-search.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ }
+
+ registries, err := GetRegistries(sys)
+ assert.Nil(t, err)
+ assert.Equal(t, 4, len(registries))
+
+ unqRegs, origin, err := UnqualifiedSearchRegistriesWithOrigin(sys)
+ assert.Nil(t, err)
+ assert.Equal(t, []string{"registry-a.com", "registry-c.com", "registry-d.com"}, unqRegs)
+ assert.Equal(t, "testdata/unqualified-search.conf", origin)
+
+ _, err = UnqualifiedSearchRegistries(&types.SystemContext{
+ SystemRegistriesConfPath: "testdata/invalid-search.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ })
+ assert.Error(t, err)
+}
+
+func TestInvalidV2Configs(t *testing.T) {
+ for _, c := range []struct{ path, errorSubstring string }{
+ {"testdata/insecure-conflicts.conf", "registry 'registry.com' is defined multiple times with conflicting 'insecure' setting"},
+ {"testdata/blocked-conflicts.conf", "registry 'registry.com' is defined multiple times with conflicting 'blocked' setting"},
+ {"testdata/missing-mirror-location.conf", "invalid condition: mirror location is unset"},
+ {"testdata/invalid-prefix.conf", "invalid location"},
+ {"testdata/this-does-not-exist.conf", "no such file or directory"},
+ } {
+ _, err := GetRegistries(&types.SystemContext{SystemRegistriesConfPath: c.path})
+ assert.Error(t, err, c.path)
+ if c.errorSubstring != "" {
+ assert.ErrorContains(t, err, c.errorSubstring, c.path)
+ }
+ }
+}
+
+func TestUnmarshalConfig(t *testing.T) {
+ registries, err := GetRegistries(&types.SystemContext{
+ SystemRegistriesConfPath: "testdata/unmarshal.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ })
+ assert.Nil(t, err)
+ assert.Equal(t, 4, len(registries))
+}
+
+func TestV1BackwardsCompatibility(t *testing.T) {
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/v1-compatibility.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ }
+
+ registries, err := GetRegistries(sys)
+ assert.Nil(t, err)
+ assert.Equal(t, 4, len(registries))
+
+ unqRegs, err := UnqualifiedSearchRegistries(sys)
+ assert.Nil(t, err)
+ assert.Equal(t, []string{"registry-a.com", "registry-c.com", "registry-d.com"}, unqRegs)
+
+ // check if merging works
+ reg, err := FindRegistry(sys, "registry-b.com/bar/foo/barfoo:latest")
+ assert.Nil(t, err)
+ assert.NotNil(t, reg)
+ assert.True(t, reg.Insecure)
+ assert.True(t, reg.Blocked)
+
+ for _, c := range []string{"testdata/v1-invalid-block.conf", "testdata/v1-invalid-insecure.conf", "testdata/v1-invalid-search.conf"} {
+ _, err := GetRegistries(&types.SystemContext{
+ SystemRegistriesConfPath: c,
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ })
+ assert.Error(t, err, c)
+ }
+}
+
+func TestMixingV1andV2(t *testing.T) {
+ for _, c := range []string{
+ "testdata/mixing-v1-v2.conf",
+ "testdata/mixing-v1-v2-empty.conf",
+ } {
+ _, err := GetRegistries(&types.SystemContext{
+ SystemRegistriesConfPath: c,
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ })
+ assert.ErrorContains(t, err, "mixing sysregistry v1/v2 is not supported", c)
+ }
+}
+
+func TestConfigCache(t *testing.T) {
+ configFile, err := os.CreateTemp("", "sysregistriesv2-test")
+ require.NoError(t, err)
+ defer os.Remove(configFile.Name())
+ defer configFile.Close()
+
+ err = os.WriteFile(configFile.Name(), []byte(`
+[[registry]]
+location = "registry.com"
+
+[[registry.mirror]]
+location = "mirror-1.registry.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry.com"
+
+
+[[registry]]
+location = "blocked.registry.com"
+blocked = true
+
+
+[[registry]]
+location = "insecure.registry.com"
+insecure = true
+
+
+[[registry]]
+location = "untrusted.registry.com"
+insecure = true`), 0600)
+ require.NoError(t, err)
+
+ ctx := &types.SystemContext{SystemRegistriesConfPath: configFile.Name()}
+
+ InvalidateCache()
+ registries, err := GetRegistries(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, 4, len(registries))
+
+ // empty the config, but use the same SystemContext to show that the
+ // previously specified registries are in the cache
+ err = os.WriteFile(configFile.Name(), []byte{}, 0600)
+ require.NoError(t, err)
+ registries, err = GetRegistries(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, 4, len(registries))
+}
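+
+// A minimal sketch of forcing a re-read of the configuration, as exercised by
+// the test below: editing the file alone is not enough; the cache has to be
+// invalidated first.
+//
+//     InvalidateCache()
+//     registries, err := GetRegistries(ctx)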
+
+func TestInvalidateCache(t *testing.T) {
+ ctx := &types.SystemContext{SystemRegistriesConfPath: "testdata/invalidate-cache.conf"}
+
+ InvalidateCache()
+ registries, err := GetRegistries(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, 4, len(registries))
+ assertRegistryLocationsEqual(t, []string{"blocked.registry.com", "insecure.registry.com", "registry.com", "untrusted.registry.com"}, registries)
+
+ // invalidate the cache, make sure it's empty and reload
+ InvalidateCache()
+ assert.Equal(t, 0, len(configCache))
+
+ registries, err = GetRegistries(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, 4, len(registries))
+ assertRegistryLocationsEqual(t, []string{"blocked.registry.com", "insecure.registry.com", "registry.com", "untrusted.registry.com"}, registries)
+}
+
+func toNamedRef(t *testing.T, ref string) reference.Named {
+ parsedRef, err := reference.ParseNamed(ref)
+ require.NoError(t, err)
+ return parsedRef
+}
+
+func TestRewriteReferenceSuccess(t *testing.T) {
+ for _, c := range []struct{ inputRef, prefix, location, expected string }{
+ // Standard use cases
+ {"example.com/image", "example.com", "example.com", "example.com/image"},
+ {"example.com/image:latest", "example.com", "example.com", "example.com/image:latest"},
+ {"example.com:5000/image", "example.com:5000", "example.com:5000", "example.com:5000/image"},
+ {"example.com:5000/image:latest", "example.com:5000", "example.com:5000", "example.com:5000/image:latest"},
+ // Separator test ('/', '@', ':')
+ {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "example.com", "example.com",
+ "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
+ {"example.com/foo/image:latest", "example.com/foo", "example.com", "example.com/image:latest"},
+ {"example.com/foo/image:latest", "example.com/foo", "example.com/path", "example.com/path/image:latest"},
+ // Docker examples
+ {"docker.io/library/image:latest", "docker.io", "docker.io", "docker.io/library/image:latest"},
+ {"docker.io/library/image", "docker.io/library", "example.com", "example.com/image"},
+ {"docker.io/library/image", "docker.io", "example.com", "example.com/library/image"},
+ {"docker.io/library/prefix/image", "docker.io/library/prefix", "example.com", "example.com/image"},
+ // Wildcard prefix examples
+ {"docker.io/namespace/image", "*.io", "example.com", "example.com/namespace/image"},
+ {"docker.io/library/prefix/image", "*.io", "example.com", "example.com/library/prefix/image"},
+ {"sub.example.io/library/prefix/image", "*.example.io", "example.com", "example.com/library/prefix/image"},
+ {"another.sub.example.io:5000/library/prefix/image:latest", "*.sub.example.io", "example.com", "example.com:5000/library/prefix/image:latest"},
+ {"foo.bar.io/ns1/ns2/ns3/ns4", "*.bar.io", "omg.bbq.com/roflmao", "omg.bbq.com/roflmao/ns1/ns2/ns3/ns4"},
+ // Empty location with wildcard prefix examples. Essentially, no
+ // rewrite occurs and the original reference is used as-is.
+ {"abc.internal.registry.com/foo:bar", "*.internal.registry.com", "", "abc.internal.registry.com/foo:bar"},
+ {"blah.foo.bar.com/omg:bbq", "*.com", "", "blah.foo.bar.com/omg:bbq"},
+ {"alien.vs.predator.foobar.io:5000/omg", "*.foobar.io", "", "alien.vs.predator.foobar.io:5000/omg"},
+ {"alien.vs.predator.foobar.io:5000/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "*.foobar.io", "",
+ "alien.vs.predator.foobar.io:5000/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
+ {"alien.vs.predator.foobar.io:5000/omg:bbq", "*.foobar.io", "", "alien.vs.predator.foobar.io:5000/omg:bbq"},
+ } {
+ ref := toNamedRef(t, c.inputRef)
+ testEndpoint := Endpoint{Location: c.location}
+ out, err := testEndpoint.rewriteReference(ref, c.prefix)
+ require.NoError(t, err)
+ assert.Equal(t, c.expected, out.String())
+ }
+}
+
+func TestRewriteReferenceFailedDuringParseNamed(t *testing.T) {
+ for _, c := range []struct{ inputRef, prefix, location string }{
+ // Invalid reference format
+ {"example.com/foo/image:latest", "example.com/foo", "example.com/path/"},
+ {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "example.com/foo", "example.com"},
+ {"example.com:5000/image:latest", "example.com", ""},
+ {"example.com:5000/image:latest", "example.com", "example.com:5000"},
+ // Malformed prefix
+ {"example.com/foo/image:latest", "example.com//foo", "example.com/path"},
+ {"example.com/image:latest", "image", "anotherimage"},
+ {"example.com/foo/image:latest", "example.com/foo/", "example.com"},
+ {"example.com/foo/image", "example.com/fo", "example.com/foo"},
+ {"example.com/foo:latest", "example.com/fo", "example.com/foo"},
+ {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "example.com/fo", "example.com/foo"},
+ {"docker.io/library/image", "example.com", "example.com"},
+ {"docker.io/library/image", "*.com", "example.com"},
+ {"foo.docker.io/library/image", "*.example.com", "example.com/image"},
+ {"foo.docker.io/library/image", "*.docker.com", "example.com/image"},
+ } {
+ ref := toNamedRef(t, c.inputRef)
+ testEndpoint := Endpoint{Location: c.location}
+ out, err := testEndpoint.rewriteReference(ref, c.prefix)
+ assert.NotNil(t, err)
+ assert.Nil(t, out)
+ }
+}
+
+func TestPullSourcesFromReference(t *testing.T) {
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/pull-sources-from-reference.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ }
+ registries, err := GetRegistries(sys)
+ require.NoError(t, err)
+ assert.Equal(t, 9, len(registries))
+
+ digest := "@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ tag := ":aaa"
+ for _, tc := range []struct {
+ matchedPrefix string
+ repo string
+ digestPrefixes []string
+ digestInsecure []bool
+ tagPrefixes []string
+ tagInsecure []bool
+ }{
+ // Registry A allowing any kind of pull from mirrors
+ {
+ "registry-a.com/foo",
+ "image",
+ []string{"mirror-1.registry-a.com", "mirror-2.registry-a.com", "registry-a.com/bar"},
+ []bool{false, true, false},
+ []string{"mirror-1.registry-a.com", "mirror-2.registry-a.com", "registry-a.com/bar"},
+ []bool{false, true, false},
+ },
+ // Registry B allowing digest pulls only from mirrors
+ {
+ "registry-b.com/foo",
+ "image",
+ []string{"mirror-1.registry-b.com", "mirror-2.registry-b.com", "registry-b.com/bar"},
+ []bool{false, false, false},
+ []string{"registry-b.com/bar"},
+ []bool{false},
+ },
+ // Registry A has mirrors that allow any kind of pull
+ {
+ "registry-a.com/baz",
+ "image",
+ []string{"mirror-1.registry-a.com", "mirror-2.registry-a.com", "registry-a.com/bar"},
+ []bool{false, true, false},
+ []string{"mirror-1.registry-a.com", "mirror-2.registry-a.com", "registry-a.com/bar"},
+ []bool{false, true, false},
+ },
+ // Registry B has mirrors that allow digest pulls only
+ {
+ "registry-b.com/baz",
+ "image",
+ []string{"mirror-1.registry-b.com", "mirror-2.registry-b.com", "registry-b.com/bar"},
+ []bool{false, false, false},
+ []string{"registry-b.com/bar"},
+ []bool{false},
+ },
+ // Registry C has one mirror that allows digest pulls only and one mirror that allows any kind of pull
+ {
+ "registry-c.com/baz",
+ "image",
+ []string{"mirror-1.registry-c.com", "mirror-2.registry-c.com", "registry-c.com/bar"},
+ []bool{false, false, false},
+ []string{"mirror-1.registry-c.com", "registry-c.com/bar"},
+ []bool{false, false},
+ },
+ // Registry D sets digest-only at the registry level, so only digest pulls use the mirrors
+ // Registry D has no digest-only setting in the mirrors table
+ {
+ "registry-d.com/baz",
+ "image",
+ []string{"mirror-1.registry-d.com", "mirror-2.registry-d.com", "registry-d.com/bar"},
+ []bool{false, false, false},
+ []string{"registry-d.com/bar"},
+ []bool{false},
+ },
+ // Registry E has mirrors that allow tag pulls only
+ {
+ "registry-e.com/baz",
+ "image",
+ []string{"registry-e.com/bar"},
+ []bool{false},
+ []string{"mirror-1.registry-e.com", "mirror-2.registry-e.com", "registry-e.com/bar"},
+ []bool{false, false, false},
+ },
+ // Registry F has one tag-only mirror that does not allow digest pulls
+ {
+ "registry-f.com/baz",
+ "image",
+ []string{"mirror-1.registry-f.com", "registry-f.com/bar"},
+ []bool{false, false},
+ []string{"mirror-1.registry-f.com", "mirror-2.registry-f.com", "registry-f.com/bar"},
+ []bool{false, false, false},
+ },
+ // Registry G has one digest-only mirror and one tag-only mirror
+ {
+ "registry-g.com/baz",
+ "image",
+ []string{"mirror-1.registry-g.com", "mirror-3.registry-g.com", "mirror-4.registry-g.com", "registry-g.com/bar"},
+ []bool{false, false, false, false},
+ []string{"mirror-2.registry-g.com", "mirror-3.registry-g.com", "mirror-4.registry-g.com", "registry-g.com/bar"},
+ []bool{false, false, false, false},
+ },
+ } {
+ // Digest
+ digestedRef := toNamedRef(t, fmt.Sprintf("%s/%s", tc.matchedPrefix, tc.repo)+digest)
+ registry, err := FindRegistry(sys, digestedRef.Name())
+ require.NoError(t, err)
+ require.NotNil(t, registry)
+ pullSource, err := registry.PullSourcesFromReference(digestedRef)
+ require.NoError(t, err)
+ for i, p := range tc.digestPrefixes {
+ assert.Equal(t, p, pullSource[i].Endpoint.Location)
+ assert.Equal(t, fmt.Sprintf("%s/%s", p, tc.repo)+digest, pullSource[i].Reference.String())
+ assert.Equal(t, tc.digestInsecure[i], pullSource[i].Endpoint.Insecure)
+ }
+ // Tag
+ taggedRef := toNamedRef(t, fmt.Sprintf("%s/%s", tc.matchedPrefix, tc.repo)+tag)
+ registry, err = FindRegistry(sys, taggedRef.Name())
+ require.NoError(t, err)
+ require.NotNil(t, registry)
+ pullSource, err = registry.PullSourcesFromReference(taggedRef)
+ require.NoError(t, err)
+ for i, p := range tc.tagPrefixes {
+ assert.Equal(t, p, pullSource[i].Endpoint.Location)
+ assert.Equal(t, fmt.Sprintf("%s/%s", p, tc.repo)+tag, pullSource[i].Reference.String())
+ assert.Equal(t, tc.tagInsecure[i], pullSource[i].Endpoint.Insecure)
+ }
+ }
+}
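+
+// A minimal sketch of the lookup flow exercised above (the reference string is
+// hypothetical, built from the fixture's prefixes):
+//
+//     ref := toNamedRef(t, "registry-a.com/foo/image:aaa")
+//     registry, err := FindRegistry(sys, ref.Name())
+//     pullSources, err := registry.PullSourcesFromReference(ref)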
+
+func TestInvalidMirrorConfig(t *testing.T) {
+ for _, tc := range []struct {
+ sys *types.SystemContext
+ expectErr string
+ }{
+ {
+ sys: &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/invalid-config-level-mirror.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ },
+ expectErr: fmt.Sprintf("pull-from-mirror must not be set for a non-mirror registry %q", "registry-a.com/foo"),
+ },
+ {
+ sys: &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/invalid-conflict-mirror.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ },
+ expectErr: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", "registry-a.com/foo", "mirror-1.registry-a.com"),
+ },
+ {
+ sys: &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/invalid-value-mirror.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ },
+ expectErr: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", "notvalid", "mirror-1.registry-a.com"),
+ },
+ } {
+ _, err := GetRegistries(tc.sys)
+ assert.ErrorContains(t, err, tc.expectErr)
+ }
+}
+
+func TestTryUpdatingCache(t *testing.T) {
+ ctx := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/try-update-cache-valid.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ }
+ InvalidateCache()
+ registries, err := TryUpdatingCache(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(registries.Registries))
+ assert.Equal(t, 1, len(configCache))
+
+ ctxInvalid := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/try-update-cache-invalid.conf",
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ }
+ registries, err = TryUpdatingCache(ctxInvalid)
+ assert.NotNil(t, err)
+ assert.Nil(t, registries)
+ assert.Equal(t, 1, len(configCache))
+}
+
+func TestRegistriesConfDirectory(t *testing.T) {
+ ctx := &types.SystemContext{
+ SystemRegistriesConfPath: "testdata/base-for-registries.d.conf",
+ SystemRegistriesConfDirPath: "testdata/registries.conf.d",
+ }
+
+ InvalidateCache()
+ registries, err := TryUpdatingCache(ctx)
+ require.NoError(t, err)
+ assert.NotNil(t, registries)
+
+ assert.Equal(t, []string{"example-overwrite.com"}, registries.UnqualifiedSearchRegistries)
+ assert.Equal(t, 6, len(registries.Registries))
+ assertRegistryLocationsEqual(t, []string{"subdomain-prefix-3-overridden-by-dropin-location.com", "subdomain-prefix-2-overridden-by-dropin-location.com", "subdomain-prefix-1-overridden-by-dropin-location.com", "1.com", "2.com", "base.com"}, registries.Registries)
+
+ reg, err := FindRegistry(ctx, "base.com/test:latest")
+ require.NoError(t, err)
+ assert.True(t, reg.Blocked)
+
+ usrs, origin, err := UnqualifiedSearchRegistriesWithOrigin(ctx)
+ require.NoError(t, err)
+ assert.Equal(t, []string{"example-overwrite.com"}, usrs)
+ assert.Equal(t, "testdata/registries.conf.d/config-1.conf", origin)
+
+ // Test that unqualified-search-registries is merged correctly
+ usr, err := UnqualifiedSearchRegistries(&types.SystemContext{
+ SystemRegistriesConfPath: "testdata/unqualified-search.conf",
+ SystemRegistriesConfDirPath: "testdata/registries.conf.d-usr1",
+ })
+ require.NoError(t, err)
+ assert.Equal(t, []string{"registry-a.com", "registry-c.com", "registry-d.com"}, usr) // Nothing overrides the base file
+
+ usr, err = UnqualifiedSearchRegistries(&types.SystemContext{
+ SystemRegistriesConfPath: "testdata/unqualified-search.conf",
+ SystemRegistriesConfDirPath: "testdata/registries.conf.d-usr2",
+ })
+ require.NoError(t, err)
+ assert.Equal(t, []string{}, usr) // Search overridden with an empty array
+}
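+
+// A minimal sketch of the layering exercised above: the file named by
+// SystemRegistriesConfPath is read first, then every *.conf drop-in found in
+// SystemRegistriesConfDirPath is applied on top of it (files with other
+// suffixes, such as config-3.ignore, are skipped), so values like
+// unqualified-search-registries and credential-helpers can be overridden by a
+// drop-in.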
+
+func TestParseShortNameMode(t *testing.T) {
+ tests := []struct {
+ input string
+ result types.ShortNameMode
+ mustFail bool
+ }{
+ {"disabled", types.ShortNameModeDisabled, false},
+ {"enforcing", types.ShortNameModeEnforcing, false},
+ {"permissive", types.ShortNameModePermissive, false},
+ {"", -1, true},
+ {"xxx", -1, true},
+ }
+
+ for _, test := range tests {
+ shortName, err := parseShortNameMode(test.input)
+ if test.mustFail {
+ assert.Error(t, err)
+ continue
+ }
+ require.NoError(t, err)
+ assert.Equal(t, test.result, shortName)
+ }
+}
+
+func TestGetShortNameMode(t *testing.T) {
+ tests := []struct {
+ path string
+ mode types.ShortNameMode
+ mustFail bool
+ }{
+ {
+ "testdata/aliases.conf",
+ types.ShortNameModeEnforcing,
+ false,
+ },
+ {
+ "testdata/registries.conf.d/config-2.conf",
+ types.ShortNameModePermissive,
+ false,
+ },
+ {
+ "testdata/registries.conf.d/config-3.conf",
+ types.ShortNameModePermissive, // empty -> default to permissive
+ false,
+ },
+ {
+ "testdata/invalid-short-name-mode.conf",
+ -1,
+ true,
+ },
+ }
+
+ for _, test := range tests {
+ sys := &types.SystemContext{
+ SystemRegistriesConfPath: test.path,
+ SystemRegistriesConfDirPath: "testdata/this-does-not-exist",
+ }
+ mode, err := GetShortNameMode(sys)
+ if test.mustFail {
+ assert.Error(t, err)
+ continue
+ }
+ require.NoError(t, err)
+ assert.Equal(t, test.mode, mode, "%s", test.path)
+ }
+}
+
+func TestCredentialHelpers(t *testing.T) {
+ tests := []struct {
+ confPath string
+ confDirPath string
+ helpers []string
+ }{
+ {
+ confPath: "testdata/cred-helper.conf",
+ confDirPath: "testdata/this-does-not-exist",
+ helpers: []string{"helper-1", "helper-2"},
+ },
+ {
+ confPath: "testdata/empty.conf",
+ confDirPath: "testdata/this-does-not-exist",
+ helpers: []string{"containers-auth.json"},
+ },
+ {
+ confPath: "testdata/cred-helper.conf",
+ confDirPath: "testdata/registries.conf.d-empty-helpers",
+ helpers: []string{"containers-auth.json"},
+ },
+ {
+ confPath: "testdata/cred-helper.conf",
+ confDirPath: "testdata/registries.conf.d",
+ helpers: []string{"dropin-1", "dropin-2"},
+ },
+ }
+
+ for _, test := range tests {
+ ctx := &types.SystemContext{
+ SystemRegistriesConfPath: test.confPath,
+ SystemRegistriesConfDirPath: test.confDirPath,
+ }
+
+ helpers, err := CredentialHelpers(ctx)
+ require.NoError(t, err)
+ require.Equal(t, test.helpers, helpers, "%v", test)
+ }
+}
diff --git a/pkg/sysregistriesv2/testdata/aliases.conf b/pkg/sysregistriesv2/testdata/aliases.conf
new file mode 100644
index 0000000..cb05b27
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/aliases.conf
@@ -0,0 +1,7 @@
+short-name-mode="enforcing"
+
+[aliases]
+docker="docker.io/library/foo"
+"quay/foo"="quay.io/library/foo"
+example="example.com/library/foo"
+empty=""
diff --git a/pkg/sysregistriesv2/testdata/base-for-registries.d.conf b/pkg/sysregistriesv2/testdata/base-for-registries.d.conf
new file mode 100644
index 0000000..f27ed07
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/base-for-registries.d.conf
@@ -0,0 +1,5 @@
+unqualified-search-registries = ["example.com"]
+
+[[registry]]
+location = "base.com"
+insecure = true
\ No newline at end of file
diff --git a/pkg/sysregistriesv2/testdata/blocked-conflicts.conf b/pkg/sysregistriesv2/testdata/blocked-conflicts.conf
new file mode 100644
index 0000000..5bf8925
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/blocked-conflicts.conf
@@ -0,0 +1,13 @@
+[[registry]]
+location = "registry.com"
+
+[[registry.mirror]]
+location = "mirror-1.registry.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry.com"
+
+
+[[registry]]
+location = "registry.com"
+blocked = true
diff --git a/pkg/sysregistriesv2/testdata/cred-helper.conf b/pkg/sysregistriesv2/testdata/cred-helper.conf
new file mode 100644
index 0000000..fbc5e97
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/cred-helper.conf
@@ -0,0 +1,10 @@
+credential-helpers = ["helper-1", "helper-2"]
+[[registry]]
+location = "registry-a.com"
+
+[[registry]]
+location = "registry-b.com"
+
+[[registry]]
+location = "registry-c.com/foo"
+prefix = "registry-c.com"
diff --git a/pkg/sysregistriesv2/testdata/empty.conf b/pkg/sysregistriesv2/testdata/empty.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/empty.conf
diff --git a/pkg/sysregistriesv2/testdata/find-registry.conf b/pkg/sysregistriesv2/testdata/find-registry.conf
new file mode 100644
index 0000000..0abb7d7
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/find-registry.conf
@@ -0,0 +1,62 @@
+[[registry]]
+location = "registry.com:5000"
+prefix = "simple-prefix.com"
+
+[[registry]]
+location = "another-registry.com:5000"
+prefix = "complex-prefix.com:4000/with/path"
+
+[[registry]]
+location = "registry.com:5000"
+prefix = "another-registry.com"
+
+[[registry]]
+location = "no-prefix.com"
+
+[[registry]]
+location = "empty-prefix.com"
+prefix = ""
+
+[[registry]]
+location = "subdomain-prefix.com"
+prefix = "*.so.simple-prefix.com"
+
+[[registry]]
+location = "subdomain-prefix-2.com"
+prefix = "*.simple-prefix.com"
+
+# For subdomain override using dropin registries.conf.d/subdomain-override-1.conf
+[[registry]]
+location = "subdomain-prefix-1.com"
+prefix = "*.not.quite.simple-prefix.com"
+
+# For subdomain override failure using registries.conf.d/subdomain-override-2.conf
+# with unmatched prefix = "*.example.com"
+[[registry]]
+location = "subdomain-prefix-2.com"
+prefix = "*.docker.io"
+
+# For subdomain override using dropin registries.conf.d/subdomain-override-3.conf
+[[registry]]
+location = "subdomain-prefix-3.com"
+prefix = "*.bar.example.com"
+
+# For longest wildcarded prefix match in comparison with "*.docker.io"
+[[registry]]
+location = "subdomain-prefix-4.com"
+prefix = "*.bar.docker.io"
+
+# For longest prefix match in comparison with *.bar.example.com
+[[registry]]
+location = "subdomain-prefix-5.com"
+prefix = "foo.bar.example.com:5000"
+
+# For empty location with wildcard prefix
+[[registry]]
+prefix="*.internal.registry.com"
+
+[[registry]]
+prefix="*.com"
+
+[[registry]]
+prefix="*.foobar.io"
diff --git a/pkg/sysregistriesv2/testdata/insecure-conflicts.conf b/pkg/sysregistriesv2/testdata/insecure-conflicts.conf
new file mode 100644
index 0000000..6368194
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/insecure-conflicts.conf
@@ -0,0 +1,13 @@
+[[registry]]
+location = "registry.com"
+
+[[registry.mirror]]
+location = "mirror-1.registry.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry.com"
+
+
+[[registry]]
+location = "registry.com"
+insecure = true
diff --git a/pkg/sysregistriesv2/testdata/invalid-aliases.conf b/pkg/sysregistriesv2/testdata/invalid-aliases.conf
new file mode 100644
index 0000000..80530ca
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/invalid-aliases.conf
@@ -0,0 +1,3 @@
+[aliases]
+image1="quay.io/repo/image:1"
+image2="image:1"
diff --git a/pkg/sysregistriesv2/testdata/invalid-config-level-mirror.conf b/pkg/sysregistriesv2/testdata/invalid-config-level-mirror.conf
new file mode 100644
index 0000000..cdcde7e
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/invalid-config-level-mirror.conf
@@ -0,0 +1,11 @@
+[[registry]]
+prefix = "registry-a.com/foo"
+location = "registry-a.com/bar"
+pull-from-mirror = "digest-only"
+
+[[registry.mirror]]
+location = "mirror-1.registry-a.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry-a.com"
+insecure = true
\ No newline at end of file
diff --git a/pkg/sysregistriesv2/testdata/invalid-conflict-mirror.conf b/pkg/sysregistriesv2/testdata/invalid-conflict-mirror.conf
new file mode 100644
index 0000000..f710c13
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/invalid-conflict-mirror.conf
@@ -0,0 +1,12 @@
+[[registry]]
+prefix = "registry-a.com/foo"
+location = "registry-a.com/bar"
+mirror-by-digest-only = true
+
+[[registry.mirror]]
+pull-from-mirror = "digest-only"
+location = "mirror-1.registry-a.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry-a.com"
+insecure = true
\ No newline at end of file
diff --git a/pkg/sysregistriesv2/testdata/invalid-prefix.conf b/pkg/sysregistriesv2/testdata/invalid-prefix.conf
new file mode 100644
index 0000000..0103d7a
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/invalid-prefix.conf
@@ -0,0 +1,18 @@
+[[registry]]
+location = "registry.com:5000"
+prefix = "http://schema-is-invalid.com"
+
+[[registry]]
+location = "another-registry.com:5000"
+prefix = "complex-prefix.com:4000/with/path"
+
+[[registry]]
+location = "registry.com:5000"
+prefix = "another-registry.com"
+
+[[registry]]
+location = "no-prefix.com"
+
+[[registry]]
+location = "empty-prefix.com"
+prefix = ""
diff --git a/pkg/sysregistriesv2/testdata/invalid-search.conf b/pkg/sysregistriesv2/testdata/invalid-search.conf
new file mode 100644
index 0000000..c31fed1
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/invalid-search.conf
@@ -0,0 +1 @@
+unqualified-search-registries = ["registry-a.com/namespace-is-forbidden"]
diff --git a/pkg/sysregistriesv2/testdata/invalid-short-name-mode.conf b/pkg/sysregistriesv2/testdata/invalid-short-name-mode.conf
new file mode 100644
index 0000000..acf2eb2
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/invalid-short-name-mode.conf
@@ -0,0 +1 @@
+short-name-mode="invalid"
diff --git a/pkg/sysregistriesv2/testdata/invalid-value-mirror.conf b/pkg/sysregistriesv2/testdata/invalid-value-mirror.conf
new file mode 100644
index 0000000..bb0797b
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/invalid-value-mirror.conf
@@ -0,0 +1,11 @@
+[[registry]]
+prefix = "registry-a.com/foo"
+location = "registry-a.com/bar"
+
+[[registry.mirror]]
+pull-from-mirror = "notvalid"
+location = "mirror-1.registry-a.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry-a.com"
+insecure = true
\ No newline at end of file
diff --git a/pkg/sysregistriesv2/testdata/invalidate-cache.conf b/pkg/sysregistriesv2/testdata/invalidate-cache.conf
new file mode 100644
index 0000000..597796f
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/invalidate-cache.conf
@@ -0,0 +1,23 @@
+[[registry]]
+location = "registry.com"
+
+[[registry.mirror]]
+location = "mirror-1.registry.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry.com"
+
+
+[[registry]]
+location = "blocked.registry.com"
+blocked = true
+
+
+[[registry]]
+location = "insecure.registry.com"
+insecure = true
+
+
+[[registry]]
+location = "untrusted.registry.com"
+insecure = true
diff --git a/pkg/sysregistriesv2/testdata/mirrors.conf b/pkg/sysregistriesv2/testdata/mirrors.conf
new file mode 100644
index 0000000..f1012f1
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/mirrors.conf
@@ -0,0 +1,13 @@
+[[registry]]
+location = "registry.com"
+
+[[registry.mirror]]
+location = "mirror-1.registry.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry.com"
+insecure = true
+
+[[registry]]
+location = "blocked.registry.com"
+blocked = true
diff --git a/pkg/sysregistriesv2/testdata/missing-mirror-location.conf b/pkg/sysregistriesv2/testdata/missing-mirror-location.conf
new file mode 100644
index 0000000..481574f
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/missing-mirror-location.conf
@@ -0,0 +1,10 @@
+unqualified-search-registries = ["registry-a.com"]
+
+[[registry]]
+location = "registry-a.com"
+
+[[registry]]
+location = "registry-b.com"
+[[registry.mirror]]
+location = "mirror-b.com"
+[[registry.mirror]]
diff --git a/pkg/sysregistriesv2/testdata/mixing-v1-v2-empty.conf b/pkg/sysregistriesv2/testdata/mixing-v1-v2-empty.conf
new file mode 100644
index 0000000..f049987
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/mixing-v1-v2-empty.conf
@@ -0,0 +1,10 @@
+unqualified-search-registries = []
+
+[registries.search]
+registries = []
+
+[registries.block]
+registries = []
+
+[registries.insecure]
+registries = []
diff --git a/pkg/sysregistriesv2/testdata/mixing-v1-v2.conf b/pkg/sysregistriesv2/testdata/mixing-v1-v2.conf
new file mode 100644
index 0000000..bd20b38
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/mixing-v1-v2.conf
@@ -0,0 +1,19 @@
+unqualified-search-registries = ["registry-a.com", "registry-c.com"]
+
+[registries.search]
+registries = ["registry-a.com", "registry-c.com"]
+
+[registries.block]
+registries = ["registry-b.com"]
+
+[registries.insecure]
+registries = ["registry-d.com", "registry-e.com", "registry-a.com"]
+
+[[registry]]
+location = "registry-a.com"
+
+[[registry]]
+location = "registry-b.com"
+
+[[registry]]
+location = "registry-c.com"
\ No newline at end of file
diff --git a/pkg/sysregistriesv2/testdata/pull-sources-from-reference.conf b/pkg/sysregistriesv2/testdata/pull-sources-from-reference.conf
new file mode 100644
index 0000000..ee50fdc
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/pull-sources-from-reference.conf
@@ -0,0 +1,108 @@
+[[registry]]
+prefix = "registry-a.com/foo"
+location = "registry-a.com/bar"
+
+[[registry.mirror]]
+location = "mirror-1.registry-a.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry-a.com"
+insecure = true
+
+[[registry]]
+prefix = "registry-b.com/foo"
+location = "registry-b.com/bar"
+mirror-by-digest-only = true
+
+[[registry.mirror]]
+location = "mirror-1.registry-b.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry-b.com"
+
+[[registry]]
+prefix = "registry-a.com/baz"
+location = "registry-a.com/bar"
+
+[[registry.mirror]]
+location = "mirror-1.registry-a.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry-a.com"
+insecure = true
+
+[[registry]]
+prefix = "registry-b.com/baz"
+location = "registry-b.com/bar"
+
+[[registry.mirror]]
+pull-from-mirror = "digest-only"
+location = "mirror-1.registry-b.com"
+
+[[registry.mirror]]
+pull-from-mirror = "digest-only"
+location = "mirror-2.registry-b.com"
+
+[[registry]]
+prefix = "registry-c.com/baz"
+location = "registry-c.com/bar"
+
+[[registry.mirror]]
+location = "mirror-1.registry-c.com"
+
+[[registry.mirror]]
+pull-from-mirror = "digest-only"
+location = "mirror-2.registry-c.com"
+
+[[registry]]
+prefix = "registry-d.com/baz"
+location = "registry-d.com/bar"
+mirror-by-digest-only = true
+
+[[registry.mirror]]
+location = "mirror-1.registry-d.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry-d.com"
+
+[[registry]]
+prefix = "registry-e.com/baz"
+location = "registry-e.com/bar"
+
+[[registry.mirror]]
+pull-from-mirror = "tag-only"
+location = "mirror-1.registry-e.com"
+
+[[registry.mirror]]
+pull-from-mirror = "tag-only"
+location = "mirror-2.registry-e.com"
+
+[[registry]]
+prefix = "registry-f.com/baz"
+location = "registry-f.com/bar"
+
+[[registry.mirror]]
+location = "mirror-1.registry-f.com"
+
+[[registry.mirror]]
+pull-from-mirror = "tag-only"
+location = "mirror-2.registry-f.com"
+
+[[registry]]
+prefix = "registry-g.com/baz"
+location = "registry-g.com/bar"
+
+[[registry.mirror]]
+pull-from-mirror = "digest-only"
+location = "mirror-1.registry-g.com"
+
+[[registry.mirror]]
+pull-from-mirror = "tag-only"
+location = "mirror-2.registry-g.com"
+
+[[registry.mirror]]
+location = "mirror-3.registry-g.com"
+
+[[registry.mirror]]
+pull-from-mirror = "all"
+location = "mirror-4.registry-g.com"
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d-empty-helpers/empty.conf b/pkg/sysregistriesv2/testdata/registries.conf.d-empty-helpers/empty.conf
new file mode 100644
index 0000000..3be4372
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d-empty-helpers/empty.conf
@@ -0,0 +1 @@
+credential-helpers=[]
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d-usr1/no-usr.conf b/pkg/sysregistriesv2/testdata/registries.conf.d-usr1/no-usr.conf
new file mode 100644
index 0000000..913fa11
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d-usr1/no-usr.conf
@@ -0,0 +1 @@
+# unqualified-search-registries is not set
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d-usr2/empty-usr.conf b/pkg/sysregistriesv2/testdata/registries.conf.d-usr2/empty-usr.conf
new file mode 100644
index 0000000..0e4d167
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d-usr2/empty-usr.conf
@@ -0,0 +1 @@
+unqualified-search-registries = []
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d/config-1.conf b/pkg/sysregistriesv2/testdata/registries.conf.d/config-1.conf
new file mode 100644
index 0000000..b52add5
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d/config-1.conf
@@ -0,0 +1,10 @@
+unqualified-search-registries = ["example-overwrite.com"]
+credential-helpers = ["we", "will", "be", "overridden", "later"]
+
+[[registry]]
+location = "1.com"
+
+[aliases]
+docker="docker.io/library/config1"
+config1="config1.com/image"
+barz="barz.com/image/config1"
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d/config-2.conf b/pkg/sysregistriesv2/testdata/registries.conf.d/config-2.conf
new file mode 100644
index 0000000..695b80b
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d/config-2.conf
@@ -0,0 +1,19 @@
+short-name-mode="permissive"
+
+credential-helpers=["dropin-1", "dropin-2"]
+
+[[registry]]
+location = "1.com"
+
+[[registry]]
+location = "2.com"
+
+[[registry]]
+location = "base.com"
+blocked = true
+
+[aliases]
+config2="config2.com/image"
+barz="barz.com/config2"
+added3="xxx.com/image"
+example=""
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d/config-3.conf b/pkg/sysregistriesv2/testdata/registries.conf.d/config-3.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d/config-3.conf
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d/config-3.ignore b/pkg/sysregistriesv2/testdata/registries.conf.d/config-3.ignore
new file mode 100644
index 0000000..65866fd
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d/config-3.ignore
@@ -0,0 +1,7 @@
+unqualified-search-registries = ["ignore-example-overwrite.com"]
+
+[[registry]]
+location = "ignore-me-because-i-have-a-wrong-suffix.com"
+
+[aliases]
+ignore="me because i have a wrong suffix"
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-1.conf b/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-1.conf
new file mode 100644
index 0000000..b98000c
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-1.conf
@@ -0,0 +1,3 @@
+[[registry]]
+location = "subdomain-prefix-1-overridden-by-dropin-location.com"
+prefix = "*.not.quite.simple-prefix.com"
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-2.conf b/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-2.conf
new file mode 100644
index 0000000..87a10a1
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-2.conf
@@ -0,0 +1,3 @@
+[[registry]]
+location = "subdomain-prefix-2-overridden-by-dropin-location.com"
+prefix = "*.docker.com"
diff --git a/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-3.conf b/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-3.conf
new file mode 100644
index 0000000..f7daf16
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/registries.conf.d/subdomain-override-3.conf
@@ -0,0 +1,3 @@
+[[registry]]
+location = "subdomain-prefix-3-overridden-by-dropin-location.com"
+prefix = "*.bar.example.com"
diff --git a/pkg/sysregistriesv2/testdata/try-update-cache-invalid.conf b/pkg/sysregistriesv2/testdata/try-update-cache-invalid.conf
new file mode 100644
index 0000000..9977a28
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/try-update-cache-invalid.conf
@@ -0,0 +1 @@
+invalid
diff --git a/pkg/sysregistriesv2/testdata/try-update-cache-valid.conf b/pkg/sysregistriesv2/testdata/try-update-cache-valid.conf
new file mode 100644
index 0000000..0a498af
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/try-update-cache-valid.conf
@@ -0,0 +1,2 @@
+[[registry]]
+location = "registry.com"
diff --git a/pkg/sysregistriesv2/testdata/unmarshal.conf b/pkg/sysregistriesv2/testdata/unmarshal.conf
new file mode 100644
index 0000000..597796f
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/unmarshal.conf
@@ -0,0 +1,23 @@
+[[registry]]
+location = "registry.com"
+
+[[registry.mirror]]
+location = "mirror-1.registry.com"
+
+[[registry.mirror]]
+location = "mirror-2.registry.com"
+
+
+[[registry]]
+location = "blocked.registry.com"
+blocked = true
+
+
+[[registry]]
+location = "insecure.registry.com"
+insecure = true
+
+
+[[registry]]
+location = "untrusted.registry.com"
+insecure = true
diff --git a/pkg/sysregistriesv2/testdata/unqualified-search.conf b/pkg/sysregistriesv2/testdata/unqualified-search.conf
new file mode 100644
index 0000000..c1137a0
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/unqualified-search.conf
@@ -0,0 +1,13 @@
+unqualified-search-registries = ["registry-a.com", "registry-c.com", "registry-d.com"]
+
+[[registry]]
+location = "registry-a.com"
+
+[[registry]]
+location = "registry-b.com"
+
+[[registry]]
+location = "registry-c.com"
+
+[[registry]]
+location = "registry-d.com"
diff --git a/pkg/sysregistriesv2/testdata/v1-compatibility.conf b/pkg/sysregistriesv2/testdata/v1-compatibility.conf
new file mode 100644
index 0000000..c26ee83
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/v1-compatibility.conf
@@ -0,0 +1,8 @@
+[registries.search]
+registries = ["registry-a.com////", "registry-c.com", "registry-d.com"]
+
+[registries.block]
+registries = ["registry-b.com"]
+
+[registries.insecure]
+registries = ["registry-b.com////", "registry-d.com", "registry-e.com", "registry-a.com"]
diff --git a/pkg/sysregistriesv2/testdata/v1-invalid-block.conf b/pkg/sysregistriesv2/testdata/v1-invalid-block.conf
new file mode 100644
index 0000000..bcf9248
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/v1-invalid-block.conf
@@ -0,0 +1,8 @@
+[registries.search]
+registries = ["registry-a.com////", "registry-c.com", "registry-d.com", "http://schema-is-invalid.com"]
+
+[registries.block]
+registries = ["registry-b.com", "http://schema-is-invalid.com"]
+
+[registries.insecure]
+registries = ["registry-b.com////", "registry-d.com", "registry-e.com", "registry-a.com"]
diff --git a/pkg/sysregistriesv2/testdata/v1-invalid-insecure.conf b/pkg/sysregistriesv2/testdata/v1-invalid-insecure.conf
new file mode 100644
index 0000000..d821fb1
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/v1-invalid-insecure.conf
@@ -0,0 +1,8 @@
+[registries.search]
+registries = ["registry-a.com////", "registry-c.com", "registry-d.com"]
+
+[registries.block]
+registries = ["registry-b.com"]
+
+[registries.insecure]
+registries = ["registry-b.com////", "registry-d.com", "registry-e.com", "registry-a.com", "http://schema-is-invalid.com"]
diff --git a/pkg/sysregistriesv2/testdata/v1-invalid-search.conf b/pkg/sysregistriesv2/testdata/v1-invalid-search.conf
new file mode 100644
index 0000000..a9cf15b
--- /dev/null
+++ b/pkg/sysregistriesv2/testdata/v1-invalid-search.conf
@@ -0,0 +1,8 @@
+[registries.search]
+registries = ["registry-a.com////", "registry-c.com", "registry-d.com", "http://schema-is-invalid.com"]
+
+[registries.block]
+registries = ["registry-b.com"]
+
+[registries.insecure]
+registries = ["registry-b.com////", "registry-d.com", "registry-e.com", "registry-a.com"]
diff --git a/pkg/tlsclientconfig/testdata/full/ca-cert-1.crt b/pkg/tlsclientconfig/testdata/full/ca-cert-1.crt
new file mode 100644
index 0000000..69151e3
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/full/ca-cert-1.crt
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIE+jCCAuKgAwIBAgIJAOwwZJiWYGNwMA0GCSqGSIb3DQEBCwUAMDExLzAtBgNV
+BAMMJmNvbnRhaW5lcnMvaW1hZ2UgdGVzdCBDQSBjZXJ0aWZpY2F0ZSAxMCAXDTE4
+MDgyODE2MDQ0NVoYDzIxMTgwODA0MTYwNDQ1WjAxMS8wLQYDVQQDDCZjb250YWlu
+ZXJzL2ltYWdlIHRlc3QgQ0EgY2VydGlmaWNhdGUgMTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAMhS5F4PXlmXAtnxqdbxVRv/YYqKqg+/YV7y2hRuRZk2
+DVhcc2qCr80he6V9He9esAlGTAk/tLES4HasB27ry4ExZvAivNhf4VLSso8LANf6
+/mFDAK3+istdlZ4hb2f4eAKmKaCEB3GxB1EMxBWB8BiSZhzSLQfMaWBLOt5HKxNv
+/7Ha2HOwQqCoqoKR6dg6nt9PV2VLuVsmgI6pKpn9CsXnsplak6hDzUtT71HH80De
+GsZsfSApRB/iSWlJie80hDKyP5HK5//qFfRAhlrfdb7wuqrsjdafO4WYskVFtvJy
+1eU2jmI/EPO83dWhyW/COiMJNHh+8IPYlDP8tCbm8tdGnqF+pZTe5hlXEXvwJwF0
+jxWlx6MhiDLX2T2Tq/ypOEsrAWFfRtKY+W1Hbm6w+i9vKKhpxkGFvg7Js/oKPif9
+QqKKY6bpERQG9tNZzpU+PcX3y0AyQU1mk4WmlF07p40U2lGddvXwUokEunbvSIKp
+W3PINodekHuHdDVMA4bMS1SucJtp4MIPw2so83rfcwY0x2rc8ONWd97rJybqZtyf
+DThWjnCUa/QDuAe2G2dVN3O6h0CZD1h9/faWecu7ppRN1x7HMfgjT5p1y+ZvObJS
+fQr1AHZFx8lwRBBrAES0ygeuBIzgnCFo5pdeTE7cVbgCb1+5FeLiIhVXjZ9ZESJl
+AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAKdM
+SjM6m8+v/FwNboB+y0LC5Nu3BOLbSqo4V/OmNh94Lnk3gsGbJTEmNWfZ6Xa6svgx
+IcR7ayZz/y2iXF45rbxk1b/UGPe4SR8YqO4z4NnGVjC37HAW1AtoYXsdQTQMjg39
+oGJSfRTn58BQbVj2DMYFK7hZhnn1a/9zQgz5wKlxEYqmF2UPHbERf0X6Nj8aj2gQ
+rHmCx6erAKHDtqRA6WBIEC+01bdeq5Ds+i4x5E1XJtkugNY/MWrid1NXPaiWeJeh
+HpebfxKXHqY8BKhTothBVJR5N+8YkJSFLCcSIrkZSvdu7vk8ZtuTKVHkjPPeCwKj
+iIP0/SDLDE2FIH3VXpkuT4FutNkut8P3DAVVgpKEj3SyzSbPchrbVsYdRJk/iqLO
+bRZot5V6W351U/GPvdyedpzykd9NWqVc1j624M8OXzrso48BKuhd2NlzeCHiev2d
+VPKkYQzhxrHfQICbLgwVTf9BRPQDhjgzbXVzcEMQEt0eM9bFRhbunlPkcK6zSTeH
+q6A2XEXuF4Y5f1azJNPX4x6RsPTRt1JmNUUOowcC+ilW3ALIlIQszzmUzKBaIVko
+5A6Z2eih1Fj7AwzjnqErVGMhwHIzHkRc/NxE1s//oxwunqWCapduCHxvH7T5k6Dk
+donX0wDmrj980cDo5+X8ZjjroGJvoraSl0QV2e+g
+-----END CERTIFICATE-----
diff --git a/pkg/tlsclientconfig/testdata/full/ca-cert-2.crt b/pkg/tlsclientconfig/testdata/full/ca-cert-2.crt
new file mode 100644
index 0000000..5658090
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/full/ca-cert-2.crt
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIE+jCCAuKgAwIBAgIJAPazFn6MWm78MA0GCSqGSIb3DQEBCwUAMDExLzAtBgNV
+BAMMJmNvbnRhaW5lcnMvaW1hZ2UgdGVzdCBDQSBjZXJ0aWZpY2F0ZSAyMCAXDTE4
+MDgyODE2MDUyMVoYDzIxMTgwODA0MTYwNTIxWjAxMS8wLQYDVQQDDCZjb250YWlu
+ZXJzL2ltYWdlIHRlc3QgQ0EgY2VydGlmaWNhdGUgMjCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKOEn+tOrG03S5LMxH/5LBCdQJ/JYVJCw9daPG1rEZSQ
+RavX6NBZ+JhGg9Gb2d724y595UQ81IuiD6VIz8q0p1Pz09kW6AwQHLcMi64Xg9eN
+AJ2SnEdpO2mYxydEpReWlkg82ZAgFHGMBUkie2Q07ascxo+MpXd8D4Q12uGVrjbA
+BUcVz5xVarhX024hYgbuhvaItD2Bg1bwtZ9uAI6pKbIddkEW4rr4K3xLb5IitOk3
+Gr3Eg9e1ZIX+ZXbgGOgENlXZ4LouwK0aER9TjGIJ/KRTKZxbGG7lZhQ7ycamtjaX
+Al1k4zhZ/OkEJF9lsFfQ1KUcPoG6leQw0hMR+6j3iNDHXs3rEhj4brgNIGK5ou0n
+XKIaz0uaocYBnUPV0Bd3mHFuReouQIBFezoMGw2f9/SPq+aRwW5z0+xuMTJeDuX4
+J4mr7+Cm+vLLc3hdTOtw8+oHkKIHwVpC6VWEKXLDzfXCTGbq5+at2qR9edd1DVLc
+o5wVKh31Oawd/0OxeUN+KRW+txBDLmpIsWxQqTt6S+qRhE3AoOO/3n8jzF/0G6LC
+raLxIk/NCFaVMQYxK2PCVaQ/qJA5sNGsiHtcC/xmmKW/+0IGN1WOnJBWWCFoy/3s
+CCXICmHEOT01aO3tTfLt67KyiDY9BCDhWTVtcrknturf/52qQAOuSXllwrIf+9QT
+AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAB/G
+UlpC8F1hq9t25fSA5Jf2P6lJXFX2dm+oJDR/dFHwj/tOU2ladVSae0mWzWnCsTyq
+sCbNcdUlJ1X6rQoVFDeMsnooWFo52P56dNRlPCbboyd1mKBQZffR5ASbmMkdcDbM
+ZYS5s6XXh80o9HoHKFF95XlBpnGY4GKuvm/LA6EikDcrcDrzwokfNJYEwjTKblh/
+QIEdnAeNNohpSIm4flDmJriUwnZxKjEzLcxPbII7kWJwwa/EAySxCfstGJ4KY6Cl
+gE8a/bJ/R3t/g9ZrHyFxvKkVJ8ZdOE4KX3zbc/Hn2VVlL0/VBbenRuyt9DgSHY6I
+ghPIpixxI35h4HcmDKD+3Ga6tKO3WNZgTLF66+4UdtvQ9XEGI3l1u9HqNsnYkapi
+uzPHtIw7MZvNTj1ado4mun5k63Qgq0AsAF6Zffgvl7SNg7LyezMrs4+pythfvm8a
+c/blO06Ue2hIasjFGDyYGVGXnv1xuQE8zgQ/Ye4e5VRoa0bNJqLPtnH3s9S6+i8E
+sCtZYk4AZDMSRPE3jlUWdJ6yLEgdrqvtSuPZEX8sc0XnhPnM/OtzfGpRIID+IYVc
+hqDdB6zpnJ6D4ficmWVRY+HLI3XLKHcTfg1JDfXHK9guNXTd+h1fwtk64XBhVLaf
+YK5NHbrYyeuaY40cP2SqiDAhoSzzPpuronDjz4Nh
+-----END CERTIFICATE-----
diff --git a/pkg/tlsclientconfig/testdata/full/client-cert-1.cert b/pkg/tlsclientconfig/testdata/full/client-cert-1.cert
new file mode 100644
index 0000000..bbf4d63
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/full/client-cert-1.cert
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIE6DCCAtACCQCF7Uo5GTGOezANBgkqhkiG9w0BAQsFADA1MTMwMQYDVQQDDCpj
+b250YWluZXJzL2ltYWdlIHRlc3QgY2xpZW50IGNlcnRpZmljYXRlIDEwIBcNMTgw
+ODI4MTUzNjI5WhgPMjExODA4MDQxNTM2MjlaMDUxMzAxBgNVBAMMKmNvbnRhaW5l
+cnMvaW1hZ2UgdGVzdCBjbGllbnQgY2VydGlmaWNhdGUgMTCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAKEhMzeyArbV+VuNVP0ZeOrEDNzHooXY9yST0Ftl
+blXifm8uFrCx+QytGJEFuM8K/Z1AR6MH0x6yXtYsqyCkM3+tFHlErdjm9Zxe+4Oh
+e+3/NImmnyfMt7N7solyfoGm8RTu5/NhGCjUTVJxS5xwO5rg7UXNHNBSVVyW4VSQ
+cAUs/j8zqajdAOP+3rE1A9rPLRZTUkuuLZqSPvlth2d1EMeQmvi2EUXoArQ1JKDm
+FxdpeUJ7qoUyrGuvaSY9jTyvAaqRzBXA6bHZXUbeaCrB1feOY0NEmQdN0+knV/+s
+an7MVpTrIRtsL+RraGCJX8hJFoTKcc7SiFKUK7Kv5KeqP6GQkv347HlXLZ9Ay+Do
+b4bRyBu7tRnkoBlCtXSF+7MEFd82le0K9PRqqf4g4riSvPvWdTrvWxJEv1ntD8Mr
+DRtBfw8MoMTdqTkrUCKzoHKzl2BHb9RzjOuRQpcT4tfNHn0UIn280CbDqzeNnqfp
+x+1dXLTPiEVRz/BSekcjYcXjqrZPJZ6pm/f7IA041nPq4L/pzZH8NSyMf2K5CjRQ
+P+ThhHqY2oYQRk73PezZVLFUk1hIuiuyQaoQqLplvNNzeXslyoY02UgjUYm4VHQz
+ctHD5sETbNJO4oLm4hCVkGZUzogMd3Rki+f6TM0jL0LqCkWzPIsvkdVLrKk2xZOF
+YPH1AgMBAAEwDQYJKoZIhvcNAQELBQADggIBAG2SYSh6nYy6BR8XpdprRUwyUmDt
+v2Ee14ItrBFVjlyzJV+hVG9rjB4IUu+Xmj6J78m0y3ONrYS1bXfkIifJULVZFnBw
+e3Y4l2ke1K0GaMZNvr8JhKB4uX9P/WL5kmYCV6F7YgFqCBr+YcQHRBHYbpVmkc1S
+YFDzXmBPTI4OoYrNBHyjlF9zgLY40ZsL1Af/j6P11ufdNrqdiCJ6RcfCdsTMORd0
+H+xyCjIX54A+V5CWhUix53gQEuN8W6zPyRtRzV+zLX05bYIC7gYqQ+j6qvh6dEkF
+zNr0YGLw2ecM/KLgocPPsaRGeAnXadnIP4Zt0YynAme0jSqYHK3JJD8qWZdj1QOd
+bJ9twiO+4G+UC2cMZ/OaujVpHr8QjSppHEb+uw4mUqiQtzXBH42DjKeUZFA3MXbp
+PWg8xmeuxS0uhb/j6Ookg9wREjcdb9dja7Ym/qslH5aix9CbULr4H6vllwMnFgiN
+cKXuqupnvCihxVe2n1RHQetvgacOyMoi9/1AwJ6WLnHU+8KHNSdlxD9JrzYQ+WeZ
+N82yqBZkKbmESj9BZuRT1Pl7y0qWAPmB9HiAr9A1LenoH/ZG2JBSCGiraUb6zxvg
+Ros7TQYAh1C3dgdwyiISVvCblVQdn4nFbYCBwFWbPrpMM/PNKQ8Hmdj/2rRKPX6q
+Ho8jfpXhdO8eMcbJ
+-----END CERTIFICATE-----
diff --git a/pkg/tlsclientconfig/testdata/full/client-cert-1.key b/pkg/tlsclientconfig/testdata/full/client-cert-1.key
new file mode 100644
index 0000000..6bbafc2
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/full/client-cert-1.key
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQChITM3sgK21flb
+jVT9GXjqxAzcx6KF2Pckk9BbZW5V4n5vLhawsfkMrRiRBbjPCv2dQEejB9Mesl7W
+LKsgpDN/rRR5RK3Y5vWcXvuDoXvt/zSJpp8nzLeze7KJcn6BpvEU7ufzYRgo1E1S
+cUuccDua4O1FzRzQUlVcluFUkHAFLP4/M6mo3QDj/t6xNQPazy0WU1JLri2akj75
+bYdndRDHkJr4thFF6AK0NSSg5hcXaXlCe6qFMqxrr2kmPY08rwGqkcwVwOmx2V1G
+3mgqwdX3jmNDRJkHTdPpJ1f/rGp+zFaU6yEbbC/ka2hgiV/ISRaEynHO0ohSlCuy
+r+Snqj+hkJL9+Ox5Vy2fQMvg6G+G0cgbu7UZ5KAZQrV0hfuzBBXfNpXtCvT0aqn+
+IOK4krz71nU671sSRL9Z7Q/DKw0bQX8PDKDE3ak5K1Ais6Bys5dgR2/Uc4zrkUKX
+E+LXzR59FCJ9vNAmw6s3jZ6n6cftXVy0z4hFUc/wUnpHI2HF46q2TyWeqZv3+yAN
+ONZz6uC/6c2R/DUsjH9iuQo0UD/k4YR6mNqGEEZO9z3s2VSxVJNYSLorskGqEKi6
+ZbzTc3l7JcqGNNlII1GJuFR0M3LRw+bBE2zSTuKC5uIQlZBmVM6IDHd0ZIvn+kzN
+Iy9C6gpFszyLL5HVS6ypNsWThWDx9QIDAQABAoICABfPxTbk2pvUcT5DW5zvp7vh
+7xitc+odzyAbq/3ltnGAwDiD8hx4oJpr9iZwVwhZ0nLrrep83Ik7yOITxdJUEbw7
+grwNFzfnocJTEw43zg202jnBYuHJ0hf3HsJLJkDYv+XdDHAeGCjofujBD3B18YkI
+1merUSfEExNUxMXvdm59YLFMXDU1O811u8kqUYCDf2E+PPosKlceZ6oKsqjqVul3
+CD/bACB5kfS5qckRV7ZBAwd9KQz4GRzs1jgtfnLKVg+z7hoE4UREIBG6CmDPNmSY
+KTkmqQq4SKm7A7kn7LECV4U4XjMkQMubx9gEVSBPFMHY/QqYeEbj2LVWYw8YDMEC
+dWrAR9+kg5Y5LLlxTEHv3nJSAwWf2MmSxZGO3Gav9vxMv0+lUC90A874y61bnebf
+ombV+WfrpVrSQd3UvDSwGxbKIdIsqz/6VTd1qrURIRNiL57JfbeQVo7p/cRjFsk1
+yzhPnFqVjgC3xt2bpzVMhFl1jovpk7YHctm305Oh1L5+O/OTILyuzVQOLVlmQlSh
+69amtdWioUD+kuzAyxplue/achpk9ylB5/p2I5yHx7i4UOs8SVBHIgSi+6y8CMPi
+1f9U8r+MbziLPP8fX4KqltZ15gMJRQ5yAjiaLOEq1ysvcdNxnSEHpDppN2XqDZXX
+th5ma+zuN/Y5eXLouIv9AoIBAQDV8sG8lskAr6aOlPhWyPgrOSrIH0Lvcc2/+Pn+
+SkEry5HsZKXQhxbLb3WGGyVrocwv4RIRrsfqMGUN0HMbJbZLVTUTJ6uRDZe7zdP8
+FlY8q453XClOXxkXjtqg/ekstm7e0ZtsDXICzajDKZBt9iH1RtWshKlGncBE8w47
+gaMthbw8a2694ov34XC2A8AtrI/NOwXT1j2hhZ5oBVx8sDfNPgL/Tbklv90vWWQ8
+uTOlrQCekLvcdAH+SnyBhq/9sOKmsR8O+Cq8hrKeRyMCRe50612wO3V3w6Ug76R0
+4qH93hHH5fF/tT3ix4nUemjDC2fFt0dHBd8u0FvqzxFADuJTAoIBAQDAzMTLbKkq
+FEY6tsXxDcv/Vd5AY7TjuXOe4hwner25KUfTSEpo+0KQGi4wZcUhC9edqea4trvu
+8aTD2E1t7HwU9qC3MLfbCsoFTC3PuI9zZuBYBQ1QJSab5xaLCSKEg/Xf+uVcFSTv
+lTRYHvAIUFQtH6A6xuGjOFSa3d0xbgOJYlGDSsdZ6eeEocpU86X6YEfJ4PU9RCXb
+6XfFqxG4qBhEnJiGVaqNSFkrlyEScS5ZOZeNJfxsxaHtddopPsoZXKWeG5enPQ+S
+i2BGZEVd73vgASYz8fqb9j8zwcUrL6ycSiWvWx2r94DPZ9DKIBzAKNF4X0dUkXn7
+NvRCloWYyGGXAoIBAC4AKw2scf1Wxv4DI74tKcbJXNx4dEDdfDqZgs7dElQjSfXE
+0i4azZjiFyfFcG9K84cb4nGw3cUJsMCeoBEnM6HQ6T98GRRwEr7Li5e5CcOzs0cQ
+psT2B4QcL//LeDYn54C7GqrJ3UijBXUo26f48uY0275jK65GPs/UXqwGvJoOFiH7
+i20CZ8vdSgmolsp7PtQGq8MXXlr7Ssrc1Dzu+qCDg8t236cxMJJ8quOvgl5N60Ms
+pWfJ/Z+6TjjfR7nJLYJftOjYDQBKCd+kNe/IL3QuIw/ASQp+I2QIgpirHd9ivvn3
+A8zMoEjBVG49/4ZoTmChfo7TwV/kZs7+xJu8V4cCggEAMIsePa3lRfAG4r+kRRZZ
+N7fyFrpEEFZgUy/RMOuJm3ZWXE9GVPp2fvvoh00IflKR/mOJ8RYpaXc0Kg9rJ3M0
+pr6WJqnAkQk9ZmoQ2s04aTEM6XyUJorEFtrvZSBurXjgTn3IhA/a1ev2Wv2nKCC5
+oQbmfAYZR9RmEPwttkVh5JR5u5n3aZ8oKM/nts4GC210hdL7TGq9MYquGnoBI0JH
+ofbfvGsTXzFJKl2J+S/AIL0MeICI9wYnyDc1L89caVhXZuDBpV0Nb7NiETcr9APZ
+Z8RCTy/tDpnIvgpaz43YYx2UMXu6k9LkftQ/0LSXKJbebvVma1eZo/PpOl7V0msZ
+jwKCAQEAljJQnDUWiK73B1vMLjeEwuy/p86p43xCXGMW1rX6+AayYt3/ODNmKh8s
+AhOF2Tl0dDJTBibJcpcoGCKm4nL3k9tO23U9omrBz5obRDl4To7l+ALv3x+tf+Fq
+b/nVkZKhyUh99RneOjOfpEI6Cd4ffQkXA05/bFGdVVaMJ0yzeC8qQ/QGbyAwpZiL
+c1e7Kju5uv+rT4czqKmQ+YsKpSM1Xjz9Mzoxs5E3OCdXSYsv7oo/sS1aXunQTZ6L
+xv1M+F9YlCtgo8+1IdlYvcFb4WusAIDf3xjO1bDCvlYzv5JBKWtyO3BVKkfWzSx1
+yKIoxKzIpzNh5dHk6iIDjp1B/YU6fA==
+-----END PRIVATE KEY-----
diff --git a/pkg/tlsclientconfig/testdata/full/client-cert-2.cert b/pkg/tlsclientconfig/testdata/full/client-cert-2.cert
new file mode 100644
index 0000000..5047957
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/full/client-cert-2.cert
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIE6DCCAtACCQCZd3nR9D1jPzANBgkqhkiG9w0BAQsFADA1MTMwMQYDVQQDDCpj
+b250YWluZXJzL2ltYWdlIHRlc3QgY2xpZW50IGNlcnRpZmljYXRlIDIwIBcNMTgw
+ODI4MTUzNzAyWhgPMjExODA4MDQxNTM3MDJaMDUxMzAxBgNVBAMMKmNvbnRhaW5l
+cnMvaW1hZ2UgdGVzdCBjbGllbnQgY2VydGlmaWNhdGUgMjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPQITxwSAieNkNlDDcZUlGeiQmm7tEF9TKqCF06j
+LvM+O8nA/DmNIIw84mAeuLH2/vv7jkS3Hz5MEj1duaiAAzhjRrcHTLj63ttcpomP
+yc5OyW1GiuVYWKLKdZWRPaiSBeIV+uCGMaxvAzPivYiNz1GhYYt0Jf84OZm8oCZT
+HEI/jNRp56nYQDIAW3dwf6LpQ2dyc3n1O2eysDp02AvWfIr9w8agUgMykWziN2ZX
+c3fs/8UJ1Ta81YTuuuwTfEB2TtUmzxYql4WjxzXOL/rN7PqlwG7SZ1q6hoACP4K7
+v4tu7cXq79F16xHoKolJaIHnCznZrK3Hmp846/+x0VWn4Ic05f3qB89WSl7i6oa2
+pdgDE5bteSFtEhNXvzy+eobcKqhhqT3TF6oQiD9ufz2s5eTRJmu31sVUOW2+2LaZ
+QipQu7G+awpqKh/k95GZNTXhWrDP1OQTtzYZui+l9Ri4lqM1LAlLb35zo6rqUj1A
+HfyQrwADFzRQRY/zPijI8I8fnfChGLuW3q/WC0CyrTIkviE/JGY7XmmyT/KxABhY
+rDCHWHXQ4nz14PulRBtvTJfMhHBcMM8guYzleeeGrcHX5y5IG7JR1QL+Y+h2zW0d
+29RbusXKAkaN7SqlAGB8NIwU5x/Y9UWD0ar9IEXQT2bDkXHWTk2qvbAXwvqkv3r5
+qbB9AgMBAAEwDQYJKoZIhvcNAQELBQADggIBAD0T3FqFhuJ5OGbYaay5Iw5EKLS+
+njNIDNvsy9Q8+F1aa/F27KJfi8x8EQj1YabwOt1gS201jwJshC+AirrJUxdGBJqC
+cVDPGeqY+zMKd+17ZgWKza81Y9qYBjx01v//Et5YYKmeS8q3xTsvbloJJ5D7vx5y
+VcNwnO7yx5/IMDWCIAbw5j2BikILW0gMCfBK90o4z7Le7kPFLreLiUCfXYZjfbT7
+bT2v8Oy0OISVNLQjajxepK5+C9Qupaj5nL0GtTj37FOs6rulcWEWqX+kGXSctvrA
+nuzcjGUkuQBOcMjEUaYRKLZ+Tghla4pFgJLrfKQgW+5Mahbnz3ehvzDc3LcScYCj
+u0qyP+w5rW8/Tm9vE9QqwblUX7wZ4/zqTDSv5spdHi5x4Q77MomjDEfP83QnEAhg
+Y4wixJBas64227rxJJQT30C2QcuwYMz4STQgjSGDPfomUr8tVPM8JcU4pq+fg7g9
+T0MvfNAWgMhUZllhxTntbHVbv43A2p3eEE0fuW5SOJyAvt9ROZMvQcDWJfw0TDey
+dn9+Bz7CMbJvZAoV8P4Gm1+iuWZhUWBG7FdrXEzhbDh2GkSmd3jmUSISVA1G061b
+5QVkW5NAr2jZrWhyTXiL0AFbD7QNKTJHma/IcRoJlxQ9rncIdgPLMNBbjridb4dn
+PllRbfAWuBgV7vLu
+-----END CERTIFICATE-----
diff --git a/pkg/tlsclientconfig/testdata/full/client-cert-2.key b/pkg/tlsclientconfig/testdata/full/client-cert-2.key
new file mode 100644
index 0000000..998bc5e
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/full/client-cert-2.key
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQD0CE8cEgInjZDZ
+Qw3GVJRnokJpu7RBfUyqghdOoy7zPjvJwPw5jSCMPOJgHrix9v77+45Etx8+TBI9
+XbmogAM4Y0a3B0y4+t7bXKaJj8nOTsltRorlWFiiynWVkT2okgXiFfrghjGsbwMz
+4r2Ijc9RoWGLdCX/ODmZvKAmUxxCP4zUaeep2EAyAFt3cH+i6UNncnN59TtnsrA6
+dNgL1nyK/cPGoFIDMpFs4jdmV3N37P/FCdU2vNWE7rrsE3xAdk7VJs8WKpeFo8c1
+zi/6zez6pcBu0mdauoaAAj+Cu7+Lbu3F6u/RdesR6CqJSWiB5ws52aytx5qfOOv/
+sdFVp+CHNOX96gfPVkpe4uqGtqXYAxOW7XkhbRITV788vnqG3CqoYak90xeqEIg/
+bn89rOXk0SZrt9bFVDltvti2mUIqULuxvmsKaiof5PeRmTU14Vqwz9TkE7c2Gbov
+pfUYuJajNSwJS29+c6Oq6lI9QB38kK8AAxc0UEWP8z4oyPCPH53woRi7lt6v1gtA
+sq0yJL4hPyRmO15psk/ysQAYWKwwh1h10OJ89eD7pUQbb0yXzIRwXDDPILmM5Xnn
+hq3B1+cuSBuyUdUC/mPods1tHdvUW7rFygJGje0qpQBgfDSMFOcf2PVFg9Gq/SBF
+0E9mw5Fx1k5Nqr2wF8L6pL96+amwfQIDAQABAoICAENiu/2jV2b2p4XnBzm6B1Tq
+sG4j/+2JnQ8EZ002sHNIvxfCK6P0vYUcFuK1+QhjjRul5i5BZuY7+VlqtSa7WqhK
+ea9224/E7p8iYXZg9zf595MuRJJ6J9ekEn4foigXUnqQ2TsAs1zLNtYwCWhYyGPh
+LV9lzkxAiV9Tf+G1V3tPsyLAr9DtvyRPhvJfZU9mHE/Hge2ucx36cMFjnHkAXFnB
+IzuI77ykTRYFDYk8Is19607MnUzFLizMA1/HLatbP/+J/OtBDaBAjnTkH8mg7Yx1
+EsNHZHTgRt24QTNnhsgI9K2PZ3OunR/PvVc1px8f3rC11AIUuTS4ciqkPorplY0W
+Wcgy9+saF2EL6Peb5zaWTAiCewrcI2ME9G6ytZtWovHzJqyE4iJMaQtLeczoaTk5
+lkl8r+zkAWGnxb6Xz1qd1UuAyXfZO3Ux7shxDJKxoQrgkHvAyEHkyBhJe3ghKElh
+C+mGr1z15R+buL8daUC+SG2z0H9gTcYiOcOznGUhtOgR2EPalAC/xKEtYjsEHf3F
+VtljEu29KPYcLokBFcI35Tij9KVfGJePMAEji8lLqrImwpPzsoPEdUVx7jvdy6gt
+hCsgIo8Vpd/3D12Q4ul+E7ztsWX6wYoxIYGK2/KSAjd0fpla/UFwkrpSSnqkGLR0
+qhoxZz1158vuAtQCb7DdAoIBAQD8bHdBmWovRRywjPaLGPngVAkR4z5yo1OM9lRE
+So6O9zyZvk7cJ4JQYpS3o1nXY9mw5YAG2qPrArXBIWwHq0vi7gkhng7gbmn/1j7o
+ZrgEE5z//tt0Gl5Yqs35uiMcOlTrQ/1DsHWfJDw6udkQG5EwyrNRvcxaeXGx9dWc
+Yzfq93WODSFWhzhgy0CG0q2Zj/VtgoO6gwkZI9KX9X7ELKyCfIqczqMYge5uxp0l
+dTS6kNrhUACGCU5yBREKX/XwNxreFS6AuCG0KhBjwX3NeuBfKC2yGAOxr91Yz3l4
+o91CKFXDVXWtpvJCW/zIaeSfByqhgBYUpZPg4VniNqMheB87AoIBAQD3fWiLbidV
+lRQ0O4jjAxtsUBKC95oxz4ipDerLlEAL7WA8h0eU05BLv8Nn5Ql5N7m/LTM6XWsg
+cWMObk6UDS4N1pR7Sg1sPAlzK52x8CYLDswnYWjeejCP5yY8v8u3NCqCPuxvDlPp
+0InR82xRunFEEG9ukC06krGDyMWCwaRbQRJBkRagBvUJA3/A+ZAC2vdN5WhPtZT3
+LT01T2Olk19URLgFTo1hia2o6L5cq7TXeNsZoNfLlaSV33/+fnYqb3F0SmDX+mOV
+8zRV4bp9Qdc2vNkdzzC8s2EXr3UBl4miT/ovV0X5v+KPyfH2Rf/Hf0AoBhiHWRIO
+NT9L912QFOOnAoIBABzXR8j2/mroOp7xfDnoYKSA9YhVrozqnGE+w+IJosAy/3mR
+hPEikoEcwmE5CMrTXcwYbMhbst7nMF0gtHcr2z/Scriklo6ODw5sLEPheKT+mLGn
+LOvXF3CKE361Bc3z3EAFRKq3PrkwKrGLCoIMpEou3s44IWE0/wiWThHQRFNUctoI
+Jgb87DQjBPxilfM1v5UDlIl6708wCJ/ULOe9Mvi1wiCoe3oRXmzJxKrC+YNXiaq0
+uVqXNZ9RdOD6ld8cbLVzNhz+7Nro83ZyZS1VHM3CiXYPyFxE+8Vp7zcZge5NLX9k
+BE4TBRsP55H+h2CkMPrC58L0KDFJjjuKgpkQYIECggEAb5scNBRepJdv3wYh+s2B
+5lxNnMXvwRqntCTUhy25nCdVyLXwr8qgPaiihA2jMgjROMc3XNCX6K95Th6sTNOM
+uyzTFK4WU4LXeBppKL71SPNJWVDyK7HKiHpioe6T/XAG42lg0cwSR1SFcipl5I1B
+WsJWnfNikhFo/9bgStDsP0Ke6vZ0z4GTqpbrW6ivKrp336beXWOzY6wA+DNu6lIF
+IUlD+xCrbRrbN1qNzdiY2rpjg7Em32YCLJv3alq5CvXqodiQx5Tgp9Re+4Opx6aT
+WNncxzaR8eaqmDXFfAxMQufyGLswkSnZD6Kv/LEgYWSfF+13zkF6hPG6M5W/maPx
+7QKCAQEA+EH/bZrfFIthC4pj2PZ7yR6AjvB0CUEc+s2Vk60kGCx7Ykl4OZfxvHHf
+A/4890Lb1+lEEo7+mBqq3IOOWKKYz+K7a81G668X3KltmJd3twMWzDy9+RN2sGbY
+ww3GQqS2B99N/E5e1N3TLm01aeqEUJqyYqsT0cx+vGQyiIqPF/1SLSqy0ZAl4ZrV
+Zb+Zl/EJxi+wPYuax90HFgHRc1RAkN5YDicLlmiPCUWjx9gPN5LOXKMQmh7J7cs1
+n1YP3erz76BQA9q0dkJmHr54FXxkBaFR3SGvIjxjArkNSvl2GuT73kgfbVFfT5QB
+Kesl5Q7sHOaJXry12QuWE5/Kj4absA==
+-----END PRIVATE KEY-----
diff --git a/pkg/tlsclientconfig/testdata/missing-cert/client-cert-1.key b/pkg/tlsclientconfig/testdata/missing-cert/client-cert-1.key
new file mode 120000
index 0000000..c8cf88d
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/missing-cert/client-cert-1.key
@@ -0,0 +1 @@
+../full/client-cert-1.key \ No newline at end of file
diff --git a/pkg/tlsclientconfig/testdata/missing-key/client-cert-1.cert b/pkg/tlsclientconfig/testdata/missing-key/client-cert-1.cert
new file mode 120000
index 0000000..2c62e46
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/missing-key/client-cert-1.cert
@@ -0,0 +1 @@
+../full/client-cert-1.cert \ No newline at end of file
diff --git a/pkg/tlsclientconfig/testdata/unreadable-ca/unreadable.crt b/pkg/tlsclientconfig/testdata/unreadable-ca/unreadable.crt
new file mode 120000
index 0000000..4cd6460
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/unreadable-ca/unreadable.crt
@@ -0,0 +1 @@
+/this/does/not/exist \ No newline at end of file
diff --git a/pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.cert b/pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.cert
new file mode 120000
index 0000000..4cd6460
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.cert
@@ -0,0 +1 @@
+/this/does/not/exist \ No newline at end of file
diff --git a/pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.key b/pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.key
new file mode 120000
index 0000000..c8cf88d
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.key
@@ -0,0 +1 @@
+../full/client-cert-1.key \ No newline at end of file
diff --git a/pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.cert b/pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.cert
new file mode 120000
index 0000000..2c62e46
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.cert
@@ -0,0 +1 @@
+../full/client-cert-1.cert \ No newline at end of file
diff --git a/pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.key b/pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.key
new file mode 120000
index 0000000..4cd6460
--- /dev/null
+++ b/pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.key
@@ -0,0 +1 @@
+/this/does/not/exist \ No newline at end of file
diff --git a/pkg/tlsclientconfig/tlsclientconfig.go b/pkg/tlsclientconfig/tlsclientconfig.go
new file mode 100644
index 0000000..c6ec84b
--- /dev/null
+++ b/pkg/tlsclientconfig/tlsclientconfig.go
@@ -0,0 +1,103 @@
+package tlsclientconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+)
+
+// SetupCertificates opens all .crt, .cert, and .key files in dir, appending CA certificates and loading client certificate/key pairs into tlsc as appropriate.
+func SetupCertificates(dir string, tlsc *tls.Config) error {
+ logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
+ fs, err := os.ReadDir(dir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ if os.IsPermission(err) {
+ logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err)
+ return nil
+ }
+ return err
+ }
+
+ for _, f := range fs {
+ fullPath := filepath.Join(dir, f.Name())
+ if strings.HasSuffix(f.Name(), ".crt") {
+ logrus.Debugf(" crt: %s", fullPath)
+ data, err := os.ReadFile(fullPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ // Dangling symbolic link?
+ // Race with someone who deleted the
+ // file after we read the directory's
+ // list of contents?
+ logrus.Warnf("error reading certificate %q: %v", fullPath, err)
+ continue
+ }
+ return err
+ }
+ if tlsc.RootCAs == nil {
+ systemPool, err := x509.SystemCertPool()
+ if err != nil {
+ return fmt.Errorf("unable to get system cert pool: %w", err)
+ }
+ tlsc.RootCAs = systemPool
+ }
+ tlsc.RootCAs.AppendCertsFromPEM(data)
+ }
+ if strings.HasSuffix(f.Name(), ".cert") {
+ certName := f.Name()
+ keyName := certName[:len(certName)-5] + ".key"
+ logrus.Debugf(" cert: %s", fullPath)
+ if !hasFile(fs, keyName) {
+ return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+ }
+ cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
+ if err != nil {
+ return err
+ }
+ tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
+ }
+ if strings.HasSuffix(f.Name(), ".key") {
+ keyName := f.Name()
+ certName := keyName[:len(keyName)-4] + ".cert"
+ logrus.Debugf(" key: %s", fullPath)
+ if !hasFile(fs, certName) {
+ return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
+ }
+ }
+ }
+ return nil
+}
+
+func hasFile(files []os.DirEntry, name string) bool {
+ return slices.ContainsFunc(files, func(f os.DirEntry) bool {
+ return f.Name() == name
+ })
+}
+
+// NewTransport creates a default transport.
+func NewTransport() *http.Transport {
+ direct := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }
+ tr := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: direct.DialContext,
+ TLSHandshakeTimeout: 10 * time.Second,
+ IdleConnTimeout: 90 * time.Second,
+ MaxIdleConns: 100,
+ }
+ return tr
+}
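Editor's note: the two exported helpers in this file are typically combined to build an HTTP client that trusts per-registry certificates. The following is a minimal sketch, not part of the patch; the certificate directory and registry URL are placeholders.

    package main

    import (
        "crypto/tls"
        "log"
        "net/http"

        "github.com/containers/image/v5/pkg/tlsclientconfig"
    )

    func main() {
        tlsc := &tls.Config{}
        // Load *.crt CA certificates and *.cert/*.key client pairs from a directory (placeholder path).
        if err := tlsclientconfig.SetupCertificates("/etc/docker/certs.d/registry.example.com", tlsc); err != nil {
            log.Fatal(err)
        }
        tr := tlsclientconfig.NewTransport() // proxy-aware transport with the package's default timeouts
        tr.TLSClientConfig = tlsc
        client := &http.Client{Transport: tr}

        resp, err := client.Get("https://registry.example.com/v2/") // placeholder registry endpoint
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        log.Println(resp.Status)
    }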
diff --git a/pkg/tlsclientconfig/tlsclientconfig_test.go b/pkg/tlsclientconfig/tlsclientconfig_test.go
new file mode 100644
index 0000000..aa1f7ff
--- /dev/null
+++ b/pkg/tlsclientconfig/tlsclientconfig_test.go
@@ -0,0 +1,134 @@
+package tlsclientconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/internal/set"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSetupCertificates(t *testing.T) {
+ // Success
+ tlsc := tls.Config{}
+ err := SetupCertificates("testdata/full", &tlsc)
+ require.NoError(t, err)
+ require.NotNil(t, tlsc.RootCAs)
+
+ // SystemCertPool is implemented natively, and .Subjects() does not
+ // return raw certificates, on some systems (as of Go 1.18,
+ // Windows, macOS, iOS); so, .Subjects() is deprecated.
+ // We still use .Subjects() in these tests, because they work
+ // acceptably even in the native case, and they work fine on Linux,
+ // which we care about the most.
+
+ // For an unknown reason, with Go 1.18, as of Mar 15 2022,
+ // (golangci-lint) reports staticcheck SA1019 about using
+ // the deprecated .Subjects() function, but the //lint:ignore
+ // directives are ineffective (and cause extra warnings about
+ // pointless lint:ignore directives). So, use the big hammer
+ // of silencing staticcheck entirely; that should be removed
+ // as soon as practical.
+
+ // On systems where SystemCertPool is not special-cased, RootCAs include SystemCertPool;
+ // on systems where SystemCertPool is special-cased, this compares two empty sets
+ // and succeeds.
+ // There isn’t a plausible alternative to calling .Subjects() here.
+ loadedSubjectBytes := set.New[string]()
+ // lint:ignore SA1019 Receiving no data for system roots is acceptable.
+ for _, s := range tlsc.RootCAs.Subjects() { //nolint staticcheck: the lint:ignore directive is somehow not recognized (and causes an extra warning!)
+ loadedSubjectBytes.Add(string(s))
+ }
+ systemCertPool, err := x509.SystemCertPool()
+ require.NoError(t, err)
+ // lint:ignore SA1019 Receiving no data for system roots is acceptable.
+ for _, s := range systemCertPool.Subjects() { //nolint staticcheck: the lint:ignore directive is somehow not recognized (and causes an extra warning!)
+ ok := loadedSubjectBytes.Contains(string(s))
+ assert.True(t, ok)
+ }
+
+ // RootCAs include our certificates.
+ // We could possibly test this without .Subjects() by validating certificates
+ // signed by our test CAs.
+ loadedSubjectCNs := set.New[string]()
+ // lint:ignore SA1019 We only care about non-system roots here.
+ for _, s := range tlsc.RootCAs.Subjects() { //nolint staticcheck: the lint:ignore directive is somehow not recognized (and causes an extra warning!)
+ subjectRDN := pkix.RDNSequence{}
+ rest, err := asn1.Unmarshal(s, &subjectRDN)
+ require.NoError(t, err)
+ require.Empty(t, rest)
+ subject := pkix.Name{}
+ subject.FillFromRDNSequence(&subjectRDN)
+ loadedSubjectCNs.Add(subject.CommonName)
+ }
+ ok := loadedSubjectCNs.Contains("containers/image test CA certificate 1")
+ assert.True(t, ok)
+ ok = loadedSubjectCNs.Contains("containers/image test CA certificate 2")
+ assert.True(t, ok)
+ // Certificates include our certificates
+ require.Len(t, tlsc.Certificates, 2)
+ names := []string{}
+ for _, c := range tlsc.Certificates {
+ require.Len(t, c.Certificate, 1)
+ parsed, err := x509.ParseCertificate(c.Certificate[0])
+ require.NoError(t, err)
+ names = append(names, parsed.Subject.CommonName)
+ }
+ assert.ElementsMatch(t, []string{
+ "containers/image test client certificate 1",
+ "containers/image test client certificate 2",
+ }, names)
+
+ // Directory does not exist
+ tlsc = tls.Config{}
+ err = SetupCertificates("/this/does/not/exist", &tlsc)
+ require.NoError(t, err)
+ assert.Equal(t, &tls.Config{}, &tlsc)
+
+ // Directory not accessible
+ unreadableDir := t.TempDir()
+ defer func() {
+ _ = os.Chmod(unreadableDir, 0700)
+ }()
+ err = os.Chmod(unreadableDir, 000)
+ require.NoError(t, err)
+ tlsc = tls.Config{}
+ err = SetupCertificates(unreadableDir, &tlsc)
+ assert.NoError(t, err)
+ assert.Equal(t, &tls.Config{}, &tlsc)
+
+ // Other error reading the directory
+ tlsc = tls.Config{}
+ err = SetupCertificates("/dev/null/is/not/a/directory", &tlsc)
+ assert.Error(t, err)
+
+ // Unreadable system cert pool untested
+ // Unreadable CA certificate
+ tlsc = tls.Config{}
+ err = SetupCertificates("testdata/unreadable-ca", &tlsc)
+ assert.NoError(t, err)
+ assert.Nil(t, tlsc.RootCAs)
+
+ // Missing key file
+ tlsc = tls.Config{}
+ err = SetupCertificates("testdata/missing-key", &tlsc)
+ assert.Error(t, err)
+ // Missing certificate file
+ tlsc = tls.Config{}
+ err = SetupCertificates("testdata/missing-cert", &tlsc)
+ assert.Error(t, err)
+
+ // Unreadable key file
+ tlsc = tls.Config{}
+ err = SetupCertificates("testdata/unreadable-key", &tlsc)
+ assert.Error(t, err)
+ // Unreadable certificate file
+ tlsc = tls.Config{}
+ err = SetupCertificates("testdata/unreadable-cert", &tlsc)
+ assert.Error(t, err)
+}
diff --git a/registries.conf b/registries.conf
new file mode 100644
index 0000000..d424dc5
--- /dev/null
+++ b/registries.conf
@@ -0,0 +1,77 @@
+# For more information on this configuration file, see containers-registries.conf(5).
+#
+# NOTE: RISK OF USING UNQUALIFIED IMAGE NAMES
+# We recommend always using fully qualified image names including the registry
+# server (full dns name), namespace, image name, and tag
+# (e.g., registry.redhat.io/ubi8/ubi:latest). Pulling by digest (i.e.,
+# quay.io/repository/name@digest) further eliminates the ambiguity of tags.
+# When using short names, there is always an inherent risk that the image being
+# pulled could be spoofed. For example, a user wants to pull an image named
+# `foobar` from a registry and expects it to come from myregistry.com. If
+# myregistry.com is not first in the search list, an attacker could place a
+# different `foobar` image at a registry earlier in the search list. The user
+# would accidentally pull and run the attacker's image and code rather than the
+# intended content. We recommend only adding registries which are completely
+# trusted (i.e., registries which don't allow unknown or anonymous users to
+# create accounts with arbitrary names). This will prevent an image from being
+# spoofed, squatted or otherwise made insecure. If it is necessary to use one
+# of these registries, it should be added at the end of the list.
+#
+# # An array of host[:port] registries to try when pulling an unqualified image, in order.
+# unqualified-search-registries = ["example.com"]
+#
+# [[registry]]
+# # The "prefix" field is used to choose the relevant [[registry]] TOML table;
+# # (only) the TOML table with the longest match for the input image name
+# # (taking into account namespace/repo/tag/digest separators) is used.
+# #
+# # The prefix can also be of the form: *.example.com for wildcard subdomain
+# # matching.
+# #
+# # If the prefix field is missing, it defaults to be the same as the "location" field.
+# prefix = "example.com/foo"
+#
+# # If true, unencrypted HTTP as well as TLS connections with untrusted
+# # certificates are allowed.
+# insecure = false
+#
+# # If true, pulling images with matching names is forbidden.
+# blocked = false
+#
+# # The physical location of the "prefix"-rooted namespace.
+# #
+# # By default, this is equal to "prefix" (in which case "prefix" can be omitted
+# # and the [[registry]] TOML table can only specify "location").
+# #
+# # Example: Given
+# # prefix = "example.com/foo"
+# # location = "internal-registry-for-example.com/bar"
+# # requests for the image example.com/foo/myimage:latest will actually work with the
+# # internal-registry-for-example.com/bar/myimage:latest image.
+#
+# # The location can be empty if prefix is in a
+# # wildcarded format: "*.example.com". In this case, the input reference will
+# # be used as-is without any rewrite.
+# location = "internal-registry-for-example.com/bar"
+#
+# # (Possibly-partial) mirrors for the "prefix"-rooted namespace.
+# #
+# # The mirrors are attempted in the specified order; the first one that can be
+# # contacted and contains the image will be used (and if none of the mirrors contains the image,
+# # the primary location specified by the "registry.location" field, or using the unmodified
+# # user-specified reference, is tried last).
+# #
+# # Each TOML table in the "mirror" array can contain the following fields, with the same semantics
+# # as if specified in the [[registry]] TOML table directly:
+# # - location
+# # - insecure
+# [[registry.mirror]]
+# location = "example-mirror-0.local/mirror-for-foo"
+# [[registry.mirror]]
+# location = "example-mirror-1.local/mirrors/foo"
+# insecure = true
+# # Given the above, a pull of example.com/foo/image:latest will try:
+# # 1. example-mirror-0.local/mirror-for-foo/image:latest
+# # 2. example-mirror-1.local/mirrors/foo/image:latest
+# # 3. internal-registry-for-example.com/bar/image:latest
+# # in order, and use the first one that exists.
diff --git a/sif/load.go b/sif/load.go
new file mode 100644
index 0000000..70758ad
--- /dev/null
+++ b/sif/load.go
@@ -0,0 +1,210 @@
+package sif
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+// injectedScriptTargetPath is the path injectedScript should be written to in the created image.
+const injectedScriptTargetPath = "/podman/runscript"
+
+// parseDefFile parses a SIF definition file from reader,
+// and returns non-trivial contents of the %environment and %runscript sections.
+func parseDefFile(reader io.Reader) ([]string, []string, error) {
+ type parserState int
+ const (
+ parsingOther parserState = iota
+ parsingEnvironment
+ parsingRunscript
+ )
+
+ environment := []string{}
+ runscript := []string{}
+
+ state := parsingOther
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ switch {
+ case s == `%environment`:
+ state = parsingEnvironment
+ case s == `%runscript`:
+ state = parsingRunscript
+ case strings.HasPrefix(s, "%"):
+ state = parsingOther
+ case state == parsingEnvironment:
+ if s != "" && !strings.HasPrefix(s, "#") {
+ environment = append(environment, s)
+ }
+ case state == parsingRunscript:
+ runscript = append(runscript, s)
+ default: // parsingOther: ignore the line
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err)
+ }
+ return environment, runscript, nil
+}
+
+// generateInjectedScript generates a shell script based on
+// SIF definition file %environment and %runscript data, and returns it.
+func generateInjectedScript(environment []string, runscript []string) []byte {
+ script := fmt.Sprintf("#!/bin/bash\n"+
+ "%s\n"+
+ "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n"))
+ return []byte(script)
+}
+
+// processDefFile finds sif.DataDeffile in sifImage, if any,
+// and returns:
+// - the command to run
+// - contents of a script to inject as injectedScriptTargetPath, or nil
+func processDefFile(sifImage *sif.FileImage) (string, []byte, error) {
+ var environment, runscript []string
+
+ desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile))
+ if err == nil {
+ environment, runscript, err = parseDefFile(desc.GetReader())
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ var command string
+ var injectedScript []byte
+ if len(environment) == 0 && len(runscript) == 0 {
+ command = "bash"
+ injectedScript = nil
+ } else {
+ injectedScript = generateInjectedScript(environment, runscript)
+ command = injectedScriptTargetPath
+ }
+
+ return command, injectedScript, nil
+}
+
+func writeInjectedScript(extractedRootPath string, injectedScript []byte) error {
+ if injectedScript == nil {
+ return nil
+ }
+ filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath)
+ parentDirPath := filepath.Dir(filePath)
+ if err := os.MkdirAll(parentDirPath, 0755); err != nil {
+ return fmt.Errorf("creating %s: %w", parentDirPath, err)
+ }
+ if err := os.WriteFile(filePath, injectedScript, 0755); err != nil {
+ return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
+ }
+ return nil
+}
+
+// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath.
+// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use,
+// if necessary.
+func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error {
+ // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
+ // for our use.
+ defer os.RemoveAll(extractedRootPath)
+
+ // Almost everything in extractedRootPath comes from squashFSPath.
+ conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
+ extractedRootPath, squashFSPath, extractedRootPath, tarPath)
+ script := "#!/bin/sh\n" + conversionCommand + "\n"
+ if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
+ return err
+ }
+ defer os.Remove(scriptPath)
+
+ // On top of squashFSPath, we only add injectedScript, if necessary.
+ if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand)
+ cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("converting image: %w, output: %s", err, string(output))
+ }
+ logrus.Debugf("... finished converting squashfs to tar")
+ return nil
+}
+
+// convertSIFToElements processes sifImage and creates/returns
+// the relevant elements for constructing an OCI-like image:
+// - A path to a tar file containing a root filesystem,
+// - A command to run.
+// The returned tar file path is inside tempDir, which can be assumed to be empty
+// at start, and is exclusively used by the current process (i.e. it is safe
+// to use hard-coded relative paths within it).
+func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
+ // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive,
+ // so we can just hard-code a set of unique values here.
+ // We create and/or manage cleanup of these two paths.
+ squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
+ tarPath := filepath.Join(tempDir, "rootfs.tar")
+ // We only allocate these paths, the user is responsible for cleaning them up.
+ extractedRootPath := filepath.Join(tempDir, "rootfs")
+ scriptPath := filepath.Join(tempDir, "script")
+
+ succeeded := false
+ // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
+ // for our use.
+ // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need
+ // to run both creation and consumption of extractedRootPath in the same fakeroot context.
+ // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2
+ // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time.
+ // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser
+ // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy.
+ defer os.Remove(squashFSPath)
+ defer func() {
+ if !succeeded {
+ os.Remove(tarPath)
+ }
+ }()
+
+ command, injectedScript, err := processDefFile(sifImage)
+ if err != nil {
+ return "", nil, err
+ }
+
+ rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
+ if err != nil {
+ return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err)
+ }
+ // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4
+ // has an -o option that allows extracting a squashfs from the SIF file directly,
+ // but that version is not currently available in RHEL 8.
+ logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
+ if err := func() error { // A scope for defer
+ f, err := os.Create(squashFSPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
+ return err
+ }
+ return nil
+ }(); err != nil {
+ return "", nil, err
+ }
+ logrus.Debugf("... finished creating a temporary squashfs image")
+
+ if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
+ return "", nil, err
+ }
+ succeeded = true
+ return tarPath, []string{command}, nil
+}
diff --git a/sif/load_test.go b/sif/load_test.go
new file mode 100644
index 0000000..ee9bc72
--- /dev/null
+++ b/sif/load_test.go
@@ -0,0 +1,58 @@
+package sif
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseDefFile(t *testing.T) {
+ for _, c := range []struct {
+ name string
+ input string
+ environment []string
+ runscript []string
+ }{
+ {"Empty input", "", []string{}, []string{}},
+ {
+ name: "Basic smoke test",
+ input: "Bootstrap: library\n" +
+ "%environment\n" +
+ " export FOO=world\n" +
+ " export BAR=baz\n" +
+ "%runscript\n" +
+ ` echo "Hello $FOO"` + "\n" +
+ " sleep 5\n" +
+ "%help\n" +
+ " Abandon all hope.\n",
+ environment: []string{"export FOO=world", "export BAR=baz"},
+ runscript: []string{`echo "Hello $FOO"`, "sleep 5"},
+ },
+ {
+ name: "Trailing section marker",
+ input: "Bootstrap: library\n" +
+ "%environment\n" +
+ " export FOO=world\n" +
+ "%runscript",
+ environment: []string{"export FOO=world"},
+ runscript: []string{},
+ },
+ } {
+ env, rs, err := parseDefFile(bytes.NewReader([]byte(c.input)))
+ require.NoError(t, err, c.name)
+ assert.Equal(t, c.environment, env, c.name)
+ assert.Equal(t, c.runscript, rs, c.name)
+ }
+}
+
+func TestGenerateInjectedScript(t *testing.T) {
+ res := generateInjectedScript([]string{"export FOO=world", "export BAR=baz"},
+ []string{`echo "Hello $FOO"`, "sleep 5"})
+ assert.Equal(t, "#!/bin/bash\n"+
+ "export FOO=world\n"+
+ "export BAR=baz\n"+
+ `echo "Hello $FOO"`+"\n"+
+ "sleep 5\n", string(res))
+}
diff --git a/sif/src.go b/sif/src.go
new file mode 100644
index 0000000..261cfbe
--- /dev/null
+++ b/sif/src.go
@@ -0,0 +1,206 @@
+package sif
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecs "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+type sifImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
+ ref sifReference
+ workDir string
+ layerDigest digest.Digest
+ layerSize int64
+ layerFile string
+ config []byte
+ configDigest digest.Digest
+ manifest []byte
+}
+
+// getBlobInfo returns the digest and size of the provided file.
+func getBlobInfo(path string) (digest.Digest, int64, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
+ }
+ defer f.Close()
+
+ // TODO: Instead of writing the tar file to disk, and reading
+ // it here again, stream the tar file to a pipe and
+ // compute the digest while writing it to disk.
+ logrus.Debugf("Computing a digest of the SIF conversion output...")
+ digester := digest.Canonical.Digester()
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(digester.Hash(), f)
+ if err != nil {
+ return "", -1, fmt.Errorf("reading %q: %w", path, err)
+ }
+ digest := digester.Digest()
+ logrus.Debugf("... finished computing the digest of the SIF conversion output")
+
+ return digest, size, nil
+}
+
+// newImageSource returns an ImageSource for reading from an existing SIF image file.
+// newImageSource extracts SIF objects and saves them in a temp directory.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (private.ImageSource, error) {
+ sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY))
+ if err != nil {
+ return nil, fmt.Errorf("loading SIF file: %w", err)
+ }
+ defer func() {
+ _ = sifImg.UnloadContainer()
+ }()
+
+ workDir, err := tmpdir.MkDirBigFileTemp(sys, "sif")
+ if err != nil {
+ return nil, fmt.Errorf("creating temp directory: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ os.RemoveAll(workDir)
+ }
+ }()
+
+ layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir)
+ if err != nil {
+ return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err)
+ }
+
+ layerDigest, layerSize, err := getBlobInfo(layerPath)
+ if err != nil {
+ return nil, fmt.Errorf("gathering blob information: %w", err)
+ }
+
+ created := sifImg.ModifiedAt()
+ config := imgspecv1.Image{
+ Created: &created,
+ Platform: imgspecv1.Platform{
+ Architecture: sifImg.PrimaryArch(),
+ OS: "linux",
+ },
+ Config: imgspecv1.ImageConfig{
+ Cmd: commandLine,
+ },
+ RootFS: imgspecv1.RootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{layerDigest},
+ },
+ History: []imgspecv1.History{
+ {
+ Created: &created,
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
+ Comment: "imported from SIF, uuid: " + sifImg.ID(),
+ },
+ {
+ Created: &created,
+ CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]",
+ EmptyLayer: true,
+ },
+ },
+ }
+ configBytes, err := json.Marshal(&config)
+ if err != nil {
+ return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err)
+ }
+ configDigest := digest.Canonical.FromBytes(configBytes)
+
+ manifest := imgspecv1.Manifest{
+ Versioned: imgspecs.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageManifest,
+ Config: imgspecv1.Descriptor{
+ Digest: configDigest,
+ Size: int64(len(configBytes)),
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ },
+ Layers: []imgspecv1.Descriptor{{
+ Digest: layerDigest,
+ Size: layerSize,
+ MediaType: imgspecv1.MediaTypeImageLayer,
+ }},
+ }
+ manifestBytes, err := json.Marshal(&manifest)
+ if err != nil {
+ return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err)
+ }
+
+ succeeded = true
+ s := &sifImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+
+ ref: ref,
+ workDir: workDir,
+ layerDigest: layerDigest,
+ layerSize: layerSize,
+ layerFile: layerPath,
+ config: configBytes,
+ configDigest: configDigest,
+ manifest: manifestBytes,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *sifImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *sifImageSource) Close() error {
+ return os.RemoveAll(s.workDir)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ switch info.Digest {
+ case s.configDigest:
+ return io.NopCloser(bytes.NewReader(s.config)), int64(len(s.config)), nil
+ case s.layerDigest:
+ reader, err := os.Open(s.layerFile)
+ if err != nil {
+ return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err)
+ }
+ return reader, s.layerSize, nil
+ default:
+ return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String())
+ }
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", errors.New("manifest lists are not supported by the sif transport")
+ }
+ return s.manifest, imgspecv1.MediaTypeImageManifest, nil
+}
diff --git a/sif/src_test.go b/sif/src_test.go
new file mode 100644
index 0000000..8402e23
--- /dev/null
+++ b/sif/src_test.go
@@ -0,0 +1,5 @@
+package sif
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageSource = (*sifImageSource)(nil)
diff --git a/sif/transport.go b/sif/transport.go
new file mode 100644
index 0000000..4c09010
--- /dev/null
+++ b/sif/transport.go
@@ -0,0 +1,160 @@
+package sif
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for SIF images.
+var Transport = sifTransport{}
+
+type sifTransport struct{}
+
+func (t sifTransport) Name() string {
+ return "sif"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return NewReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will not be ""; that value is always allowed.
+func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error {
+ if !strings.HasPrefix(scope, "/") {
+ return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ }
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+ cleaned := filepath.Clean(scope)
+ if cleaned != scope {
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ }
+ return nil
+}
+
+// sifReference is an ImageReference for SIF images.
+type sifReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ file string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+}
+
+// There is no sif.ParseReference because it is rather pointless.
+// Callers who need a transport-independent interface will go through
+// sifTransport.ParseReference; callers who intentionally deal with SIF files
+// can use sif.NewReference.
+
+// NewReference returns an image file reference for a specified path.
+func NewReference(file string) (types.ImageReference, error) {
+ // We do not expose an API supplying the resolvedFile; we could, but recomputing it
+ // is generally cheap enough that we prefer being confident about the properties of resolvedFile.
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+ if err != nil {
+ return nil, err
+ }
+ return sifReference{file: file, resolvedFile: resolved}, nil
+}
+
+func (ref sifReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+// instead, see transports.ImageName().
+func (ref sifReference) StringWithinTransport() string {
+ return ref.file
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref sifReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref sifReference) PolicyConfigurationIdentity() string {
+ return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref sifReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedFile
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 || lastSlash == 0 {
+ break
+ }
+ path = path[:lastSlash]
+ res = append(res, path)
+ }
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by sifTransport.ValidatePolicyConfigurationScope above.
+ return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return nil, errors.New(`"sif:" locations can only be read from, not written to`)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.New("Deleting images not implemented for sif: images")
+}
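Editor's note: taken together, this transport and the image source in src.go let a caller read an OCI-style manifest straight out of a SIF file. A rough usage sketch follows; the file path is a placeholder, and the underlying conversion requires unsquashfs and fakeroot on the host, as noted in load.go.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/containers/image/v5/sif"
    )

    func main() {
        ctx := context.Background()
        ref, err := sif.NewReference("/tmp/example.sif") // placeholder path to a SIF image
        if err != nil {
            log.Fatal(err)
        }
        // Converts the SIF rootfs to a tar layer under a temporary work directory.
        src, err := ref.NewImageSource(ctx, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer src.Close()

        manifest, mimeType, err := src.GetManifest(ctx, nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s\n%s\n", mimeType, manifest)
    }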
diff --git a/sif/transport_test.go b/sif/transport_test.go
new file mode 100644
index 0000000..8fe738f
--- /dev/null
+++ b/sif/transport_test.go
@@ -0,0 +1,175 @@
+package sif
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ _ "github.com/containers/image/v5/internal/testing/explicitfilepath-tmpdir"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "sif", Transport.Name())
+}
+
+func TestTransportParseReference(t *testing.T) {
+ testNewReference(t, Transport.ParseReference)
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ for _, scope := range []string{
+ "/etc/passwd",
+ "/this/does/not/exist",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+
+ for _, scope := range []string{
+ "relative/path",
+ "/double//slashes",
+ "/has/./dot",
+ "/has/dot/../dot",
+ "/trailing/slash/",
+ "/",
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.Error(t, err, scope)
+ }
+}
+
+func TestNewReference(t *testing.T) {
+ testNewReference(t, NewReference)
+}
+
+// testNewReference is a test shared for Transport.ParseReference and NewReference.
+func testNewReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
+ tmpDir := t.TempDir()
+ tmpFile := filepath.Join(tmpDir, "image.sif")
+ err := os.WriteFile(tmpFile, nil, 0600)
+ require.NoError(t, err)
+
+ for _, file := range []string{
+ "/dev/null",
+ tmpFile,
+ "relativepath",
+ tmpDir + "/thisdoesnotexist",
+ } {
+ ref, err := fn(file)
+ require.NoError(t, err, file)
+ sifRef, ok := ref.(sifReference)
+ require.True(t, ok)
+ assert.Equal(t, file, sifRef.file, file)
+ }
+
+ _, err = fn(tmpDir + "/thisparentdoesnotexist/something")
+ assert.Error(t, err)
+}
+
+// refToTempFile creates a temporary file and returns a reference to it.
+// The caller should
+//
+// defer os.Remove(tmpFile)
+func refToTempFile(t *testing.T) (ref types.ImageReference, tmpFile string) {
+ f, err := os.CreateTemp("", "sif-transport-test")
+ require.NoError(t, err)
+ tmpFile = f.Name()
+ err = f.Close()
+ require.NoError(t, err)
+ ref, err = NewReference(tmpFile)
+ require.NoError(t, err)
+ return ref, tmpFile
+}
+
+func TestReferenceTransport(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+ assert.Equal(t, Transport, ref.Transport())
+}
+
+func TestReferenceStringWithinTransport(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+ assert.Equal(t, tmpFile, ref.StringWithinTransport())
+}
+
+func TestReferenceDockerReference(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+ assert.Nil(t, ref.DockerReference())
+}
+
+func TestReferencePolicyConfigurationIdentity(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+
+ assert.Equal(t, tmpFile, ref.PolicyConfigurationIdentity())
+ // A non-canonical path. Test just one, the various other cases are
+ // tested in explicitfilepath.ResolvePathToFullyExplicit.
+ ref, err := NewReference("/./" + tmpFile)
+ require.NoError(t, err)
+ assert.Equal(t, tmpFile, ref.PolicyConfigurationIdentity())
+}
+
+func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+ // We don't really know enough to make a full equality test here.
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns)
+ assert.NotEmpty(t, ns)
+ assert.Equal(t, filepath.Dir(tmpFile), ns[0])
+
+ // Test with a known path where the directory should exist. Test just one non-canonical
+ // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
+ for _, path := range []string{"/usr/share/probablydoesnotexist.sif", "/usr/share/././probablydoesnoexist.sif"} {
+ _, err := os.Lstat(filepath.Dir(path))
+ require.NoError(t, err)
+ ref, err := NewReference(path)
+ require.NoError(t, err)
+ ns := ref.PolicyConfigurationNamespaces()
+ require.NotNil(t, ns)
+ assert.Equal(t, []string{"/usr/share", "/usr"}, ns)
+ }
+
+ // "/" as a corner case.
+ ref, err := NewReference("/")
+ require.NoError(t, err)
+ assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces())
+}
+
+func TestReferenceNewImage(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+ // A pretty pointless smoke test for now;
+ // we don't want to require every developer of c/image to have fakeroot etc. around.
+ _, err := ref.NewImage(context.Background(), nil)
+ assert.Error(t, err) // Empty file is not valid
+}
+
+func TestReferenceNewImageSource(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+ // A pretty pointless smoke test for now;
+ // we don't want to require every developer of c/image to have fakeroot etc. around.
+ _, err := ref.NewImageSource(context.Background(), nil)
+ assert.Error(t, err) // Empty file is not valid
+}
+
+func TestReferenceNewImageDestination(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+ _, err := ref.NewImageDestination(context.Background(), nil)
+ assert.Error(t, err)
+}
+
+func TestReferenceDeleteImage(t *testing.T) {
+ ref, tmpFile := refToTempFile(t)
+ defer os.Remove(tmpFile)
+ err := ref.DeleteImage(context.Background(), nil)
+ assert.Error(t, err)
+}
diff --git a/signature/docker.go b/signature/docker.go
new file mode 100644
index 0000000..d6075f8
--- /dev/null
+++ b/signature/docker.go
@@ -0,0 +1,102 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/opencontainers/go-digest"
+ "golang.org/x/exp/slices"
+)
+
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+ // Passphrase to use when signing with the key identity.
+ Passphrase string
+}
+
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return nil, err
+ }
+ sig := newUntrustedSignature(manifestDigest, dockerReference)
+
+ var passphrase string
+ if options != nil {
+ passphrase = options.Passphrase
+ // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
+ if strings.Contains(passphrase, "\n") {
+ return nil, errors.New("invalid passphrase: must not contain a line break")
+ }
+ }
+
+ return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
+}
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech.
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
+ expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+ sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity})
+ return sig, err
+}
+
+// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities
+// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that
+// was used to verify it.
+func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte,
+ expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) {
+ expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
+ if err != nil {
+ return nil, "", err
+ }
+ var matchedKeyIdentity string
+ sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+ validateKeyIdentity: func(keyIdentity string) error {
+ if !slices.Contains(expectedKeyIdentities, keyIdentity) {
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprints %v", keyIdentity, expectedKeyIdentities))
+ }
+ matchedKeyIdentity = keyIdentity
+ return nil
+ },
+ validateSignedDockerReference: func(signedDockerReference string) error {
+ signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
+ if err != nil {
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference))
+ }
+ if signedRef.String() != expectedRef.String() {
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s",
+ signedDockerReference, expectedDockerReference))
+ }
+ return nil
+ },
+ validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+ matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
+ if err != nil {
+ return err
+ }
+ if !matches {
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest))
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return nil, "", err
+ }
+ return sig, matchedKeyIdentity, err
+}
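
For orientation, a minimal sketch of how an external caller might combine the signing and verification helpers added above; the manifest path, image reference, key fingerprint and passphrase are placeholders, and a usable GPG keyring with the signing key is assumed:

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/signature"
)

func main() {
	manifest, err := os.ReadFile("manifest.json") // placeholder path
	if err != nil {
		panic(err)
	}

	mech, err := signature.NewGPGSigningMechanism() // uses the default GPG home directory
	if err != nil {
		panic(err)
	}
	defer mech.Close()

	const ref = "example.com/ns/repo:latest"                       // placeholder reference
	const fingerprint = "0123456789ABCDEF0123456789ABCDEF01234567" // placeholder fingerprint

	// Sign the manifest as ref with the given key, supplying its passphrase.
	sig, err := signature.SignDockerManifestWithOptions(manifest, ref, mech, fingerprint,
		&signature.SignOptions{Passphrase: "secret"})
	if err != nil {
		panic(err)
	}

	// Verify the signature back, pinning both the reference and the key fingerprint.
	verified, err := signature.VerifyDockerManifestSignature(sig, manifest, ref, mech, fingerprint)
	if err != nil {
		panic(err)
	}
	fmt.Println(verified.DockerReference, verified.DockerManifestDigest)
}
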
diff --git a/signature/docker_test.go b/signature/docker_test.go
new file mode 100644
index 0000000..fb2f52e
--- /dev/null
+++ b/signature/docker_test.go
@@ -0,0 +1,236 @@
+package signature
+
+import (
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/internal/testing/gpgagent"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSignDockerManifest(t *testing.T) {
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+
+ if err := mech.SupportsSigning(); err != nil {
+ t.Skipf("Signing not supported: %v", err)
+ }
+
+ manifest, err := os.ReadFile("fixtures/image.manifest.json")
+ require.NoError(t, err)
+
+ // Successful signing
+ signature, err := SignDockerManifest(manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
+ require.NoError(t, err)
+
+ verified, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
+ assert.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, verified.DockerReference)
+ assert.Equal(t, TestImageManifestDigest, verified.DockerManifestDigest)
+
+ // Error computing Docker manifest
+ invalidManifest, err := os.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ _, err = SignDockerManifest(invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint)
+ assert.Error(t, err)
+
+ // Error creating blob to sign
+ _, err = SignDockerManifest(manifest, "", mech, TestKeyFingerprint)
+ assert.Error(t, err)
+
+ // Error signing
+ _, err = SignDockerManifest(manifest, TestImageSignatureReference, mech, "this fingerprint doesn't exist")
+ assert.Error(t, err)
+}
+
+func TestSignDockerManifestWithPassphrase(t *testing.T) {
+ err := gpgagent.KillGPGAgent(testGPGHomeDirectory)
+ require.NoError(t, err)
+
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+
+ if err := mech.SupportsSigning(); err != nil {
+ t.Skipf("Signing not supported: %v", err)
+ }
+
+ manifest, err := os.ReadFile("fixtures/image.manifest.json")
+ require.NoError(t, err)
+
+ // Invalid passphrase
+ _, err = SignDockerManifestWithOptions(manifest, TestImageSignatureReference, mech, TestKeyFingerprintWithPassphrase, &SignOptions{Passphrase: TestPassphrase + "\n"})
+ assert.ErrorContains(t, err, "invalid passphrase")
+
+ // Wrong passphrase
+ _, err = SignDockerManifestWithOptions(manifest, TestImageSignatureReference, mech, TestKeyFingerprintWithPassphrase, &SignOptions{Passphrase: "wrong"})
+ require.Error(t, err)
+
+ // No passphrase
+ _, err = SignDockerManifestWithOptions(manifest, TestImageSignatureReference, mech, TestKeyFingerprintWithPassphrase, nil)
+ require.Error(t, err)
+
+ // Successful signing
+ signature, err := SignDockerManifestWithOptions(manifest, TestImageSignatureReference, mech, TestKeyFingerprintWithPassphrase, &SignOptions{Passphrase: TestPassphrase})
+ require.NoError(t, err)
+
+ verified, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprintWithPassphrase)
+ assert.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, verified.DockerReference)
+ assert.Equal(t, TestImageManifestDigest, verified.DockerManifestDigest)
+
+ // Error computing Docker manifest
+ invalidManifest, err := os.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ _, err = SignDockerManifest(invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprintWithPassphrase)
+ assert.Error(t, err)
+
+ // Error creating blob to sign
+ _, err = SignDockerManifest(manifest, "", mech, TestKeyFingerprintWithPassphrase)
+ assert.Error(t, err)
+
+ // Error signing
+ _, err = SignDockerManifest(manifest, TestImageSignatureReference, mech, "this fingerprint doesn't exist")
+ assert.Error(t, err)
+}
+
+func TestVerifyDockerManifestSignature(t *testing.T) {
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+ manifest, err := os.ReadFile("fixtures/image.manifest.json")
+ require.NoError(t, err)
+ signature, err := os.ReadFile("fixtures/image.signature")
+ require.NoError(t, err)
+
+ // Successful verification
+ sig, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
+ require.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
+ assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
+
+ // Verification using a different canonicalization of TestImageSignatureReference
+ sig, err = VerifyDockerManifestSignature(signature, manifest, "docker.io/"+TestImageSignatureReference, mech, TestKeyFingerprint)
+ require.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
+ assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
+
+ // For extra paranoia, test that we return nil data on error.
+
+ // Invalid docker reference on input
+ sig, err = VerifyDockerManifestSignature(signature, manifest, "UPPERCASEISINVALID", mech, TestKeyFingerprint)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+
+ // Error computing Docker manifest
+ invalidManifest, err := os.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ sig, err = VerifyDockerManifestSignature(signature, invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+
+ // Error verifying signature
+ corruptSignature, err := os.ReadFile("fixtures/corrupt.signature")
+ require.NoError(t, err)
+ sig, err = VerifyDockerManifestSignature(corruptSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+
+ // Key fingerprint mismatch
+ sig, err = VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, "unexpected fingerprint")
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+
+ // Invalid reference in the signature
+ invalidReferenceSignature, err := os.ReadFile("fixtures/invalid-reference.signature")
+ require.NoError(t, err)
+ sig, err = VerifyDockerManifestSignature(invalidReferenceSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+
+ // Docker reference mismatch
+ sig, err = VerifyDockerManifestSignature(signature, manifest, "example.com/does-not/match", mech, TestKeyFingerprint)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+
+ // Docker manifest digest mismatch
+ sig, err = VerifyDockerManifestSignature(signature, []byte("unexpected manifest"), TestImageSignatureReference, mech, TestKeyFingerprint)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+}
+
+func TestVerifyImageManifestSignatureUsingKeyIdentityList(t *testing.T) {
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+ manifest, err := os.ReadFile("fixtures/image.manifest.json")
+ require.NoError(t, err)
+ signature, err := os.ReadFile("fixtures/image.signature")
+ require.NoError(t, err)
+
+ // Successful verification
+ sig, keyIdentity, err := VerifyImageManifestSignatureUsingKeyIdentityList(signature, manifest, TestImageSignatureReference, mech, TestFingerprintListWithKey)
+ require.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
+ assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
+ assert.Equal(t, TestKeyFingerprint, keyIdentity)
+
+ // Verification using a different canonicalization of TestImageSignatureReference
+ sig, keyIdentity, err = VerifyImageManifestSignatureUsingKeyIdentityList(signature, manifest, "docker.io/"+TestImageSignatureReference, mech, TestFingerprintListWithKey)
+ require.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
+ assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
+ assert.Equal(t, TestKeyFingerprint, keyIdentity)
+
+ // For extra paranoia, test that we return nil data on error.
+
+ // Invalid docker reference on input
+ sig, keyIdentity, err = VerifyImageManifestSignatureUsingKeyIdentityList(signature, manifest, "UPPERCASEISINVALID", mech, TestFingerprintListWithKey)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, "", keyIdentity)
+
+ // Error computing Docker manifest
+ invalidManifest, err := os.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ sig, keyIdentity, err = VerifyImageManifestSignatureUsingKeyIdentityList(signature, invalidManifest, TestImageSignatureReference, mech, TestFingerprintListWithKey)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, "", keyIdentity)
+
+ // Error verifying signature
+ corruptSignature, err := os.ReadFile("fixtures/corrupt.signature")
+ require.NoError(t, err)
+ sig, keyIdentity, err = VerifyImageManifestSignatureUsingKeyIdentityList(corruptSignature, manifest, TestImageSignatureReference, mech, TestFingerprintListWithKey)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, "", keyIdentity)
+
+ // Key fingerprint mismatch
+ sig, keyIdentity, err = VerifyImageManifestSignatureUsingKeyIdentityList(signature, manifest, TestImageSignatureReference, mech, TestFingerprintListWithoutKey)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, "", keyIdentity)
+
+ // Invalid reference in the signature
+ invalidReferenceSignature, err := os.ReadFile("fixtures/invalid-reference.signature")
+ require.NoError(t, err)
+ sig, keyIdentity, err = VerifyImageManifestSignatureUsingKeyIdentityList(invalidReferenceSignature, manifest, TestImageSignatureReference, mech, TestFingerprintListWithKey)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, "", keyIdentity)
+
+ // Docker reference mismatch
+ sig, keyIdentity, err = VerifyImageManifestSignatureUsingKeyIdentityList(signature, manifest, "example.com/does-not/match", mech, TestFingerprintListWithKey)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, "", keyIdentity)
+
+ // Docker manifest digest mismatch
+ sig, keyIdentity, err = VerifyImageManifestSignatureUsingKeyIdentityList(signature, []byte("unexpected manifest"), TestImageSignatureReference, mech, TestFingerprintListWithKey)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, "", keyIdentity)
+}
diff --git a/signature/fixtures/.gitignore b/signature/fixtures/.gitignore
new file mode 100644
index 0000000..2772b97
--- /dev/null
+++ b/signature/fixtures/.gitignore
@@ -0,0 +1,6 @@
+/*.gpg~
+/.gpg-v21-migrated
+/private-keys-v1.d
+/random_seed
+/gnupg_spawn_agent_sentinel.lock
+/.#*
diff --git a/signature/fixtures/corrupt.signature b/signature/fixtures/corrupt.signature
new file mode 100644
index 0000000..95c2908
--- /dev/null
+++ b/signature/fixtures/corrupt.signature
Binary files differ
diff --git a/signature/fixtures/corrupt.signature-v3 b/signature/fixtures/corrupt.signature-v3
new file mode 100644
index 0000000..51f986b
--- /dev/null
+++ b/signature/fixtures/corrupt.signature-v3
Binary files differ
diff --git a/signature/fixtures/cosign.pub b/signature/fixtures/cosign.pub
new file mode 100644
index 0000000..8dae995
--- /dev/null
+++ b/signature/fixtures/cosign.pub
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFNLqFhf4fiN6o/glAuYnq2jYUeL0
+vRuLu/z39pmbVwS9ff5AYnlwaP9sxREajdLY9ynM6G1sy6AAmb7Z63TsLg==
+-----END PUBLIC KEY-----
diff --git a/signature/fixtures/cosign2.pub b/signature/fixtures/cosign2.pub
new file mode 100644
index 0000000..ca46da8
--- /dev/null
+++ b/signature/fixtures/cosign2.pub
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEwOkOF9xpfG8ghueIhnZ66ooujwt1
++ReV3HupgKnGFYnEh3Hh1YTg5L6kN1Yakkt5WltRoav8/R3hpCtUO3Rldw==
+-----END PUBLIC KEY-----
diff --git a/signature/fixtures/dir-img-cosign-fulcio-rekor-valid/manifest.json b/signature/fixtures/dir-img-cosign-fulcio-rekor-valid/manifest.json
new file mode 100644
index 0000000..b1ce3f6
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-fulcio-rekor-valid/manifest.json
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 1508,
+ "digest": "sha256:3b0f78b718417dfa432fd8da26d0e3ac4ca3566d0825b0d2b056ccc4dd29e644"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 2568440,
+ "digest": "sha256:1df32bae7504a32024616c66017cd5df04dd98eaf150f8df45fffef2547a3c54"
+ }
+ ]
+} \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-fulcio-rekor-valid/signature-1 b/signature/fixtures/dir-img-cosign-fulcio-rekor-valid/signature-1
new file mode 100644
index 0000000..332e0f9
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-fulcio-rekor-valid/signature-1
Binary files differ
diff --git a/signature/fixtures/dir-img-cosign-key-rekor-valid/manifest.json b/signature/fixtures/dir-img-cosign-key-rekor-valid/manifest.json
new file mode 100644
index 0000000..b1ce3f6
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-key-rekor-valid/manifest.json
@@ -0,0 +1,16 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 1508,
+ "digest": "sha256:3b0f78b718417dfa432fd8da26d0e3ac4ca3566d0825b0d2b056ccc4dd29e644"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 2568440,
+ "digest": "sha256:1df32bae7504a32024616c66017cd5df04dd98eaf150f8df45fffef2547a3c54"
+ }
+ ]
+} \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-key-rekor-valid/signature-1 b/signature/fixtures/dir-img-cosign-key-rekor-valid/signature-1
new file mode 100644
index 0000000..befbf6c
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-key-rekor-valid/signature-1
Binary files differ
diff --git a/signature/fixtures/dir-img-cosign-manifest-digest-error/manifest.json b/signature/fixtures/dir-img-cosign-manifest-digest-error/manifest.json
new file mode 120000
index 0000000..3dee14b
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-manifest-digest-error/manifest.json
@@ -0,0 +1 @@
+../v2s1-invalid-signatures.manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-manifest-digest-error/signature-1 b/signature/fixtures/dir-img-cosign-manifest-digest-error/signature-1
new file mode 120000
index 0000000..c5bec7e
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-manifest-digest-error/signature-1
@@ -0,0 +1 @@
+../dir-img-cosign-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-mixed/manifest.json b/signature/fixtures/dir-img-cosign-mixed/manifest.json
new file mode 120000
index 0000000..3c1dbae
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-mixed/manifest.json
@@ -0,0 +1 @@
+../dir-img-cosign-valid/manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-mixed/signature-1 b/signature/fixtures/dir-img-cosign-mixed/signature-1
new file mode 120000
index 0000000..0aee1d5
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-mixed/signature-1
@@ -0,0 +1 @@
+../unknown-cosign-key.signature \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-mixed/signature-2 b/signature/fixtures/dir-img-cosign-mixed/signature-2
new file mode 120000
index 0000000..c5bec7e
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-mixed/signature-2
@@ -0,0 +1 @@
+../dir-img-cosign-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-modified-manifest/manifest.json b/signature/fixtures/dir-img-cosign-modified-manifest/manifest.json
new file mode 100644
index 0000000..40a935b
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-modified-manifest/manifest.json
@@ -0,0 +1,17 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 1512,
+ "digest": "sha256:961769676411f082461f9ef46626dd7a2d1e2b2a38e6a44364bcbecf51e66dd4"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 2896510,
+ "digest": "sha256:9d16cba9fb961d1aafec9542f2bf7cb64acfc55245f9e4eb5abecd4cdc38d749"
+ }
+ ],
+ "extra": "this manifest has been modified"
+}
diff --git a/signature/fixtures/dir-img-cosign-modified-manifest/signature-1 b/signature/fixtures/dir-img-cosign-modified-manifest/signature-1
new file mode 120000
index 0000000..c5bec7e
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-modified-manifest/signature-1
@@ -0,0 +1 @@
+../dir-img-cosign-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-no-manifest/signature-1 b/signature/fixtures/dir-img-cosign-no-manifest/signature-1
new file mode 120000
index 0000000..c5bec7e
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-no-manifest/signature-1
@@ -0,0 +1 @@
+../dir-img-cosign-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-other-attachment/manifest.json b/signature/fixtures/dir-img-cosign-other-attachment/manifest.json
new file mode 120000
index 0000000..3c1dbae
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-other-attachment/manifest.json
@@ -0,0 +1 @@
+../dir-img-cosign-valid/manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-other-attachment/signature-1 b/signature/fixtures/dir-img-cosign-other-attachment/signature-1
new file mode 100644
index 0000000..f809ba6
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-other-attachment/signature-1
Binary files differ
diff --git a/signature/fixtures/dir-img-cosign-valid-2/manifest.json b/signature/fixtures/dir-img-cosign-valid-2/manifest.json
new file mode 120000
index 0000000..3c1dbae
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-valid-2/manifest.json
@@ -0,0 +1 @@
+../dir-img-cosign-valid/manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-valid-2/signature-1 b/signature/fixtures/dir-img-cosign-valid-2/signature-1
new file mode 120000
index 0000000..c5bec7e
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-valid-2/signature-1
@@ -0,0 +1 @@
+../dir-img-cosign-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-valid-2/signature-2 b/signature/fixtures/dir-img-cosign-valid-2/signature-2
new file mode 100644
index 0000000..5a828ec
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-valid-2/signature-2
Binary files differ
diff --git a/signature/fixtures/dir-img-cosign-valid-with-tag/manifest.json b/signature/fixtures/dir-img-cosign-valid-with-tag/manifest.json
new file mode 100644
index 0000000..72fb41f
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-valid-with-tag/manifest.json
@@ -0,0 +1 @@
+{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json","config":{"mediaType":"application/vnd.docker.container.image.v1+json","size":1512,"digest":"sha256:961769676411f082461f9ef46626dd7a2d1e2b2a38e6a44364bcbecf51e66dd4"},"layers":[{"mediaType":"application/vnd.docker.image.rootfs.diff.tar.gzip","size":2896510,"digest":"sha256:9d16cba9fb961d1aafec9542f2bf7cb64acfc55245f9e4eb5abecd4cdc38d749"}]} \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-valid-with-tag/signature-1 b/signature/fixtures/dir-img-cosign-valid-with-tag/signature-1
new file mode 100644
index 0000000..efba75f
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-valid-with-tag/signature-1
Binary files differ
diff --git a/signature/fixtures/dir-img-cosign-valid/manifest.json b/signature/fixtures/dir-img-cosign-valid/manifest.json
new file mode 100644
index 0000000..72fb41f
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-valid/manifest.json
@@ -0,0 +1 @@
+{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json","config":{"mediaType":"application/vnd.docker.container.image.v1+json","size":1512,"digest":"sha256:961769676411f082461f9ef46626dd7a2d1e2b2a38e6a44364bcbecf51e66dd4"},"layers":[{"mediaType":"application/vnd.docker.image.rootfs.diff.tar.gzip","size":2896510,"digest":"sha256:9d16cba9fb961d1aafec9542f2bf7cb64acfc55245f9e4eb5abecd4cdc38d749"}]} \ No newline at end of file
diff --git a/signature/fixtures/dir-img-cosign-valid/signature-1 b/signature/fixtures/dir-img-cosign-valid/signature-1
new file mode 100644
index 0000000..46e7533
--- /dev/null
+++ b/signature/fixtures/dir-img-cosign-valid/signature-1
Binary files differ
diff --git a/signature/fixtures/dir-img-manifest-digest-error/manifest.json b/signature/fixtures/dir-img-manifest-digest-error/manifest.json
new file mode 120000
index 0000000..3dee14b
--- /dev/null
+++ b/signature/fixtures/dir-img-manifest-digest-error/manifest.json
@@ -0,0 +1 @@
+../v2s1-invalid-signatures.manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-manifest-digest-error/signature-1 b/signature/fixtures/dir-img-manifest-digest-error/signature-1
new file mode 120000
index 0000000..f010fd4
--- /dev/null
+++ b/signature/fixtures/dir-img-manifest-digest-error/signature-1
@@ -0,0 +1 @@
+../dir-img-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-mixed/manifest.json b/signature/fixtures/dir-img-mixed/manifest.json
new file mode 120000
index 0000000..ff7d2ff
--- /dev/null
+++ b/signature/fixtures/dir-img-mixed/manifest.json
@@ -0,0 +1 @@
+../dir-img-valid/manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-mixed/signature-1 b/signature/fixtures/dir-img-mixed/signature-1
new file mode 120000
index 0000000..b27cdc4
--- /dev/null
+++ b/signature/fixtures/dir-img-mixed/signature-1
@@ -0,0 +1 @@
+../invalid-blob.signature \ No newline at end of file
diff --git a/signature/fixtures/dir-img-mixed/signature-2 b/signature/fixtures/dir-img-mixed/signature-2
new file mode 120000
index 0000000..f010fd4
--- /dev/null
+++ b/signature/fixtures/dir-img-mixed/signature-2
@@ -0,0 +1 @@
+../dir-img-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-modified-manifest/manifest.json b/signature/fixtures/dir-img-modified-manifest/manifest.json
new file mode 100644
index 0000000..82fde38
--- /dev/null
+++ b/signature/fixtures/dir-img-modified-manifest/manifest.json
@@ -0,0 +1,27 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ],
+ "extra": "this manifest has been modified"
+}
diff --git a/signature/fixtures/dir-img-modified-manifest/signature-1 b/signature/fixtures/dir-img-modified-manifest/signature-1
new file mode 120000
index 0000000..f010fd4
--- /dev/null
+++ b/signature/fixtures/dir-img-modified-manifest/signature-1
@@ -0,0 +1 @@
+../dir-img-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-no-manifest/signature-1 b/signature/fixtures/dir-img-no-manifest/signature-1
new file mode 120000
index 0000000..f010fd4
--- /dev/null
+++ b/signature/fixtures/dir-img-no-manifest/signature-1
@@ -0,0 +1 @@
+../dir-img-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-unsigned/manifest.json b/signature/fixtures/dir-img-unsigned/manifest.json
new file mode 120000
index 0000000..ff7d2ff
--- /dev/null
+++ b/signature/fixtures/dir-img-unsigned/manifest.json
@@ -0,0 +1 @@
+../dir-img-valid/manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-valid-2/manifest.json b/signature/fixtures/dir-img-valid-2/manifest.json
new file mode 120000
index 0000000..ff7d2ff
--- /dev/null
+++ b/signature/fixtures/dir-img-valid-2/manifest.json
@@ -0,0 +1 @@
+../dir-img-valid/manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-valid-2/signature-1 b/signature/fixtures/dir-img-valid-2/signature-1
new file mode 120000
index 0000000..f010fd4
--- /dev/null
+++ b/signature/fixtures/dir-img-valid-2/signature-1
@@ -0,0 +1 @@
+../dir-img-valid/signature-1 \ No newline at end of file
diff --git a/signature/fixtures/dir-img-valid-2/signature-2 b/signature/fixtures/dir-img-valid-2/signature-2
new file mode 100644
index 0000000..dbba8f4
--- /dev/null
+++ b/signature/fixtures/dir-img-valid-2/signature-2
Binary files differ
diff --git a/signature/fixtures/dir-img-valid/manifest.json b/signature/fixtures/dir-img-valid/manifest.json
new file mode 120000
index 0000000..c5bd254
--- /dev/null
+++ b/signature/fixtures/dir-img-valid/manifest.json
@@ -0,0 +1 @@
+../image.manifest.json \ No newline at end of file
diff --git a/signature/fixtures/dir-img-valid/signature-1 b/signature/fixtures/dir-img-valid/signature-1
new file mode 100644
index 0000000..d0e1872
--- /dev/null
+++ b/signature/fixtures/dir-img-valid/signature-1
Binary files differ
diff --git a/signature/fixtures/double.signature b/signature/fixtures/double.signature
new file mode 100644
index 0000000..76b17e2
--- /dev/null
+++ b/signature/fixtures/double.signature
Binary files differ
diff --git a/signature/fixtures/expired.signature b/signature/fixtures/expired.signature
new file mode 100644
index 0000000..c609c37
--- /dev/null
+++ b/signature/fixtures/expired.signature
Binary files differ
diff --git a/signature/fixtures/fulcio-cert b/signature/fixtures/fulcio-cert
new file mode 100644
index 0000000..734b8bb
--- /dev/null
+++ b/signature/fixtures/fulcio-cert
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICnTCCAiOgAwIBAgIUG45uaC2z8VvuOwzzm79RPfckoxwwCgYIKoZIzj0EAwMw
+NzEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MR4wHAYDVQQDExVzaWdzdG9yZS1pbnRl
+cm1lZGlhdGUwHhcNMjIxMjEyMTg0ODE4WhcNMjIxMjEyMTg1ODE4WjAAMFkwEwYH
+KoZIzj0CAQYIKoZIzj0DAQcDQgAEJBurkhSRmHukHVh3VA8nYuSWqZ4ltafDwyWl
+c3c9NaLovmu+NC4NiUtMLifL/P3nqedbnctKBuYmfISGiZlVyKOCAUIwggE+MA4G
+A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUAmvG
+4K6lNk/sk/f4aiahjYRqR+cwHwYDVR0jBBgwFoAU39Ppz1YkEZb5qNjpKFWixi4Y
+ZD8wHQYDVR0RAQH/BBMwEYEPbWl0ckByZWRoYXQuY29tMCwGCisGAQQBg78wAQEE
+Hmh0dHBzOi8vZ2l0aHViLmNvbS9sb2dpbi9vYXV0aDCBiQYKKwYBBAHWeQIEAgR7
+BHkAdwB1AN09MGrGxxEyYxkeHJlnNwKiSl643jyt/4eKcoAvKe6OAAABhQeqlwUA
+AAQDAEYwRAIgVi2SvN5ReGYiOShY8LFSRG5D6oATKqHb6kN/DJh2rAUCIANS6Aqp
+xaXiHwTHUr3xPSGa5i6hywmbbRC6N0kIeDM6MAoGCCqGSM49BAMDA2gAMGUCMCLu
+cZtESIN0lm64Co/bZ68CCRkWlktX4mmJiRhKi9c9QD62C5SuZhc0vvo6MOKmTQIx
+AIo/MozZeoU7rdNj0pZKuBeCmMqmJaDSsw19tKi/b1pEmuw+Sf2BI25GgIbKztG1
+9w==
+-----END CERTIFICATE-----
diff --git a/signature/fixtures/fulcio-chain b/signature/fixtures/fulcio-chain
new file mode 100644
index 0000000..1c1e9f7
--- /dev/null
+++ b/signature/fixtures/fulcio-chain
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE-----
+MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMw
+KjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0y
+MjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3Jl
+LmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0C
+AQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV7
+7LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS
+0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYB
+BQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjp
+KFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZI
+zj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJR
+nZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsP
+mygUY7Ii2zbdCdliiow=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMw
+KjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0y
+MTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3Jl
+LmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7
+XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxex
+X69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92j
+YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRY
+wB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQ
+KsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCM
+WP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9
+TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ
+-----END CERTIFICATE----- \ No newline at end of file
diff --git a/signature/fixtures/fulcio_v1.crt.pem b/signature/fixtures/fulcio_v1.crt.pem
new file mode 100644
index 0000000..3afc46b
--- /dev/null
+++ b/signature/fixtures/fulcio_v1.crt.pem
@@ -0,0 +1,13 @@
+-----BEGIN CERTIFICATE-----
+MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMw
+KjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0y
+MTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3Jl
+LmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7
+XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxex
+X69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92j
+YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRY
+wB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQ
+KsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCM
+WP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9
+TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ
+-----END CERTIFICATE----- \ No newline at end of file
diff --git a/signature/fixtures/image.manifest.json b/signature/fixtures/image.manifest.json
new file mode 100644
index 0000000..198da23
--- /dev/null
+++ b/signature/fixtures/image.manifest.json
@@ -0,0 +1,26 @@
+{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": 7023,
+ "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 32654,
+ "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 16724,
+ "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
+ },
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "size": 73109,
+ "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
+ }
+ ]
+} \ No newline at end of file
diff --git a/signature/fixtures/image.signature b/signature/fixtures/image.signature
new file mode 100644
index 0000000..f892721
--- /dev/null
+++ b/signature/fixtures/image.signature
Binary files differ
diff --git a/signature/fixtures/invalid-blob.signature b/signature/fixtures/invalid-blob.signature
new file mode 100644
index 0000000..c8db18c
--- /dev/null
+++ b/signature/fixtures/invalid-blob.signature
Binary files differ
diff --git a/signature/fixtures/invalid-blob.signature-v3 b/signature/fixtures/invalid-blob.signature-v3
new file mode 100644
index 0000000..246bdd9
--- /dev/null
+++ b/signature/fixtures/invalid-blob.signature-v3
Binary files differ
diff --git a/signature/fixtures/invalid-reference.signature b/signature/fixtures/invalid-reference.signature
new file mode 100644
index 0000000..0236598
--- /dev/null
+++ b/signature/fixtures/invalid-reference.signature
Binary files differ
diff --git a/signature/fixtures/no-optional-fields.signature b/signature/fixtures/no-optional-fields.signature
new file mode 100644
index 0000000..482ae3a
--- /dev/null
+++ b/signature/fixtures/no-optional-fields.signature
Binary files differ
diff --git a/signature/fixtures/policy.json b/signature/fixtures/policy.json
new file mode 100644
index 0000000..22d68d8
--- /dev/null
+++ b/signature/fixtures/policy.json
@@ -0,0 +1,160 @@
+{
+ "default": [
+ {
+ "type": "reject"
+ }
+ ],
+ "transports": {
+ "dir": {
+ "": [
+ {
+ "type": "insecureAcceptAnything"
+ }
+ ]
+ },
+ "docker": {
+ "example.com/playground": [
+ {
+ "type": "insecureAcceptAnything"
+ }
+ ],
+ "example.com/production": [
+ {
+ "type": "signedBy",
+ "keyType": "GPGKeys",
+ "keyPath": "/keys/employee-gpg-keyring"
+ }
+ ],
+ "example.com/hardened": [
+ {
+ "type": "signedBy",
+ "keyType": "GPGKeys",
+ "keyPath": "/keys/employee-gpg-keyring",
+ "signedIdentity": {
+ "type": "matchRepository"
+ }
+ },
+ {
+ "type": "signedBy",
+ "keyType": "signedByGPGKeys",
+ "keyPath": "/keys/public-key-signing-gpg-keyring",
+ "signedIdentity": {
+ "type": "matchExact"
+ }
+ },
+ {
+ "type": "signedBaseLayer",
+ "baseLayerIdentity": {
+ "type": "exactRepository",
+ "dockerRepository": "registry.access.redhat.com/rhel7/rhel"
+ }
+ }
+ ],
+ "example.com/hardened-x509": [
+ {
+ "type": "signedBy",
+ "keyType": "X509Certificates",
+ "keyPath": "/keys/employee-cert-file",
+ "signedIdentity": {
+ "type": "matchRepository"
+ }
+ },
+ {
+ "type": "signedBy",
+ "keyType": "signedByX509CAs",
+ "keyPath": "/keys/public-key-signing-ca-file"
+ }
+ ],
+ "registry.access.redhat.com": [
+ {
+ "type": "signedBy",
+ "keyType": "signedByGPGKeys",
+ "keyPath": "/keys/RH-key-signing-key-gpg-keyring",
+ "signedIdentity": {
+ "type": "matchRepoDigestOrExact"
+ }
+ }
+ ],
+ "registry.redhat.io/beta": [
+ {
+ "type": "signedBy",
+ "keyType": "GPGKeys",
+ "keyPaths": ["/keys/RH-production-signing-key-gpg-keyring", "/keys/RH-beta-signing-key-gpg-keyring"]
+ }
+ ],
+ "private-mirror:5000/vendor-mirror": [
+ {
+ "type": "signedBy",
+ "keyType": "GPGKeys",
+ "keyPath": "/keys/vendor-gpg-keyring",
+ "signedIdentity": {
+ "type": "remapIdentity",
+ "prefix": "private-mirror:5000/vendor-mirror",
+ "signedPrefix": "vendor.example.com"
+ }
+ }
+ ],
+ "*.access.redhat.com": [
+ {
+ "type": "signedBy",
+ "keyType": "signedByGPGKeys",
+ "keyPath": "/keys/RH-key-signing-key-gpg-keyring",
+ "signedIdentity": {
+ "type": "matchRepoDigestOrExact"
+ }
+ }
+ ],
+ "*.redhat.com": [
+ {
+ "type": "signedBy",
+ "keyType": "signedByGPGKeys",
+ "keyPath": "/keys/RH-key-signing-key-gpg-keyring",
+ "signedIdentity": {
+ "type": "matchRepoDigestOrExact"
+ }
+ }
+ ],
+ "*.com": [
+ {
+ "type": "signedBy",
+ "keyType": "signedByGPGKeys",
+ "keyPath": "/keys/RH-key-signing-key-gpg-keyring",
+ "signedIdentity": {
+ "type": "matchRepoDigestOrExact"
+ }
+ }
+ ],
+ "bogus/key-data-example": [
+ {
+ "type": "signedBy",
+ "keyType": "signedByGPGKeys",
+ "keyData": "bm9uc2Vuc2U="
+ }
+ ],
+ "bogus/signed-identity-example": [
+ {
+ "type": "signedBaseLayer",
+ "baseLayerIdentity": {
+ "type": "exactReference",
+ "dockerReference": "registry.access.redhat.com/rhel7/rhel:latest"
+ }
+ }
+ ],
+ "example.com/sigstore/key-data-example": [
+ {
+ "type": "sigstoreSigned",
+ "keyData": "bm9uc2Vuc2U="
+ }
+ ],
+ "example.com/sigstore/key-path-example": [
+ {
+ "type": "sigstoreSigned",
+ "keyPath": "/keys/public-key",
+ "signedIdentity": {
+ "type": "matchRepository"
+ }
+ }
+ ]
+ }
+ }
+}
diff --git a/signature/fixtures/public-key-1.gpg b/signature/fixtures/public-key-1.gpg
new file mode 100644
index 0000000..c97f2e0
--- /dev/null
+++ b/signature/fixtures/public-key-1.gpg
Binary files differ
diff --git a/signature/fixtures/public-key-2.gpg b/signature/fixtures/public-key-2.gpg
new file mode 100644
index 0000000..c15d06b
--- /dev/null
+++ b/signature/fixtures/public-key-2.gpg
Binary files differ
diff --git a/signature/fixtures/public-key.gpg b/signature/fixtures/public-key.gpg
new file mode 100644
index 0000000..46901d5
--- /dev/null
+++ b/signature/fixtures/public-key.gpg
@@ -0,0 +1,19 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0EVurzqQEEAL3qkFq4K2URtSWVDYnQUNA9HdM9sqS2eAWfqUFMrkD5f+oN+LBL
+tPyaE5GNLA0vXY7nHAM2TeM8ijZ/eMP17Raj64JL8GhCymL3wn2jNvb9XaF0R0s6
+H0IaRPPu45A3SnxLwm4Orc/9Z7/UxtYjKSg9xOaTiVPzJgaf5Vm4J4ApABEBAAG0
+EnNrb3BlbyB0ZXN0aW5nIGtleYi4BBMBAgAiBQJW6vOpAhsDBgsJCAcDAgYVCAIJ
+CgsEFgIDAQIeAQIXgAAKCRDbcvIYi7RsyBbOBACgJFiKDlQ1UyvsNmGqJ7D0OpbS
+1OppJlradKgZXyfahFswhFI+7ZREvELLHbinq3dBy5cLXRWzQKdJZNHknSN5Tjf2
+0ipVBQuqpcBo+dnKiG4zH6fhTri7yeTZksIDfsqlI6FXDOdKLUSnahagEBn4yU+x
+jHPvZk5SuuZv56A45biNBFbq86kBBADIC/9CsAlOmRALuYUmkhcqEjuFwn3wKz2d
+IBjzgvro7zcVNNCgxQfMEjcUsvEh5cx13G3QQHcwOKy3M6Bv6VMhfZjd+1P1el4P
+0fJS8GFmhWRBknMN8jFsgyohQeouQ798RFFv94KszfStNnr/ae8oao5URmoUXSCa
+/MdUxn0YKwARAQABiJ8EGAECAAkFAlbq86kCGwwACgkQ23LyGIu0bMjUywQAq0dn
+lUpDNSoLTcpNWuVvHQ7c/qmnE4TyiSLiRiAywdEWA6gMiyhUUucuGsEhMFP1WX1k
+UNwArZ6UG7BDOUsvngP7jKGNqyUOQrq1s/r8D+0MrJGOWErGLlfttO2WeoijECkI
+5qm8cXzAra3Xf/Z3VjxYTKSnNu37LtZkakdTdYE=
+=tJAt
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/signature/fixtures/pubring.gpg b/signature/fixtures/pubring.gpg
new file mode 100644
index 0000000..9e7712b
--- /dev/null
+++ b/signature/fixtures/pubring.gpg
Binary files differ
diff --git a/signature/fixtures/rekor-payload b/signature/fixtures/rekor-payload
new file mode 120000
index 0000000..04a2563
--- /dev/null
+++ b/signature/fixtures/rekor-payload
@@ -0,0 +1 @@
+../internal/testdata/rekor-payload \ No newline at end of file
diff --git a/signature/fixtures/rekor-set b/signature/fixtures/rekor-set
new file mode 120000
index 0000000..5cd8159
--- /dev/null
+++ b/signature/fixtures/rekor-set
@@ -0,0 +1 @@
+../internal/testdata/rekor-set \ No newline at end of file
diff --git a/signature/fixtures/rekor-sig b/signature/fixtures/rekor-sig
new file mode 120000
index 0000000..16b66d7
--- /dev/null
+++ b/signature/fixtures/rekor-sig
@@ -0,0 +1 @@
+../internal/testdata/rekor-sig \ No newline at end of file
diff --git a/signature/fixtures/rekor.pub b/signature/fixtures/rekor.pub
new file mode 120000
index 0000000..ac451b9
--- /dev/null
+++ b/signature/fixtures/rekor.pub
@@ -0,0 +1 @@
+../internal/testdata/rekor.pub \ No newline at end of file
diff --git a/signature/fixtures/secring.gpg b/signature/fixtures/secring.gpg
new file mode 100644
index 0000000..1c27499
--- /dev/null
+++ b/signature/fixtures/secring.gpg
Binary files differ
diff --git a/signature/fixtures/some-rsa-key.pub b/signature/fixtures/some-rsa-key.pub
new file mode 100644
index 0000000..1d56ec1
--- /dev/null
+++ b/signature/fixtures/some-rsa-key.pub
@@ -0,0 +1,14 @@
+-----BEGIN PUBLIC KEY-----
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1bofelEsr9ZYEDlNZN+Z
+K4LbdXi1grcUYGk9S7UsaZV4ovq++j5yYAiM2mWBxcmD65pokyanLEz/0ya9zSez
+C1dY337ecoRj7OgLzrMJarKJu3xDYe/jCylQZCke7bobsIahi00i6sU0Feh94ULt
+aSiBaOEZzYvbE6QBsfQWwH2EC/Ch0Dsy6zpZQxnsEGJkpr/ed9UYwUlgaGAouL4N
+A5flQvDNWyKNMBsosrTHE+yRYiouAytszJ1WzMQrR8n3ngsMaJtM5FZClyT1FbB5
+YnvoFcTUqVQqMFCK6DBQvMp5mOBHAdWaHXTAP9cKFK73CdQwHigP3ayXRyq/Npjd
+tSpk1AW12Ctkiu9jIX1upGpwJCz0nevsXm5CL2jdxbzDPxIHV4w4kHxt6CcCWJ3V
+DSqTIV8xGMyRahUsmhnRXeRSF0UaQdzI9ZPKWW20bYjZ3qJawO3YhJZUwxZKiAm6
+Px2nd4lVpJXZLZ/DJ7eXJ0rWiwC2Y3C+1FlXdWtbocersg6a7oW/VNe+unVznHWm
+N0GSp1IobRsvP6t5ITIJiQguROl8PVNS7Wu4vzDndy1cH4NBCyrQopQLJ29U51S4
+2tqYlSlJReWjvSidCSZDa9/YXU9LZqVWmkSKwMcCKCRACIYOLxFZJjsxj5hOIOkT
+4EeUfiK04GAV1QKVloZ9b2sCAwEAAQ==
+-----END PUBLIC KEY-----
diff --git a/signature/fixtures/trustdb.gpg b/signature/fixtures/trustdb.gpg
new file mode 100644
index 0000000..fca2002
--- /dev/null
+++ b/signature/fixtures/trustdb.gpg
Binary files differ
diff --git a/signature/fixtures/unknown-cosign-key.signature b/signature/fixtures/unknown-cosign-key.signature
new file mode 100644
index 0000000..e05ba51
--- /dev/null
+++ b/signature/fixtures/unknown-cosign-key.signature
Binary files differ
diff --git a/signature/fixtures/unknown-key.signature b/signature/fixtures/unknown-key.signature
new file mode 100644
index 0000000..393ace4
--- /dev/null
+++ b/signature/fixtures/unknown-key.signature
Binary files differ
diff --git a/signature/fixtures/unknown-key.signature-v3 b/signature/fixtures/unknown-key.signature-v3
new file mode 100644
index 0000000..67f429b
--- /dev/null
+++ b/signature/fixtures/unknown-key.signature-v3
Binary files differ
diff --git a/signature/fixtures/unsigned-encrypted.signature b/signature/fixtures/unsigned-encrypted.signature
new file mode 100644
index 0000000..7da65de
--- /dev/null
+++ b/signature/fixtures/unsigned-encrypted.signature
Binary files differ
diff --git a/signature/fixtures/unsigned-literal.signature b/signature/fixtures/unsigned-literal.signature
new file mode 100644
index 0000000..9b660cb
--- /dev/null
+++ b/signature/fixtures/unsigned-literal.signature
Binary files differ
diff --git a/signature/fixtures/v2s1-invalid-signatures.manifest.json b/signature/fixtures/v2s1-invalid-signatures.manifest.json
new file mode 100644
index 0000000..96def40
--- /dev/null
+++ b/signature/fixtures/v2s1-invalid-signatures.manifest.json
@@ -0,0 +1,11 @@
+{
+ "schemaVersion": 1,
+ "name": "mitr/busybox",
+ "tag": "latest",
+ "architecture": "amd64",
+ "fsLayers": [
+ ],
+ "history": [
+ ],
+ "signatures": 1
+}
diff --git a/signature/fixtures_info_test.go b/signature/fixtures_info_test.go
new file mode 100644
index 0000000..e370bbc
--- /dev/null
+++ b/signature/fixtures_info_test.go
@@ -0,0 +1,29 @@
+package signature
+
+import "github.com/opencontainers/go-digest"
+
+const (
+ // TestImageManifestDigest is the Docker manifest digest of "image.manifest.json"
+ TestImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
+ // TestImageSignatureReference is the Docker image reference signed in "image.signature"
+ TestImageSignatureReference = "testing/manifest"
+ // TestKeyFingerprint is the fingerprint of the private key in this directory.
+ TestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
+ // TestOtherFingerprint1 is a random fingerprint.
+ TestOtherFingerprint1 = "0123456789ABCDEF0123456789ABCDEF01234567"
+ // TestOtherFingerprint2 is a random fingerprint.
+ TestOtherFingerprint2 = "DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"
+ // TestKeyShortID is the short ID of the private key in this directory.
+ TestKeyShortID = "DB72F2188BB46CC8"
+ // TestKeyFingerprintWithPassphrase is the fingerprint of the private key with passphrase in this directory.
+ TestKeyFingerprintWithPassphrase = "E3EB7611D815211F141946B5B0CDE60B42557346"
+ // TestPassphrase is the passphrase for TestKeyFingerprintWithPassphrase.
+ TestPassphrase = "WithPassphrase123"
+)
+
+var (
+	// TestFingerprintListWithKey is a slice of multiple fingerprints, including the fingerprint of the private key in this directory.
+ TestFingerprintListWithKey = []string{TestKeyFingerprint, TestOtherFingerprint1, TestOtherFingerprint2}
+	// TestFingerprintListWithoutKey is a slice of multiple fingerprints, not including the fingerprint of the private key in this directory.
+ TestFingerprintListWithoutKey = []string{TestOtherFingerprint1, TestOtherFingerprint2}
+)
diff --git a/signature/fulcio_cert.go b/signature/fulcio_cert.go
new file mode 100644
index 0000000..ef5d3df
--- /dev/null
+++ b/signature/fulcio_cert.go
@@ -0,0 +1,204 @@
+package signature
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/sigstore/fulcio/pkg/certificate"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ "golang.org/x/exp/slices"
+)
+
+// fulcioTrustRoot contains a policy for validating Fulcio-issued certificates.
+// Users should call validate() on the policy before using it.
+type fulcioTrustRoot struct {
+ caCertificates *x509.CertPool
+ oidcIssuer string
+ subjectEmail string
+}
+
+func (f *fulcioTrustRoot) validate() error {
+ if f.oidcIssuer == "" {
+ return errors.New("Internal inconsistency: Fulcio use set up without OIDC issuer")
+ }
+ if f.subjectEmail == "" {
+ return errors.New("Internal inconsistency: Fulcio use set up without subject email")
+ }
+ return nil
+}
+
+// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in untrustedCertificate;
+// it fails if the extension is not present in the certificate, or on any inconsistency.
+func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) {
+ // == Validate the recorded OIDC issuer
+ gotOIDCIssuer1 := false
+ gotOIDCIssuer2 := false
+ var oidcIssuer1, oidcIssuer2 string
+ // certificate.ParseExtensions doesn’t reject duplicate extensions, and doesn’t detect inconsistencies
+ // between certificate.OIDIssuer and certificate.OIDIssuerV2.
+ // Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19,
+ // reject duplicates manually.
+ for _, untrustedExt := range untrustedCertificate.Extensions {
+ if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it.
+ if gotOIDCIssuer1 {
+ // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
+ // already in ParseCertificate.
+ return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension")
+ }
+ oidcIssuer1 = string(untrustedExt.Value)
+ gotOIDCIssuer1 = true
+ } else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) {
+ if gotOIDCIssuer2 {
+ // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
+ // already in ParseCertificate.
+ return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension")
+ }
+ rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2)
+ if err != nil {
+ return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err))
+ }
+ if len(rest) != 0 {
+ return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data")
+ }
+ gotOIDCIssuer2 = true
+ }
+ }
+ switch {
+ case gotOIDCIssuer1 && gotOIDCIssuer2:
+ if oidcIssuer1 != oidcIssuer2 {
+ return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v",
+ oidcIssuer1, oidcIssuer2))
+ }
+ return oidcIssuer1, nil
+ case gotOIDCIssuer1:
+ return oidcIssuer1, nil
+ case gotOIDCIssuer2:
+ return oidcIssuer2, nil
+ default:
+ return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension")
+ }
+}
+
+func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) {
+ // == Verify the certificate is correctly signed
+ var untrustedIntermediatePool *x509.CertPool // = nil
+ // untrustedCertificateChainPool.AppendCertsFromPEM does something broadly similar,
+ // but it seems to optimize for memory usage at the cost of larger CPU usage (i.e. to load
+ // the hundreds of trusted CAs). Golang’s TLS code similarly calls individual AddCert
+ // for intermediate certificates.
+ if len(untrustedIntermediateChainBytes) > 0 {
+ untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes)
+ if err != nil {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err))
+ }
+ untrustedIntermediatePool = x509.NewCertPool()
+ if len(untrustedIntermediateChain) > 1 {
+ for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] {
+ untrustedIntermediatePool.AddCert(untrustedIntermediateCert)
+ }
+ }
+ }
+
+ untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes)
+ if err != nil {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err))
+ }
+ switch len(untrustedLeafCerts) {
+ case 0:
+ return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data")
+ case 1:
+ break // OK
+ default:
+ return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data")
+ }
+ untrustedCertificate := untrustedLeafCerts[0]
+
+	// Go rejects a Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses or URIs;
+	// we match the SAN ourselves, so override that.
+ if len(untrustedCertificate.UnhandledCriticalExtensions) > 0 {
+ var remaining []asn1.ObjectIdentifier
+ for _, oid := range untrustedCertificate.UnhandledCriticalExtensions {
+ if !oid.Equal(cryptoutils.SANOID) {
+ remaining = append(remaining, oid)
+ }
+ }
+ untrustedCertificate.UnhandledCriticalExtensions = remaining
+ }
+
+ if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
+ Intermediates: untrustedIntermediatePool,
+ Roots: f.caCertificates,
+ // NOTE: Cosign uses untrustedCertificate.NotBefore here (i.e. uses _that_ time for intermediate certificate validation),
+ // and validates the leaf certificate against relevantTime manually.
+ // We verify the full certificate chain against relevantTime instead.
+ // Assuming the certificate is fulcio-generated and very short-lived, that should make little difference.
+ CurrentTime: relevantTime,
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+ }); err != nil {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err))
+ }
+
+ // Cosign verifies a SCT of the certificate (either embedded, or even, probably irrelevant, externally-supplied).
+ //
+ // We don’t currently do that.
+ //
+ // At the very least, with Fulcio we require Rekor SETs to prove Rekor contains a log of the signature, and that
+ // already contains the full certificate; so a SCT of the certificate is superfluous (assuming Rekor allowed searching by
+ // certificate subject, which, well…). That argument might go away if we add support for RFC 3161 timestamps instead of Rekor.
+ //
+ // Secondarily, assuming a trusted Fulcio server (which, to be fair, might not be the case for the public one) SCT is not clearly
+ // better than the Fulcio server maintaining an audit log; a SCT can only reveal a misissuance if there is some other authoritative
+	// log of approved Fulcio invocations, and it’s not clear where that would come from, especially as human users manually
+	// logging in using OpenID are not going to maintain a record of those actions.
+ //
+ // Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
+ // by correctly-issued certificates.
+ //
+	// So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition,
+ // the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
+ // make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
+
+ // == Validate the recorded OIDC issuer
+ oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate)
+ if err != nil {
+ return nil, err
+ }
+ if oidcIssuer != f.oidcIssuer {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer))
+ }
+
+ // == Validate the OIDC subject
+ if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
+ return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
+ f.subjectEmail,
+ untrustedCertificate.EmailAddresses))
+ }
+ // FIXME: Match more subject types? Cosign does:
+ // - .DNSNames (can’t be issued by Fulcio)
+ // - .IPAddresses (can’t be issued by Fulcio)
+ // - .URIs (CAN be issued by Fulcio)
+ // - OtherName values in SAN (CAN be issued by Fulcio)
+ // - Various values about GitHub workflows (CAN be issued by Fulcio)
+ // What does it… mean to get an OAuth2 identity for an IP address?
+ // FIXME: How far into Turing-completeness for the issuer/subject do we need to get? Simultaneously accepted alternatives, for
+ // issuers and/or subjects and/or combinations? Regexps? More?
+
+ return untrustedCertificate.PublicKey, nil
+}
+
+func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+ untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
+ untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
+ rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes,
+ untrustedBase64Signature, untrustedPayloadBytes)
+ if err != nil {
+ return nil, err
+ }
+ return fulcioTrustRoot.verifyFulcioCertificateAtTime(rekorSETTime, untrustedCertificateBytes, untrustedIntermediateChainBytes)
+}
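
For orientation, a minimal in-package sketch of how the trust root above might be constructed and exercised; the helper name, CA bundle path, OIDC issuer and subject e-mail are placeholders, and in real use the verification time comes from a verified Rekor SET via verifyRekorFulcio rather than being passed in directly:

package signature

import (
	"crypto"
	"crypto/x509"
	"errors"
	"os"
	"time"
)

// sketchVerifyLeafAtTime builds a fulcioTrustRoot from a CA bundle and checks a
// leaf certificate (plus an optional intermediate chain) at a given time.
// Hypothetical helper for illustration only; not part of this package.
func sketchVerifyLeafAtTime(leafPEM, chainPEM []byte, at time.Time) (crypto.PublicKey, error) {
	caPEM, err := os.ReadFile("fixtures/fulcio_v1.crt.pem") // placeholder CA bundle
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, errors.New("cannot parse CA bundle")
	}
	tr := fulcioTrustRoot{
		caCertificates: pool,
		oidcIssuer:     "https://github.com/login/oauth", // placeholder issuer
		subjectEmail:   "signer@example.com",             // placeholder subject
	}
	if err := tr.validate(); err != nil {
		return nil, err
	}
	return tr.verifyFulcioCertificateAtTime(at, leafPEM, chainPEM)
}
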
diff --git a/signature/fulcio_cert_test.go b/signature/fulcio_cert_test.go
new file mode 100644
index 0000000..e283ae4
--- /dev/null
+++ b/signature/fulcio_cert_test.go
@@ -0,0 +1,476 @@
+package signature
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/pem"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/sigstore/fulcio/pkg/certificate"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+)
+
+// assert that the crypto.PublicKey matches the one in certPEM.
+func assertPublicKeyMatchesCert(t *testing.T, certPEM []byte, pk crypto.PublicKey) {
+ pkInterface, ok := pk.(interface {
+ Equal(x crypto.PublicKey) bool
+ })
+ require.True(t, ok)
+ certs, err := cryptoutils.UnmarshalCertificatesFromPEM(certPEM)
+ require.NoError(t, err)
+ require.Len(t, certs, 1)
+ equal := pkInterface.Equal(certs[0].PublicKey)
+ assert.True(t, equal)
+}
+
+func TestFulcioTrustRootValidate(t *testing.T) {
+ certs := x509.NewCertPool() // Empty is valid enough for our purposes.
+
+ for _, tr := range []fulcioTrustRoot{
+ {
+ caCertificates: certs,
+ oidcIssuer: "",
+ subjectEmail: "email",
+ },
+ {
+ caCertificates: certs,
+ oidcIssuer: "issuer",
+ subjectEmail: "",
+ },
+ } {
+ err := tr.validate()
+ assert.Error(t, err)
+ }
+
+ tr := fulcioTrustRoot{
+ caCertificates: certs,
+ oidcIssuer: "issuer",
+ subjectEmail: "email",
+ }
+ err := tr.validate()
+ assert.NoError(t, err)
+}
+
+// oidIssuerV1Ext creates a certificate.OIDIssuer extension
+func oidIssuerV1Ext(value string) pkix.Extension {
+ return pkix.Extension{
+ Id: certificate.OIDIssuer, //nolint:staticcheck // This is deprecated, but we must continue to accept it.
+ Value: []byte(value),
+ }
+}
+
+// asn1MarshalTest calls asn1.MarshalWithParams and requires it to succeed
+func asn1MarshalTest(t *testing.T, value any, params string) []byte {
+ bytes, err := asn1.MarshalWithParams(value, params)
+ require.NoError(t, err)
+ return bytes
+}
+
+// oidIssuerV2Ext creates a certificate.OIDIssuerV2 extension
+func oidIssuerV2Ext(t *testing.T, value string) pkix.Extension {
+ return pkix.Extension{
+ Id: certificate.OIDIssuerV2,
+ Value: asn1MarshalTest(t, value, "utf8"),
+ }
+}
+
+func TestFulcioIssuerInCertificate(t *testing.T) {
+ referenceTime := time.Now()
+ fulcioExtensions, err := certificate.Extensions{Issuer: "https://github.com/login/oauth"}.Render()
+ require.NoError(t, err)
+ for _, c := range []struct {
+ name string
+ extensions []pkix.Extension
+ errorFragment string
+ expected string
+ }{
+ {
+ name: "Missing issuer",
+ extensions: nil,
+ errorFragment: "Fulcio certificate is missing the issuer extension",
+ },
+ {
+ name: "Duplicate issuer v1 extension",
+ extensions: []pkix.Extension{
+ oidIssuerV1Ext("https://github.com/login/oauth"),
+ oidIssuerV1Ext("this does not match"),
+ },
+ // Match both our message and the Go 1.19 message: "certificate contains duplicate extensions"
+ errorFragment: "duplicate",
+ },
+ {
+ name: "Duplicate issuer v2 extension",
+ extensions: []pkix.Extension{
+ oidIssuerV2Ext(t, "https://github.com/login/oauth"),
+ oidIssuerV2Ext(t, "this does not match"),
+ },
+ // Match both our message and the Go 1.19 message: "certificate contains duplicate extensions"
+ errorFragment: "duplicate",
+ },
+ {
+ name: "Completely invalid issuer v2 extension - error parsing",
+ extensions: []pkix.Extension{
+ {
+ Id: certificate.OIDIssuerV2,
+ Value: asn1MarshalTest(t, 1, ""), // not a string type
+ },
+ },
+ errorFragment: "invalid ASN.1 in OIDC issuer v2 extension: asn1: structure error",
+ },
+ {
+ name: "Completely invalid issuer v2 extension - trailing data",
+ extensions: []pkix.Extension{
+ {
+ Id: certificate.OIDIssuerV2,
+ Value: append(slices.Clone(asn1MarshalTest(t, "https://", "utf8")), asn1MarshalTest(t, "example.com", "utf8")...),
+ },
+ },
+ errorFragment: "invalid ASN.1 in OIDC issuer v2 extension, trailing data",
+ },
+ {
+ name: "One valid issuer v1",
+ extensions: []pkix.Extension{oidIssuerV1Ext("https://github.com/login/oauth")},
+ expected: "https://github.com/login/oauth",
+ },
+ {
+ name: "One valid issuer v2",
+ extensions: []pkix.Extension{oidIssuerV2Ext(t, "https://github.com/login/oauth")},
+ expected: "https://github.com/login/oauth",
+ },
+ {
+ name: "Inconsistent issuer v1 and v2",
+ extensions: []pkix.Extension{
+ oidIssuerV1Ext("https://github.com/login/oauth"),
+ oidIssuerV2Ext(t, "this does not match"),
+ },
+ errorFragment: "inconsistent OIDC issuer extension values",
+ },
+ {
+ name: "Both issuer v1 and v2",
+ extensions: []pkix.Extension{
+ oidIssuerV1Ext("https://github.com/login/oauth"),
+ oidIssuerV2Ext(t, "https://github.com/login/oauth"),
+ },
+ expected: "https://github.com/login/oauth",
+ },
+ {
+ name: "Fulcio interoperability",
+ extensions: fulcioExtensions,
+ expected: "https://github.com/login/oauth",
+ },
+ } {
+ testLeafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, c.name)
+ testLeafSN, err := cryptoutils.GenerateSerialNumber()
+ require.NoError(t, err, c.name)
+ testLeafContents := x509.Certificate{
+ SerialNumber: testLeafSN,
+ Subject: pkix.Name{CommonName: "leaf"},
+ NotBefore: referenceTime.Add(-1 * time.Minute),
+ NotAfter: referenceTime.Add(1 * time.Hour),
+ ExtraExtensions: c.extensions,
+ EmailAddresses: []string{"test-user@example.com"},
+ }
+ // To be fairly representative, we do generate and parse a _real_ certificate, but we just use a self-signed certificate instead
+ // of bothering with a CA.
+ testLeafCert, err := x509.CreateCertificate(rand.Reader, &testLeafContents, &testLeafContents, testLeafKey.Public(), testLeafKey)
+ require.NoError(t, err, c.name)
+ testLeafPEM := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: testLeafCert,
+ })
+
+ parsedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(testLeafPEM)
+ if err != nil {
+ require.NotEqual(t, "", c.errorFragment)
+ assert.ErrorContains(t, err, c.errorFragment, c.name)
+ } else {
+ require.Len(t, parsedLeafCerts, 1)
+ parsedLeafCert := parsedLeafCerts[0]
+
+ res, err := fulcioIssuerInCertificate(parsedLeafCert)
+ if c.errorFragment == "" {
+ require.NoError(t, err, c.name)
+ assert.Equal(t, c.expected, res)
+ } else {
+ assert.ErrorContains(t, err, c.errorFragment, c.name)
+ assert.Equal(t, "", res)
+ }
+ }
+ }
+}
+
+func TestFulcioTrustRootVerifyFulcioCertificateAtTime(t *testing.T) {
+ fulcioCACertificates := x509.NewCertPool()
+ fulcioCABundlePEM, err := os.ReadFile("fixtures/fulcio_v1.crt.pem")
+ require.NoError(t, err)
+ ok := fulcioCACertificates.AppendCertsFromPEM(fulcioCABundlePEM)
+ require.True(t, ok)
+ fulcioCertBytes, err := os.ReadFile("fixtures/fulcio-cert")
+ require.NoError(t, err)
+ fulcioChainBytes, err := os.ReadFile("fixtures/fulcio-chain")
+ require.NoError(t, err)
+
+ // A successful verification
+ tr := fulcioTrustRoot{
+ caCertificates: fulcioCACertificates,
+ oidcIssuer: "https://github.com/login/oauth",
+ subjectEmail: "mitr@redhat.com",
+ }
+ pk, err := tr.verifyFulcioCertificateAtTime(time.Unix(1670870899, 0), fulcioCertBytes, fulcioChainBytes)
+ require.NoError(t, err)
+ assertPublicKeyMatchesCert(t, fulcioCertBytes, pk)
+
+ // Invalid intermediate certificates
+ pk, err = tr.verifyFulcioCertificateAtTime(time.Unix(1670870899, 0), fulcioCertBytes, []byte("not a certificate"))
+ assert.Error(t, err)
+ assert.Nil(t, pk)
+
+ // No intermediate certificates: verification fails as is …
+ pk, err = tr.verifyFulcioCertificateAtTime(time.Unix(1670870899, 0), fulcioCertBytes, []byte{})
+ assert.Error(t, err)
+ assert.Nil(t, pk)
+ // … but succeeds if we add the intermediate certificates to the root of trust
+ intermediateCertPool := x509.NewCertPool()
+ ok = intermediateCertPool.AppendCertsFromPEM(fulcioChainBytes)
+ require.True(t, ok)
+ trWithIntermediates := fulcioTrustRoot{
+ caCertificates: intermediateCertPool,
+ oidcIssuer: "https://github.com/login/oauth",
+ subjectEmail: "mitr@redhat.com",
+ }
+ pk, err = trWithIntermediates.verifyFulcioCertificateAtTime(time.Unix(1670870899, 0), fulcioCertBytes, []byte{})
+ require.NoError(t, err)
+ assertPublicKeyMatchesCert(t, fulcioCertBytes, pk)
+
+ // Invalid leaf certificate
+ for _, c := range [][]byte{
+ []byte("not a certificate"),
+ {}, // Empty
+ bytes.Repeat(fulcioCertBytes, 2), // More than one certificate
+ } {
+ pk, err := tr.verifyFulcioCertificateAtTime(time.Unix(1670870899, 0), c, fulcioChainBytes)
+ assert.Error(t, err)
+ assert.Nil(t, pk)
+ }
+
+ // Unexpected relevantTime
+ for _, tm := range []time.Time{
+ time.Date(2022, time.December, 12, 18, 48, 17, 0, time.UTC),
+ time.Date(2022, time.December, 12, 18, 58, 19, 0, time.UTC),
+ } {
+ pk, err := tr.verifyFulcioCertificateAtTime(tm, fulcioCertBytes, fulcioChainBytes)
+ assert.Error(t, err)
+ assert.Nil(t, pk)
+ }
+
+ referenceTime := time.Now()
+ testCAKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ testCASN, err := cryptoutils.GenerateSerialNumber()
+ require.NoError(t, err)
+ testCAContents := x509.Certificate{
+ SerialNumber: testCASN,
+ Subject: pkix.Name{CommonName: "root CA"},
+ NotBefore: referenceTime.Add(-1 * time.Minute),
+ NotAfter: referenceTime.Add(1 * time.Hour),
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+ testCACertBytes, err := x509.CreateCertificate(rand.Reader, &testCAContents, &testCAContents,
+ testCAKey.Public(), testCAKey)
+ require.NoError(t, err)
+ testCACert, err := x509.ParseCertificate(testCACertBytes)
+ require.NoError(t, err)
+ testCACertPool := x509.NewCertPool()
+ testCACertPool.AddCert(testCACert)
+
+ for _, c := range []struct {
+ name string
+ fn func(cert *x509.Certificate)
+ errorFragment string
+ }{
+ {
+ // OtherName SAN element, with none of the Go-parsed SAN elements present,
+ // should not be a reason to reject the certificate entirely;
+ // but we don’t actually support matching it, so this basically tests that the code
+ // gets far enough to do subject matching.
+ name: "OtherName in SAN",
+ fn: func(cert *x509.Certificate) {
+ // Setting SAN in ExtraExtensions causes EmailAddresses to be ignored,
+ // so we need to construct the whole SAN manually.
+ sansBytes, err := asn1.Marshal([]asn1.RawValue{
+ {
+ Class: 2,
+ Tag: 0,
+ IsCompound: false,
+ Bytes: []byte("otherName"),
+ },
+ })
+ require.NoError(t, err)
+ cert.ExtraExtensions = append(cert.ExtraExtensions, pkix.Extension{
+ Id: cryptoutils.SANOID,
+ Critical: true,
+ Value: sansBytes,
+ })
+ },
+ errorFragment: "Required email test-user@example.com not found",
+ },
+ { // Other completely unrecognized critical extensions still cause failures
+ name: "Unhandled critical extension",
+ fn: func(cert *x509.Certificate) {
+ cert.ExtraExtensions = append(cert.ExtraExtensions, pkix.Extension{
+ Id: asn1.ObjectIdentifier{2, 99999, 99998, 99997, 99996},
+ Critical: true,
+ Value: []byte("whatever"),
+ })
+ },
+ errorFragment: "unhandled critical extension",
+ },
+ {
+ name: "Missing issuer",
+ fn: func(cert *x509.Certificate) {
+ cert.ExtraExtensions = nil // Remove the issuer extension
+ },
+ errorFragment: "Fulcio certificate is missing the issuer extension",
+ },
+ {
+ name: "Duplicate issuer extension",
+ fn: func(cert *x509.Certificate) {
+ cert.ExtraExtensions = append([]pkix.Extension{oidIssuerV1Ext("this does not match")}, cert.ExtraExtensions...)
+ },
+ // Match both our message and the Go 1.19 message: "certificate contains duplicate extensions"
+ errorFragment: "duplicate",
+ },
+ {
+ name: "Issuer mismatch",
+ fn: func(cert *x509.Certificate) {
+ cert.ExtraExtensions = []pkix.Extension{oidIssuerV1Ext("this does not match")}
+ },
+ errorFragment: "Unexpected Fulcio OIDC issuer",
+ },
+ {
+ name: "Missing subject email",
+ fn: func(cert *x509.Certificate) {
+ cert.EmailAddresses = nil
+ },
+ errorFragment: "Required email test-user@example.com not found",
+ },
+ {
+ name: "Multiple emails, one matches",
+ fn: func(cert *x509.Certificate) {
+ cert.EmailAddresses = []string{"a@example.com", "test-user@example.com", "c@example.com"}
+ },
+ errorFragment: "",
+ },
+ {
+ name: "Email mismatch",
+ fn: func(cert *x509.Certificate) {
+ cert.EmailAddresses = []string{"a@example.com"}
+ },
+ errorFragment: "Required email test-user@example.com not found",
+ },
+ {
+ name: "Multiple emails, no matches",
+ fn: func(cert *x509.Certificate) {
+ cert.EmailAddresses = []string{"a@example.com", "b@example.com", "c@example.com"}
+ },
+ errorFragment: "Required email test-user@example.com not found",
+ },
+ } {
+ testLeafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, c.name)
+ testLeafSN, err := cryptoutils.GenerateSerialNumber()
+ require.NoError(t, err, c.name)
+ testLeafContents := x509.Certificate{
+ SerialNumber: testLeafSN,
+ Subject: pkix.Name{CommonName: "leaf"},
+ NotBefore: referenceTime.Add(-1 * time.Minute),
+ NotAfter: referenceTime.Add(1 * time.Hour),
+ ExtraExtensions: []pkix.Extension{oidIssuerV1Ext("https://github.com/login/oauth")},
+ EmailAddresses: []string{"test-user@example.com"},
+ }
+ c.fn(&testLeafContents)
+ testLeafCert, err := x509.CreateCertificate(rand.Reader, &testLeafContents, testCACert, testLeafKey.Public(), testCAKey)
+ require.NoError(t, err, c.name)
+ tr := fulcioTrustRoot{
+ caCertificates: testCACertPool,
+ oidcIssuer: "https://github.com/login/oauth",
+ subjectEmail: "test-user@example.com",
+ }
+ testLeafPEM := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: testLeafCert,
+ })
+ pk, err := tr.verifyFulcioCertificateAtTime(referenceTime, testLeafPEM, []byte{})
+ if c.errorFragment == "" {
+ require.NoError(t, err, c.name)
+ assertPublicKeyMatchesCert(t, testLeafPEM, pk)
+ } else {
+ assert.ErrorContains(t, err, c.errorFragment, c.name)
+ assert.Nil(t, pk, c.name)
+ }
+ }
+}
+
+func TestVerifyRekorFulcio(t *testing.T) {
+ caCertificates := x509.NewCertPool()
+ fulcioCABundlePEM, err := os.ReadFile("fixtures/fulcio_v1.crt.pem")
+ require.NoError(t, err)
+ ok := caCertificates.AppendCertsFromPEM(fulcioCABundlePEM)
+ require.True(t, ok)
+ certBytes, err := os.ReadFile("fixtures/fulcio-cert")
+ require.NoError(t, err)
+ chainBytes, err := os.ReadFile("fixtures/fulcio-chain")
+ require.NoError(t, err)
+ rekorKeyPEM, err := os.ReadFile("fixtures/rekor.pub")
+ require.NoError(t, err)
+ rekorKey, err := cryptoutils.UnmarshalPEMToPublicKey(rekorKeyPEM)
+ require.NoError(t, err)
+ rekorKeyECDSA, ok := rekorKey.(*ecdsa.PublicKey)
+ require.True(t, ok)
+ setBytes, err := os.ReadFile("fixtures/rekor-set")
+ require.NoError(t, err)
+ sigBase64, err := os.ReadFile("fixtures/rekor-sig")
+ require.NoError(t, err)
+ payloadBytes, err := os.ReadFile("fixtures/rekor-payload")
+ require.NoError(t, err)
+
+ // Success
+ pk, err := verifyRekorFulcio(rekorKeyECDSA, &fulcioTrustRoot{
+ caCertificates: caCertificates,
+ oidcIssuer: "https://github.com/login/oauth",
+ subjectEmail: "mitr@redhat.com",
+ }, setBytes, certBytes, chainBytes, string(sigBase64), payloadBytes)
+ require.NoError(t, err)
+ assertPublicKeyMatchesCert(t, certBytes, pk)
+
+ // Rekor failure
+ pk, err = verifyRekorFulcio(rekorKeyECDSA, &fulcioTrustRoot{
+ caCertificates: caCertificates,
+ oidcIssuer: "https://github.com/login/oauth",
+ subjectEmail: "mitr@redhat.com",
+ }, setBytes, certBytes, chainBytes, string(sigBase64), []byte("this payload does not match"))
+ assert.Error(t, err)
+ assert.Nil(t, pk)
+
+ // Fulcio failure
+ pk, err = verifyRekorFulcio(rekorKeyECDSA, &fulcioTrustRoot{
+ caCertificates: caCertificates,
+ oidcIssuer: "https://github.com/login/oauth",
+ subjectEmail: "this-does-not-match@example.com",
+ }, setBytes, certBytes, chainBytes, string(sigBase64), payloadBytes)
+ assert.Error(t, err)
+ assert.Nil(t, pk)
+}
diff --git a/signature/internal/errors.go b/signature/internal/errors.go
new file mode 100644
index 0000000..7872f0f
--- /dev/null
+++ b/signature/internal/errors.go
@@ -0,0 +1,15 @@
+package internal
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+// This is publicly visible as signature.InvalidSignatureError
+type InvalidSignatureError struct {
+ msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+ return err.msg
+}
+
+func NewInvalidSignatureError(msg string) InvalidSignatureError {
+ return InvalidSignatureError{msg: msg}
+}
diff --git a/signature/internal/errors_test.go b/signature/internal/errors_test.go
new file mode 100644
index 0000000..ee243d3
--- /dev/null
+++ b/signature/internal/errors_test.go
@@ -0,0 +1,14 @@
+package internal
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestInvalidSignatureError(t *testing.T) {
+ // A stupid test just to keep code coverage
+ s := "test"
+ err := NewInvalidSignatureError(s)
+ assert.Equal(t, s, err.Error())
+}
diff --git a/signature/internal/fixtures_info_test.go b/signature/internal/fixtures_info_test.go
new file mode 100644
index 0000000..8da249d
--- /dev/null
+++ b/signature/internal/fixtures_info_test.go
@@ -0,0 +1,15 @@
+package internal
+
+import "github.com/opencontainers/go-digest"
+
+const (
+ // TestImageManifestDigest is the Docker manifest digest of "image.manifest.json"
+ TestImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
+ // TestImageSignatureReference is the Docker image reference signed in "image.signature"
+ TestImageSignatureReference = "testing/manifest"
+
+ // TestSigstoreManifestDigest is the manifest digest of "valid.signature"
+ TestSigstoreManifestDigest = digest.Digest("sha256:634a8f35b5f16dcf4aaa0822adc0b1964bb786fca12f6831de8ddc45e5986a00")
+ // TestSigstoreSignatureReference is the Docker reference signed in "valid.signature"
+ TestSigstoreSignatureReference = "192.168.64.2:5000/cosign-signed-single-sample"
+)
diff --git a/signature/internal/json.go b/signature/internal/json.go
new file mode 100644
index 0000000..a9d127e
--- /dev/null
+++ b/signature/internal/json.go
@@ -0,0 +1,90 @@
+package internal
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/set"
+)
+
+// JSONFormatError is returned when JSON does not match expected format.
+type JSONFormatError string
+
+func (err JSONFormatError) Error() string {
+ return string(err)
+}
+
+// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). It uses fieldResolver to
+// determine the destination for each field value; fieldResolver should return a pointer to the destination for a valid key, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
+// we could use reflection to automate this. Later?
+func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
+ seenKeys := set.New[string]()
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return JSONFormatError(err.Error())
+ }
+ if t != json.Delim('{') {
+ return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+ }
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ return JSONFormatError(err.Error())
+ }
+ if t == json.Delim('}') {
+ break
+ }
+
+ key, ok := t.(string)
+ if !ok {
+ // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+ return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+ }
+ if seenKeys.Contains(key) {
+ return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+ }
+ seenKeys.Add(key)
+
+ valuePtr := fieldResolver(key)
+ if valuePtr == nil {
+ return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+ }
+ // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
+ if err := dec.Decode(valuePtr); err != nil {
+ return JSONFormatError(err.Error())
+ }
+ }
+ if _, err := dec.Token(); err != io.EOF {
+ return JSONFormatError("Unexpected data after JSON object")
+ }
+ return nil
+}
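+
+// As an illustrative sketch only (the keys "a" and "b" below are hypothetical, not used by any caller
+// in this package), a resolver for a fixed two-field object could look like:
+//
+//	var a string
+//	var b int64
+//	err := ParanoidUnmarshalJSONObject(data, func(key string) any {
+//		switch key {
+//		case "a":
+//			return &a
+//		case "b":
+//			return &b
+//		default:
+//			return nil // unknown keys make the whole object invalid
+//		}
+//	})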
+
+// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
+func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error {
+ seenKeys := set.New[string]()
+ if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
+ if valuePtr, ok := exactFields[key]; ok {
+ seenKeys.Add(key)
+ return valuePtr
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ for key := range exactFields {
+ if !seenKeys.Contains(key) {
+ return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+ }
+ }
+ return nil
+}
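+
+// By comparison, a minimal sketch of the exact-fields variant (again with hypothetical keys):
+//
+//	var name string
+//	var size int64
+//	err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+//		"name": &name,
+//		"size": &size,
+//	})
+//	// err is non-nil unless data is exactly an object with the keys "name" and "size".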
diff --git a/signature/internal/json_test.go b/signature/internal/json_test.go
new file mode 100644
index 0000000..f3a6ff0
--- /dev/null
+++ b/signature/internal/json_test.go
@@ -0,0 +1,138 @@
+package internal
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type mSA map[string]any // To minimize typing the long name
+
+// implementsUnmarshalJSON is a minimalistic type used to detect that
+// paranoidUnmarshalJSONObject uses the json.Unmarshaler interface of resolved
+// pointers.
+type implementsUnmarshalJSON bool
+
+// Compile-time check that *implementsUnmarshalJSON implements json.Unmarshaler.
+var _ json.Unmarshaler = (*implementsUnmarshalJSON)(nil)
+
+func (dest *implementsUnmarshalJSON) UnmarshalJSON(data []byte) error {
+ _ = data // We don't care, not really.
+ *dest = true // Mark handler as called
+ return nil
+}
+
+func TestParanoidUnmarshalJSONObject(t *testing.T) {
+ type testStruct struct {
+ A string
+ B int
+ }
+ ts := testStruct{}
+ var unmarshalJSONCalled implementsUnmarshalJSON
+ tsResolver := func(key string) any {
+ switch key {
+ case "a":
+ return &ts.A
+ case "b":
+ return &ts.B
+ case "implementsUnmarshalJSON":
+ return &unmarshalJSONCalled
+ default:
+ return nil
+ }
+ }
+
+ // Empty object
+ ts = testStruct{}
+ err := ParanoidUnmarshalJSONObject([]byte(`{}`), tsResolver)
+ require.NoError(t, err)
+ assert.Equal(t, testStruct{}, ts)
+
+ // Success
+ ts = testStruct{}
+ err = ParanoidUnmarshalJSONObject([]byte(`{"a":"x", "b":2}`), tsResolver)
+ require.NoError(t, err)
+ assert.Equal(t, testStruct{A: "x", B: 2}, ts)
+
+ // json.Unmarshaler is used for decoding values
+ ts = testStruct{}
+ unmarshalJSONCalled = implementsUnmarshalJSON(false)
+ err = ParanoidUnmarshalJSONObject([]byte(`{"implementsUnmarshalJSON":true}`), tsResolver)
+ require.NoError(t, err)
+ assert.Equal(t, unmarshalJSONCalled, implementsUnmarshalJSON(true))
+
+ // Various kinds of invalid input
+ for _, input := range []string{
+ ``, // Empty input
+ `&`, // Entirely invalid JSON
+ `1`, // Not an object
+ `{&}`, // Invalid key JSON
+ `{1:1}`, // Key not a string
+ `{"b":1, "b":1}`, // Duplicate key
+ `{"thisdoesnotexist":1}`, // Key rejected by resolver
+ `{"a":&}`, // Invalid value JSON
+ `{"a":1}`, // Type mismatch
+ `{"a":"value"}{}`, // Extra data after object
+ } {
+ ts = testStruct{}
+ err := ParanoidUnmarshalJSONObject([]byte(input), tsResolver)
+ assert.Error(t, err, input)
+ }
+}
+
+func TestParanoidUnmarshalJSONObjectExactFields(t *testing.T) {
+ var stringValue string
+ var float64Value float64
+ var rawValue json.RawMessage
+ var unmarshallCalled implementsUnmarshalJSON
+ exactFields := map[string]any{
+ "string": &stringValue,
+ "float64": &float64Value,
+ "raw": &rawValue,
+ "unmarshaller": &unmarshallCalled,
+ }
+
+ // Empty object
+ err := ParanoidUnmarshalJSONObjectExactFields([]byte(`{}`), map[string]any{})
+ require.NoError(t, err)
+
+ // Success
+ err = ParanoidUnmarshalJSONObjectExactFields([]byte(`{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`), exactFields)
+ require.NoError(t, err)
+ assert.Equal(t, "a", stringValue)
+ assert.Equal(t, 3.5, float64Value)
+ assert.Equal(t, json.RawMessage(`{"a":"b"}`), rawValue)
+ assert.Equal(t, implementsUnmarshalJSON(true), unmarshallCalled)
+
+ // Various kinds of invalid input
+ for _, input := range []string{
+ ``, // Empty input
+ `&`, // Entirely invalid JSON
+ `1`, // Not an object
+ `{&}`, // Invalid key JSON
+ `{1:1}`, // Key not a string
+ `{"string": "a", "string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Duplicate key
+ `{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true, "thisisunknown", 1}`, // Unknown key
+ `{"string": &, "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Invalid value JSON
+ `{"string": 1, "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Type mismatch
+ `{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}{}`, // Extra data after object
+ } {
+ err := ParanoidUnmarshalJSONObjectExactFields([]byte(input), exactFields)
+ assert.Error(t, err, input)
+ }
+}
+
+// Return the result of modifying validJSON with fn
+func modifiedJSON(t *testing.T, validJSON []byte, modifyFn func(mSA)) []byte {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ modifyFn(tmp)
+
+ modifiedJSON, err := json.Marshal(tmp)
+ require.NoError(t, err)
+ return modifiedJSON
+}
diff --git a/signature/internal/rekor_set.go b/signature/internal/rekor_set.go
new file mode 100644
index 0000000..d439b5f
--- /dev/null
+++ b/signature/internal/rekor_set.go
@@ -0,0 +1,237 @@
+package internal
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "time"
+
+ "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+ "github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema.
+// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies.
+const HashedRekordV001APIVersion = "0.0.1"
+
+// UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET
+// (note that this is a signature-specific format, not a format directly used by the Rekor API).
+// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder.
+type UntrustedRekorSET struct {
+ UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload
+ UntrustedPayload json.RawMessage
+}
+
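+// UntrustedRekorPayload is the parsed "Payload" field of an UntrustedRekorSET.
+// Like the SET itself, this is the signature-specific representation, not a raw Rekor API object.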
+type UntrustedRekorPayload struct {
+ Body []byte // In cosign, this is an any, but only a string works
+ IntegratedTime int64
+ LogIndex int64
+ LogID string
+}
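+
+// For illustration only, a Rekor SET attached to a sigstore signature has roughly this JSON shape
+// (values abbreviated; see the strict unmarshalers below for the exact accepted fields):
+//
+//	{
+//		"SignedEntryTimestamp": "MEYCIQDd…",
+//		"Payload": {
+//			"body": "eyJhcGlWZXJzaW9uIjoiMC4wLjEi…",
+//			"integratedTime": 1670870899,
+//			"logIndex": 8949589,
+//			"logID": "c0d23d6a…"
+//		}
+//	}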
+
+// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
+ err := s.strictUnmarshalJSON(data)
+ if err != nil {
+ if formatErr, ok := err.(JSONFormatError); ok {
+ err = NewInvalidSignatureError(formatErr.Error())
+ }
+ }
+ return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
+ return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
+ "Payload": &s.UntrustedPayload,
+ })
+}
+
+// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implement json.Marshaler
+var _ json.Marshaler = UntrustedRekorSET{}
+var _ json.Marshaler = (*UntrustedRekorSET)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]any{
+ "SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
+ "Payload": s.UntrustedPayload,
+ })
+}
+
+// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
+ err := p.strictUnmarshalJSON(data)
+ if err != nil {
+ if formatErr, ok := err.(JSONFormatError); ok {
+ err = NewInvalidSignatureError(formatErr.Error())
+ }
+ }
+ return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
+ return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "body": &p.Body,
+ "integratedTime": &p.IntegratedTime,
+ "logIndex": &p.LogIndex,
+ "logID": &p.LogID,
+ })
+}
+
+// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implement json.Marshaler
+var _ json.Marshaler = UntrustedRekorPayload{}
+var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]any{
+ "body": p.Body,
+ "integratedTime": p.IntegratedTime,
+ "logIndex": p.LogIndex,
+ "logID": p.LogID,
+ })
+}
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
+// Returns bundle upload time on success.
+func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+ // FIXME: Should the publicKey parameter hard-code ecdsa?
+
+ // == Parse SET bytes
+ var untrustedSET UntrustedRekorSET
+ // Sadly, we need to parse and transform untrusted data before verifying a cryptographic signature...
+ if err := json.Unmarshal(unverifiedRekorSET, &untrustedSET); err != nil {
+ return time.Time{}, NewInvalidSignatureError(err.Error())
+ }
+ // == Verify SET signature
+ // Cosign unmarshals and re-marshals UntrustedPayload; that seems unnecessary,
+ // assuming jsoncanonicalizer is designed to operate on untrusted data.
+ untrustedSETPayloadCanonicalBytes, err := jsoncanonicalizer.Transform(untrustedSET.UntrustedPayload)
+ if err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
+ }
+ untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
+ if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+ return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
+ }
+
+ // == Parse SET payload
+ // Parse the cryptographically-verified canonicalized variant, NOT the originally-delivered representation,
+ // to decrease risk of exploiting the JSON parser. Note that if there were an arbitrary execution vulnerability, the attacker
+ // could have exploited the parsing of unverifiedRekorSET above already; so this, at best, ensures more consistent processing
+ // of the SET payload.
+ var rekorPayload UntrustedRekorPayload
+ if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error()))
+ }
+ // FIXME: Use a different decoder implementation? The Swagger-generated code is kinda ridiculous, with the need to re-marshal
+ // hashedRekor.Spec and so on.
+ // Especially if we anticipate needing to decode different data formats…
+ // That would also allow being much more strict about JSON.
+ //
+ // Alternatively, rely on the existing .Validate() methods instead of manually checking for nil all over the place.
+ var hashedRekord models.Hashedrekord
+ if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err))
+ }
+ // The decode of models.Hashedrekord validates the "kind": "hashedrekord" field, which is otherwise invisible to us.
+ if hashedRekord.APIVersion == nil {
+ return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version")
+ }
+ if *hashedRekord.APIVersion != HashedRekordV001APIVersion {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion))
+ }
+ hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
+ if err != nil {
+ // Coverage: hashedRekord.Spec is an any that was just unmarshaled,
+ // so this should never fail.
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
+ }
+ var hashedRekordV001 models.HashedrekordV001Schema
+ if err := json.Unmarshal(hashedRekordV001Bytes, &hashedRekordV001); err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekod spec: %v", err))
+ }
+
+ // == Match unverifiedKeyOrCertBytes
+ if hashedRekordV001.Signature == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "signature" field in hashedrekord`)
+ }
+ if hashedRekordV001.Signature.PublicKey == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "signature.publicKey" field in hashedrekord`)
+ }
+ rekorKeyOrCertPEM, rest := pem.Decode(hashedRekordV001.Signature.PublicKey.Content)
+ if rekorKeyOrCertPEM == nil {
+ return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET is not in PEM format")
+ }
+ if len(rest) != 0 {
+ return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET has trailing data")
+ }
+ // FIXME: For public keys, let the caller provide the DER-formatted blob instead
+ // of round-tripping through PEM.
+ unverifiedKeyOrCertPEM, rest := pem.Decode(unverifiedKeyOrCertBytes)
+ if unverifiedKeyOrCertPEM == nil {
+ return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET is not in PEM format")
+ }
+ if len(rest) != 0 {
+ return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET has trailing data")
+ }
+ // NOTE: This compares the PEM payload, but not the object type or headers.
+ if !bytes.Equal(rekorKeyOrCertPEM.Bytes, unverifiedKeyOrCertPEM.Bytes) {
+ return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET does not match")
+ }
+ // == Match unverifiedSignatureBytes
+ unverifiedSignatureBytes, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+ if err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding signature base64: %v", err))
+ }
+ if !bytes.Equal(hashedRekordV001.Signature.Content, unverifiedSignatureBytes) {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("signature in Rekor SET does not match: %#v vs. %#v",
+ string(hashedRekordV001.Signature.Content), string(unverifiedSignatureBytes)))
+ }
+
+ // == Match unverifiedPayloadBytes
+ if hashedRekordV001.Data == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "data" field in hashedrekord`)
+ }
+ if hashedRekordV001.Data.Hash == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "data.hash" field in hashedrekord`)
+ }
+ if hashedRekordV001.Data.Hash.Algorithm == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
+ }
+ if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
+ }
+ if hashedRekordV001.Data.Hash.Value == nil {
+ return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.value" field in hashedrekord`)
+ }
+ rekorPayloadHash, err := hex.DecodeString(*hashedRekordV001.Data.Hash.Value)
+ if err != nil {
+ return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Invalid "data.hash.value" field in hashedrekord: %v`, err))
+ }
+ unverifiedPayloadHash := sha256.Sum256(unverifiedPayloadBytes)
+ if !bytes.Equal(rekorPayloadHash, unverifiedPayloadHash[:]) {
+ return time.Time{}, NewInvalidSignatureError("payload in Rekor SET does not match")
+ }
+
+ // == All OK; return the relevant time.
+ return time.Unix(rekorPayload.IntegratedTime, 0), nil
+}
diff --git a/signature/internal/rekor_set_test.go b/signature/internal/rekor_set_test.go
new file mode 100644
index 0000000..0cc8483
--- /dev/null
+++ b/signature/internal/rekor_set_test.go
@@ -0,0 +1,400 @@
+package internal
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// Verify that input can be unmarshaled as an UntrustedRekorSET.
+func successfullyUnmarshalUntrustedRekorSET(t *testing.T, input []byte) UntrustedRekorSET {
+ var s UntrustedRekorSET
+ err := json.Unmarshal(input, &s)
+ require.NoError(t, err, string(input))
+
+ return s
+}
+
+// Verify that input can't be unmarshaled as an UntrustedRekorSET.
+func assertUnmarshalUntrustedRekorSETFails(t *testing.T, input []byte) {
+ var s UntrustedRekorSET
+ err := json.Unmarshal(input, &s)
+ assert.Error(t, err, string(input))
+}
+
+func TestUntrustedRekorSETUnmarshalJSON(t *testing.T) {
+ // Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
+ // UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
+ assertUnmarshalUntrustedRekorSETFails(t, []byte("&"))
+ var s UntrustedRekorSET
+ err := s.UnmarshalJSON([]byte("&"))
+ assert.Error(t, err)
+
+ // Not an object
+ assertUnmarshalUntrustedRekorSETFails(t, []byte("1"))
+
+ // Start with a valid JSON.
+ validSET := UntrustedRekorSET{
+ UntrustedSignedEntryTimestamp: []byte("signedTimestamp#@!"),
+ UntrustedPayload: json.RawMessage(`["payload#@!"]`),
+ }
+ validJSON, err := json.Marshal(validSET)
+ require.NoError(t, err)
+
+ // Success
+ s = successfullyUnmarshalUntrustedRekorSET(t, validJSON)
+ assert.Equal(t, validSET, s)
+
+ // A /usr/bin/cosign-generated payload is handled correctly
+ setBytes, err := os.ReadFile("testdata/rekor-set")
+ require.NoError(t, err)
+ s = successfullyUnmarshalUntrustedRekorSET(t, setBytes)
+ expectedSET, err := base64.StdEncoding.DecodeString(`MEYCIQDdeujdGLpMTgFdew9wsSJ3WF7olX9PawgzGeX2RmJd8QIhAPxGJf+HjUFVpQc0hgPaUSK8LsONJ08fZFEBVKDeLj4S`)
+ require.NoError(t, err)
+ assert.Equal(t, UntrustedRekorSET{
+ UntrustedSignedEntryTimestamp: expectedSET,
+ UntrustedPayload: []byte(`{"body":"eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoiaGFzaGVkcmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiIwZmQxZTk4MzJjYzVhNWY1MDJlODAwZmU5Y2RlZWZiZDMxMzYyZGYxNmZlOGMyMjUwZDMwOGFlYTNmYjFmYzY5In19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FUUNJRUpjOTZlMDQxVkFoS0EwM1N2ZkNZYldvZElNSVFQeUF0V3lEUDRGblBxcEFpQWFJUzYwRWpoUkRoU2Fub0Zzb0l5OGZLcXFLZVc1cHMvdExYU0dwYXlpMmc9PSIsInB1YmxpY0tleSI6eyJjb250ZW50IjoiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTnVWRU5EUVdsUFowRjNTVUpCWjBsVlJ6UTFkV0ZETW5vNFZuWjFUM2Q2ZW0wM09WSlFabU5yYjNoM2QwTm5XVWxMYjFwSmVtb3dSVUYzVFhjS1RucEZWazFDVFVkQk1WVkZRMmhOVFdNeWJHNWpNMUoyWTIxVmRWcEhWakpOVWpSM1NFRlpSRlpSVVVSRmVGWjZZVmRrZW1SSE9YbGFVekZ3WW01U2JBcGpiVEZzV2tkc2FHUkhWWGRJYUdOT1RXcEplRTFxUlhsTlZHY3dUMFJGTkZkb1kwNU5ha2w0VFdwRmVVMVVaekZQUkVVMFYycEJRVTFHYTNkRmQxbElDa3R2V2tsNmFqQkRRVkZaU1V0dldrbDZhakJFUVZGalJGRm5RVVZLUW5WeWEyaFRVbTFJZFd0SVZtZ3pWa0U0YmxsMVUxZHhXalJzZEdGbVJIZDVWMndLWXpOak9VNWhURzkyYlhVclRrTTBUbWxWZEUxTWFXWk1MMUF6Ym5GbFpHSnVZM1JMUW5WWmJXWkpVMGRwV214V2VVdFBRMEZWU1hkblowVXJUVUUwUndwQk1WVmtSSGRGUWk5M1VVVkJkMGxJWjBSQlZFSm5UbFpJVTFWRlJFUkJTMEpuWjNKQ1owVkdRbEZqUkVGNlFXUkNaMDVXU0ZFMFJVWm5VVlZCYlhaSENqUkxObXhPYXk5emF5OW1OR0ZwWVdocVdWSnhVaXRqZDBoM1dVUldVakJxUWtKbmQwWnZRVlV6T1ZCd2VqRlphMFZhWWpWeFRtcHdTMFpYYVhocE5Ga0tXa1E0ZDBoUldVUldVakJTUVZGSUwwSkNUWGRGV1VWUVlsZHNNR05yUW5sYVYxSnZXVmhSZFZreU9YUk5RM2RIUTJselIwRlJVVUpuTnpoM1FWRkZSUXBJYldnd1pFaENlazlwT0haYU1td3dZVWhXYVV4dFRuWmlVemx6WWpKa2NHSnBPWFpaV0ZZd1lVUkRRbWxSV1V0TGQxbENRa0ZJVjJWUlNVVkJaMUkzQ2tKSWEwRmtkMEl4UVU0d09VMUhja2Q0ZUVWNVdYaHJaVWhLYkc1T2QwdHBVMncyTkROcWVYUXZOR1ZMWTI5QmRrdGxOazlCUVVGQ2FGRmxjV3gzVlVFS1FVRlJSRUZGV1hkU1FVbG5WbWt5VTNaT05WSmxSMWxwVDFOb1dUaE1SbE5TUnpWRU5tOUJWRXR4U0dJMmEwNHZSRXBvTW5KQlZVTkpRVTVUTmtGeGNBcDRZVmhwU0hkVVNGVnlNM2hRVTBkaE5XazJhSGwzYldKaVVrTTJUakJyU1dWRVRUWk5RVzlIUTBOeFIxTk5ORGxDUVUxRVFUSm5RVTFIVlVOTlEweDFDbU5hZEVWVFNVNHdiRzAyTkVOdkwySmFOamhEUTFKclYyeHJkRmcwYlcxS2FWSm9TMms1WXpsUlJEWXlRelZUZFZwb1l6QjJkbTgyVFU5TGJWUlJTWGdLUVVsdkwwMXZlbHBsYjFVM2NtUk9hakJ3V2t0MVFtVkRiVTF4YlVwaFJGTnpkekU1ZEV0cEwySXhjRVZ0ZFhjclUyWXlRa2t5TlVkblNXSkxlblJITVFvNWR6MDlDaTB0TFMwdFJVNUVJRU5GVWxSSlJrbERRVlJGTFMwdExTMEsifX19fQ==","integratedTime":1670870899,"logIndex":8949589,"logID":"c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d"}`),
+ }, s)
+
+ // Various ways to corrupt the JSON
+ breakFns := []func(mSA){
+ // A top-level field is missing
+ func(v mSA) { delete(v, "SignedEntryTimestamp") },
+ func(v mSA) { delete(v, "Payload") },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // "SignedEntryTimestamp" not a string
+ func(v mSA) { v["critical"] = 1 },
+ // "Payload" not an object
+ func(v mSA) { v["optional"] = 1 },
+ }
+ for _, fn := range breakFns {
+ testJSON := modifiedJSON(t, validJSON, fn)
+ assertUnmarshalUntrustedRekorSETFails(t, testJSON)
+ }
+}
+
+// Verify that input can be unmarshaled as an UntrustedRekorPayload.
+func successfullyUnmarshalUntrustedRekorPayload(t *testing.T, input []byte) UntrustedRekorPayload {
+ var s UntrustedRekorPayload
+ err := json.Unmarshal(input, &s)
+ require.NoError(t, err, string(input))
+
+ return s
+}
+
+// Verify that input can't be unmarshaled as an UntrustedRekorPayload.
+func assertUnmarshalUntrustedRekorPayloadFails(t *testing.T, input []byte) {
+ var s UntrustedRekorPayload
+ err := json.Unmarshal(input, &s)
+ assert.Error(t, err, string(input))
+}
+
+func TestUntrustedRekorPayloadUnmarshalJSON(t *testing.T) {
+ // Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
+ // UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
+ assertUnmarshalUntrustedRekorPayloadFails(t, []byte("&"))
+ var p UntrustedRekorPayload
+ err := p.UnmarshalJSON([]byte("&"))
+ assert.Error(t, err)
+
+ // Not an object
+ assertUnmarshalUntrustedRekorPayloadFails(t, []byte("1"))
+
+ // Start with a valid JSON.
+ validPayload := UntrustedRekorPayload{
+ Body: []byte(`["json"]`),
+ IntegratedTime: 1,
+ LogIndex: 2,
+ LogID: "abc",
+ }
+ validJSON, err := validPayload.MarshalJSON()
+ require.NoError(t, err)
+
+ // Success
+ p = successfullyUnmarshalUntrustedRekorPayload(t, validJSON)
+ assert.Equal(t, validPayload, p)
+
+ // A /usr/bin/cosign-generated payload is handled correctly
+ setBytes, err := os.ReadFile("testdata/rekor-set")
+ require.NoError(t, err)
+ s := successfullyUnmarshalUntrustedRekorSET(t, setBytes)
+ p = successfullyUnmarshalUntrustedRekorPayload(t, s.UntrustedPayload)
+ expectedBody, err := base64.StdEncoding.DecodeString(`eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoiaGFzaGVkcmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiIwZmQxZTk4MzJjYzVhNWY1MDJlODAwZmU5Y2RlZWZiZDMxMzYyZGYxNmZlOGMyMjUwZDMwOGFlYTNmYjFmYzY5In19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FUUNJRUpjOTZlMDQxVkFoS0EwM1N2ZkNZYldvZElNSVFQeUF0V3lEUDRGblBxcEFpQWFJUzYwRWpoUkRoU2Fub0Zzb0l5OGZLcXFLZVc1cHMvdExYU0dwYXlpMmc9PSIsInB1YmxpY0tleSI6eyJjb250ZW50IjoiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTnVWRU5EUVdsUFowRjNTVUpCWjBsVlJ6UTFkV0ZETW5vNFZuWjFUM2Q2ZW0wM09WSlFabU5yYjNoM2QwTm5XVWxMYjFwSmVtb3dSVUYzVFhjS1RucEZWazFDVFVkQk1WVkZRMmhOVFdNeWJHNWpNMUoyWTIxVmRWcEhWakpOVWpSM1NFRlpSRlpSVVVSRmVGWjZZVmRrZW1SSE9YbGFVekZ3WW01U2JBcGpiVEZzV2tkc2FHUkhWWGRJYUdOT1RXcEplRTFxUlhsTlZHY3dUMFJGTkZkb1kwNU5ha2w0VFdwRmVVMVVaekZQUkVVMFYycEJRVTFHYTNkRmQxbElDa3R2V2tsNmFqQkRRVkZaU1V0dldrbDZhakJFUVZGalJGRm5RVVZLUW5WeWEyaFRVbTFJZFd0SVZtZ3pWa0U0YmxsMVUxZHhXalJzZEdGbVJIZDVWMndLWXpOak9VNWhURzkyYlhVclRrTTBUbWxWZEUxTWFXWk1MMUF6Ym5GbFpHSnVZM1JMUW5WWmJXWkpVMGRwV214V2VVdFBRMEZWU1hkblowVXJUVUUwUndwQk1WVmtSSGRGUWk5M1VVVkJkMGxJWjBSQlZFSm5UbFpJVTFWRlJFUkJTMEpuWjNKQ1owVkdRbEZqUkVGNlFXUkNaMDVXU0ZFMFJVWm5VVlZCYlhaSENqUkxObXhPYXk5emF5OW1OR0ZwWVdocVdWSnhVaXRqZDBoM1dVUldVakJxUWtKbmQwWnZRVlV6T1ZCd2VqRlphMFZhWWpWeFRtcHdTMFpYYVhocE5Ga0tXa1E0ZDBoUldVUldVakJTUVZGSUwwSkNUWGRGV1VWUVlsZHNNR05yUW5sYVYxSnZXVmhSZFZreU9YUk5RM2RIUTJselIwRlJVVUpuTnpoM1FWRkZSUXBJYldnd1pFaENlazlwT0haYU1td3dZVWhXYVV4dFRuWmlVemx6WWpKa2NHSnBPWFpaV0ZZd1lVUkRRbWxSV1V0TGQxbENRa0ZJVjJWUlNVVkJaMUkzQ2tKSWEwRmtkMEl4UVU0d09VMUhja2Q0ZUVWNVdYaHJaVWhLYkc1T2QwdHBVMncyTkROcWVYUXZOR1ZMWTI5QmRrdGxOazlCUVVGQ2FGRmxjV3gzVlVFS1FVRlJSRUZGV1hkU1FVbG5WbWt5VTNaT05WSmxSMWxwVDFOb1dUaE1SbE5TUnpWRU5tOUJWRXR4U0dJMmEwNHZSRXBvTW5KQlZVTkpRVTVUTmtGeGNBcDRZVmhwU0hkVVNGVnlNM2hRVTBkaE5XazJhSGwzYldKaVVrTTJUakJyU1dWRVRUWk5RVzlIUTBOeFIxTk5ORGxDUVUxRVFUSm5RVTFIVlVOTlEweDFDbU5hZEVWVFNVNHdiRzAyTkVOdkwySmFOamhEUTFKclYyeHJkRmcwYlcxS2FWSm9TMms1WXpsUlJEWXlRelZUZFZwb1l6QjJkbTgyVFU5TGJWUlJTWGdLUVVsdkwwMXZlbHBsYjFVM2NtUk9hakJ3V2t0MVFtVkRiVTF4YlVwaFJGTnpkekU1ZEV0cEwySXhjRVZ0ZFhjclUyWXlRa2t5TlVkblNXSkxlblJITVFvNWR6MDlDaTB0TFMwdFJVNUVJRU5GVWxSSlJrbERRVlJGTFMwdExTMEsifX19fQ==`)
+ require.NoError(t, err)
+ assert.Equal(t, UntrustedRekorPayload{
+ Body: expectedBody,
+ IntegratedTime: 1670870899,
+ LogIndex: 8949589,
+ LogID: "c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d",
+ }, p)
+
+ // Various ways to corrupt the JSON
+ breakFns := []func(mSA){
+ // A top-level field is missing
+ func(v mSA) { delete(v, "body") },
+ func(v mSA) { delete(v, "integratedTime") },
+ func(v mSA) { delete(v, "logIndex") },
+ func(v mSA) { delete(v, "logID") },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // "body" not a string
+ func(v mSA) { v["body"] = 1 },
+ // "integratedTime" not an integer
+ func(v mSA) { v["integratedTime"] = "hello" },
+ // "logIndex" not an integer
+ func(v mSA) { v["logIndex"] = "hello" },
+ // "logID" not a string
+ func(v mSA) { v["logID"] = 1 },
+ }
+ for _, fn := range breakFns {
+ testJSON := modifiedJSON(t, validJSON, fn)
+ assertUnmarshalUntrustedRekorPayloadFails(t, testJSON)
+ }
+}
+
+func TestVerifyRekorSET(t *testing.T) {
+ cosignRekorKeyPEM, err := os.ReadFile("testdata/rekor.pub")
+ require.NoError(t, err)
+ cosignRekorKey, err := cryptoutils.UnmarshalPEMToPublicKey(cosignRekorKeyPEM)
+ require.NoError(t, err)
+ cosignRekorKeyECDSA, ok := cosignRekorKey.(*ecdsa.PublicKey)
+ require.True(t, ok)
+ cosignSETBytes, err := os.ReadFile("testdata/rekor-set")
+ require.NoError(t, err)
+ cosignCertBytes, err := os.ReadFile("testdata/rekor-cert")
+ require.NoError(t, err)
+ cosignSigBase64, err := os.ReadFile("testdata/rekor-sig")
+ require.NoError(t, err)
+ cosignPayloadBytes, err := os.ReadFile("testdata/rekor-payload")
+ require.NoError(t, err)
+
+ // Successful verification
+ tm, err := VerifyRekorSET(cosignRekorKeyECDSA, cosignSETBytes, cosignCertBytes, string(cosignSigBase64), cosignPayloadBytes)
+ require.NoError(t, err)
+ assert.Equal(t, time.Unix(1670870899, 0), tm)
+
+ // For extra paranoia, test that we return a zero time on error.
+
+ // A completely invalid SET.
+ tm, err = VerifyRekorSET(cosignRekorKeyECDSA, []byte{}, cosignCertBytes, string(cosignSigBase64), cosignPayloadBytes)
+ assert.Error(t, err)
+ assert.Zero(t, tm)
+
+ tm, err = VerifyRekorSET(cosignRekorKeyECDSA, []byte("invalid signature"), cosignCertBytes, string(cosignSigBase64), cosignPayloadBytes)
+ assert.Error(t, err)
+ assert.Zero(t, tm)
+
+ testKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ testSigner, err := sigstoreSignature.LoadECDSASigner(testKey, crypto.SHA256)
+ require.NoError(t, err)
+
+ // JSON canonicalization fails:
+ // This payload is invalid because it has duplicate fields.
+ // Right now, that particular failure (unlike more blatantly invalid JSON) is allowed
+ // by json.Marshal, but detected by jsoncanonicalizer.Transform.
+ invalidPayload := []byte(`{"logIndex":1, "integratedTime":2,"body":"abc","logID":"def","body":"ABC"}`)
+ invalidPayloadSig, err := testSigner.SignMessage(bytes.NewReader(invalidPayload))
+ require.NoError(t, err)
+ invalidSET, err := json.Marshal(UntrustedRekorSET{
+ UntrustedSignedEntryTimestamp: invalidPayloadSig,
+ UntrustedPayload: json.RawMessage(invalidPayload),
+ })
+ require.NoError(t, err)
+ tm, err = VerifyRekorSET(&testKey.PublicKey, invalidSET, cosignCertBytes, string(cosignSigBase64), cosignPayloadBytes)
+ assert.Error(t, err)
+ assert.Zero(t, tm)
+
+ // Cryptographic verification fails (a mismatched public key)
+ tm, err = VerifyRekorSET(&testKey.PublicKey, cosignSETBytes, cosignCertBytes, string(cosignSigBase64), cosignPayloadBytes)
+ assert.Error(t, err)
+ assert.Zero(t, tm)
+
+ // Parsing UntrustedRekorPayload fails
+ invalidPayload = []byte(`{}`)
+ invalidPayloadSig, err = testSigner.SignMessage(bytes.NewReader(invalidPayload))
+ require.NoError(t, err)
+ invalidSET, err = json.Marshal(UntrustedRekorSET{
+ UntrustedSignedEntryTimestamp: invalidPayloadSig,
+ UntrustedPayload: json.RawMessage(invalidPayload),
+ })
+ require.NoError(t, err)
+ tm, err = VerifyRekorSET(&testKey.PublicKey, invalidSET, cosignCertBytes, string(cosignSigBase64), cosignPayloadBytes)
+ assert.Error(t, err)
+ assert.Zero(t, tm)
+
+ // A correctly signed UntrustedRekorPayload is invalid
+ cosignPayloadSHA256 := sha256.Sum256(cosignPayloadBytes)
+ cosignSigBytes, err := base64.StdEncoding.DecodeString(string(cosignSigBase64))
+ require.NoError(t, err)
+ validHashedRekord := models.Hashedrekord{
+ APIVersion: swag.String(HashedRekordV001APIVersion),
+ Spec: models.HashedrekordV001Schema{
+ Data: &models.HashedrekordV001SchemaData{
+ Hash: &models.HashedrekordV001SchemaDataHash{
+ Algorithm: swag.String(models.HashedrekordV001SchemaDataHashAlgorithmSha256),
+ Value: swag.String(hex.EncodeToString(cosignPayloadSHA256[:])),
+ },
+ },
+ Signature: &models.HashedrekordV001SchemaSignature{
+ Content: strfmt.Base64(cosignSigBytes),
+ PublicKey: &models.HashedrekordV001SchemaSignaturePublicKey{
+ Content: strfmt.Base64(cosignCertBytes),
+ },
+ },
+ },
+ }
+ validHashedRekordJSON, err := json.Marshal(validHashedRekord)
+ require.NoError(t, err)
+ for _, fn := range []func(mSA){
+ // A Hashedrekord field is missing
+ func(v mSA) { delete(v, "apiVersion") },
+ func(v mSA) { delete(v, "kind") }, // "kind" is not visible in the type definition, but required by the implementation
+ func(v mSA) { delete(v, "spec") },
+ // This, along with many other extra fields, is currently accepted. That is NOT an API commitment.
+ // func(v mSA) { v["unexpected"] = 1 }, // Extra top-level field:
+ // Invalid apiVersion
+ func(v mSA) { v["apiVersion"] = nil },
+ func(v mSA) { v["apiVersion"] = 1 },
+ func(v mSA) { v["apiVersion"] = mSA{} },
+ func(v mSA) { v["apiVersion"] = "99.0.99" },
+ // Invalid kind
+ func(v mSA) { v["kind"] = nil },
+ func(v mSA) { v["kind"] = 1 },
+ func(v mSA) { v["kind"] = "notHashedRekord" },
+ // Invalid spec
+ func(v mSA) { v["spec"] = nil },
+ func(v mSA) { v["spec"] = 1 },
+ // A HashedRekordV001Schema field is missing
+ func(v mSA) { delete(x(v, "spec"), "data") },
+ func(v mSA) { delete(x(v, "spec"), "signature") },
+ // Invalid spec.data
+ func(v mSA) { x(v, "spec")["data"] = nil },
+ func(v mSA) { x(v, "spec")["data"] = 1 },
+ // Missing spec.data.hash
+ func(v mSA) { delete(x(v, "spec", "data"), "hash") },
+ // Invalid spec.data.hash
+ func(v mSA) { x(v, "spec", "data")["hash"] = nil },
+ func(v mSA) { x(v, "spec", "data")["hash"] = 1 },
+ // A spec.data.hash field is missing
+ func(v mSA) { delete(x(v, "spec", "data", "hash"), "algorithm") },
+ func(v mSA) { delete(x(v, "spec", "data", "hash"), "value") },
+ // Invalid spec.data.hash.algorithm
+ func(v mSA) { x(v, "spec", "data", "hash")["algorithm"] = nil },
+ func(v mSA) { x(v, "spec", "data", "hash")["algorithm"] = 1 },
+ // Invalid spec.data.hash.value
+ func(v mSA) { x(v, "spec", "data", "hash")["value"] = nil },
+ func(v mSA) { x(v, "spec", "data", "hash")["value"] = 1 },
+ func(v mSA) { // An odd number of hexadecimal digits
+ x(v, "spec", "data", "hash")["value"] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ },
+ // spec.data.hash does not match
+ func(v mSA) {
+ x(v, "spec", "data", "hash")["value"] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ },
+ // A non-sha256 hash
+ func(v mSA) {
+ x(v, "spec", "data", "hash")["algorithm"] = "sha512"
+ h := sha512.Sum512(cosignPayloadBytes)
+ x(v, "spec", "data", "hash")["value"] = hex.EncodeToString(h[:])
+ },
+ // Invalid spec.signature
+ func(v mSA) { x(v, "spec")["signature"] = nil },
+ func(v mSA) { x(v, "spec")["signature"] = 1 },
+ // A spec.signature field is missing
+ func(v mSA) { delete(x(v, "spec", "signature"), "content") },
+ func(v mSA) { delete(x(v, "spec", "signature"), "publicKey") },
+ // Invalid spec.signature.content
+ func(v mSA) { x(v, "spec", "signature")["content"] = nil },
+ func(v mSA) { x(v, "spec", "signature")["content"] = 1 },
+ func(v mSA) { x(v, "spec", "signature")["content"] = "" },
+ func(v mSA) { x(v, "spec", "signature")["content"] = "+" }, // Invalid base64
+ // spec.signature.content does not match
+ func(v mSA) {
+ x(v, "spec", "signature")["content"] = base64.StdEncoding.EncodeToString([]byte("does not match"))
+ },
+ // Invalid spec.signature.publicKey
+ func(v mSA) { x(v, "spec", "signature")["publicKey"] = nil },
+ func(v mSA) { x(v, "spec", "signature")["publicKey"] = 1 },
+ // Missing spec.signature.publicKey.content
+ func(v mSA) { delete(x(v, "spec", "signature", "publicKey"), "content") },
+ // Invalid spec.signature.publicKey.content
+ func(v mSA) { x(v, "spec", "signature", "publicKey")["content"] = nil },
+ func(v mSA) { x(v, "spec", "signature", "publicKey")["content"] = 1 },
+ func(v mSA) { x(v, "spec", "signature", "publicKey")["content"] = "" },
+ func(v mSA) { x(v, "spec", "signature", "publicKey")["content"] = "+" }, // Invalid base64
+ func(v mSA) {
+ x(v, "spec", "signature", "publicKey")["content"] = base64.StdEncoding.EncodeToString([]byte("not PEM"))
+ },
+ func(v mSA) { // Multiple PEM blocks
+ x(v, "spec", "signature", "publicKey")["content"] = base64.StdEncoding.EncodeToString(bytes.Repeat(cosignCertBytes, 2))
+ },
+ // spec.signature.publicKey.content does not match
+ func(v mSA) {
+ otherKey, err := testSigner.PublicKey()
+ require.NoError(t, err)
+ otherPEM, err := cryptoutils.MarshalPublicKeyToPEM(otherKey)
+ require.NoError(t, err)
+ x(v, "spec", "signature", "publicKey")["content"] = base64.StdEncoding.EncodeToString(otherPEM)
+ },
+ } {
+ testHashedRekordJSON := modifiedJSON(t, validHashedRekordJSON, fn)
+ testPayload, err := json.Marshal(UntrustedRekorPayload{
+ Body: testHashedRekordJSON,
+ IntegratedTime: 1,
+ LogIndex: 2,
+ LogID: "logID",
+ })
+ require.NoError(t, err)
+ testPayloadSig, err := testSigner.SignMessage(bytes.NewReader(testPayload))
+ require.NoError(t, err)
+ testSET, err := json.Marshal(UntrustedRekorSET{
+ UntrustedSignedEntryTimestamp: testPayloadSig,
+ UntrustedPayload: json.RawMessage(testPayload),
+ })
+ require.NoError(t, err)
+ tm, err = VerifyRekorSET(&testKey.PublicKey, testSET, cosignCertBytes, string(cosignSigBase64), cosignPayloadBytes)
+ assert.Error(t, err)
+ assert.Zero(t, tm)
+ }
+
+ // Invalid unverifiedBase64Signature parameter
+ truncatedBase64 := cosignSigBase64
+ truncatedBase64 = truncatedBase64[:len(truncatedBase64)-1]
+ tm, err = VerifyRekorSET(cosignRekorKeyECDSA, cosignSETBytes, cosignCertBytes,
+ string(truncatedBase64), cosignPayloadBytes)
+ assert.Error(t, err)
+ assert.Zero(t, tm)
+
+ // Invalid unverifiedKeyOrCertBytes
+ for _, c := range [][]byte{
+ nil,
+ {},
+ []byte("this is not PEM"),
+ bytes.Repeat(cosignCertBytes, 2),
+ } {
+ tm, err = VerifyRekorSET(cosignRekorKeyECDSA, cosignSETBytes, c,
+ string(cosignSigBase64), cosignPayloadBytes)
+ assert.Error(t, err)
+ assert.Zero(t, tm)
+ }
+}
diff --git a/signature/internal/sigstore_payload.go b/signature/internal/sigstore_payload.go
new file mode 100644
index 0000000..f8ec665
--- /dev/null
+++ b/signature/internal/sigstore_payload.go
@@ -0,0 +1,202 @@
+package internal
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/v5/version"
+ digest "github.com/opencontainers/go-digest"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+const (
+ sigstoreSignatureType = "cosign container image signature"
+ sigstoreHarcodedHashAlgorithm = crypto.SHA256
+)
+
+// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
+type UntrustedSigstorePayload struct {
+ untrustedDockerManifestDigest digest.Digest
+ untrustedDockerReference string // FIXME: more precise type?
+ untrustedCreatorID *string
+ // This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
+ // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+ // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+ // we would add another field, UntrustedTimestampNS int64.
+ untrustedTimestamp *int64
+}
+
+// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
+// the specified primary contents and appropriate metadata.
+func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload {
+ // Use intermediate variables for these values so that we can take their addresses.
+ // Golang guarantees that they will have a new address on every execution.
+ creatorID := "containers/image " + version.Version
+ timestamp := time.Now().Unix()
+ return UntrustedSigstorePayload{
+ untrustedDockerManifestDigest: dockerManifestDigest,
+ untrustedDockerReference: dockerReference,
+ untrustedCreatorID: &creatorID,
+ untrustedTimestamp: &timestamp,
+ }
+}
+
+ // A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implement json.Marshaler
+var _ json.Marshaler = UntrustedSigstorePayload{}
+var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
+ if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+ return nil, errors.New("Unexpected empty signature content")
+ }
+ critical := map[string]any{
+ "type": sigstoreSignatureType,
+ "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+ "identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+ }
+ optional := map[string]any{}
+ if s.untrustedCreatorID != nil {
+ optional["creator"] = *s.untrustedCreatorID
+ }
+ if s.untrustedTimestamp != nil {
+ optional["timestamp"] = *s.untrustedTimestamp
+ }
+ signature := map[string]any{
+ "critical": critical,
+ "optional": optional,
+ }
+ return json.Marshal(signature)
+}
+
+// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
+ err := s.strictUnmarshalJSON(data)
+ if err != nil {
+ if formatErr, ok := err.(JSONFormatError); ok {
+ err = NewInvalidSignatureError(formatErr.Error())
+ }
+ }
+ return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+ // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
+ var critical, optional json.RawMessage
+ if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "critical": &critical,
+ "optional": &optional,
+ }); err != nil {
+ return err
+ }
+
+ var creatorID string
+ var timestamp float64
+ var gotCreatorID, gotTimestamp = false, false
+ // /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
+ if !bytes.Equal(optional, []byte("null")) {
+ if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
+ switch key {
+ case "creator":
+ gotCreatorID = true
+ return &creatorID
+ case "timestamp":
+ gotTimestamp = true
+ return &timestamp
+ default:
+ var ignore any
+ return &ignore
+ }
+ }); err != nil {
+ return err
+ }
+ }
+ if gotCreatorID {
+ s.untrustedCreatorID = &creatorID
+ }
+ if gotTimestamp {
+ intTimestamp := int64(timestamp)
+ if float64(intTimestamp) != timestamp {
+ return NewInvalidSignatureError("Field optional.timestamp is not an integer")
+ }
+ s.untrustedTimestamp = &intTimestamp
+ }
+
+ var t string
+ var image, identity json.RawMessage
+ if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+ "type": &t,
+ "image": &image,
+ "identity": &identity,
+ }); err != nil {
+ return err
+ }
+ if t != sigstoreSignatureType {
+ return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+ }
+
+ var digestString string
+ if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+ "docker-manifest-digest": &digestString,
+ }); err != nil {
+ return err
+ }
+ s.untrustedDockerManifestDigest = digest.Digest(digestString)
+
+ return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+ "docker-reference": &s.untrustedDockerReference,
+ })
+}
+
+// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable.
+// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+ // because the functions have the same or similar types, so there is a risk of mixing the functions up;
+// named members of this struct are more explicit.
+type SigstorePayloadAcceptanceRules struct {
+ ValidateSignedDockerReference func(string) error
+ ValidateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+ // VerifySigstorePayload verifies that unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that the payload's
+ // principal components match the expected values specified by rules, and returns the parsed payload.
+// We return an *UntrustedSigstorePayload, although nothing actually uses it,
+// just to double-check against stupid typos.
+func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
+ verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm)
+ if err != nil {
+ return nil, fmt.Errorf("creating verifier: %w", err)
+ }
+
+ unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+ if err != nil {
+ return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err))
+ }
+ // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
+ // which seems not to be used by anything, so we don’t bother.
+ if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil {
+ return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err))
+ }
+
+ var unmatchedPayload UntrustedSigstorePayload
+ if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
+ return nil, NewInvalidSignatureError(err.Error())
+ }
+ if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil {
+ return nil, err
+ }
+ if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil {
+ return nil, err
+ }
+ // SigstorePayloadAcceptanceRules have accepted this value.
+ return &unmatchedPayload, nil
+}
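
A minimal usage sketch (not part of the diff above), assuming a caller elsewhere in this module, since the signature/internal package is not importable from outside it; the function name verifyPayloadSketch and its parameters are hypothetical and only illustrate how SigstorePayloadAcceptanceRules and VerifySigstorePayload fit together:

package signature

import (
    "errors"
    "fmt"

    "github.com/containers/image/v5/signature/internal"
    digest "github.com/opencontainers/go-digest"
    "github.com/sigstore/sigstore/pkg/cryptoutils"
)

// verifyPayloadSketch (hypothetical) accepts a sigstore payload only if it names the expected image.
func verifyPayloadSketch(publicKeyPEM, unverifiedPayload []byte, unverifiedBase64Signature, expectedReference string, expectedDigest digest.Digest) error {
    publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
    if err != nil {
        return fmt.Errorf("parsing public key: %w", err)
    }
    rules := internal.SigstorePayloadAcceptanceRules{
        // Both callbacks receive untrusted values parsed from the payload; a non-nil error rejects it.
        ValidateSignedDockerReference: func(ref string) error {
            if ref != expectedReference {
                return errors.New("docker reference mismatch")
            }
            return nil
        },
        ValidateSignedDockerManifestDigest: func(d digest.Digest) error {
            if d != expectedDigest {
                return errors.New("manifest digest mismatch")
            }
            return nil
        },
    }
    _, err = internal.VerifySigstorePayload(publicKey, unverifiedPayload, unverifiedBase64Signature, rules)
    return err
}

Returning an error from either callback causes VerifySigstorePayload to reject the payload even after the cryptographic check has passed, mirroring the order of operations in the implementation above.
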
diff --git a/signature/internal/sigstore_payload_test.go b/signature/internal/sigstore_payload_test.go
new file mode 100644
index 0000000..9c4db40
--- /dev/null
+++ b/signature/internal/sigstore_payload_test.go
@@ -0,0 +1,329 @@
+package internal
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/version"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+)
+
+// A short-hand way to get a JSON object field value or panic. No error handling done, we know
+// what we are working with, a panic in a test is good enough, and fitting test cases on a single line
+// is a priority.
+func x(m mSA, fields ...string) mSA {
+ for _, field := range fields {
+ // Not .(mSA) because type assertion of an unnamed type to a named type always fails (the types
+ // are not "identical"), but the assignment is fine because they are "assignable".
+ m = m[field].(map[string]any)
+ }
+ return m
+}
+
+func TestNewUntrustedSigstorePayload(t *testing.T) {
+ timeBefore := time.Now()
+ sig := NewUntrustedSigstorePayload(TestImageManifestDigest, TestImageSignatureReference)
+ assert.Equal(t, TestImageManifestDigest, sig.untrustedDockerManifestDigest)
+ assert.Equal(t, TestImageSignatureReference, sig.untrustedDockerReference)
+ require.NotNil(t, sig.untrustedCreatorID)
+ assert.Equal(t, "containers/image "+version.Version, *sig.untrustedCreatorID)
+ require.NotNil(t, sig.untrustedTimestamp)
+ timeAfter := time.Now()
+ assert.True(t, timeBefore.Unix() <= *sig.untrustedTimestamp)
+ assert.True(t, *sig.untrustedTimestamp <= timeAfter.Unix())
+}
+
+func TestUntrustedSigstorePayloadMarshalJSON(t *testing.T) {
+ // Empty string values
+ s := NewUntrustedSigstorePayload("", "_")
+ _, err := s.MarshalJSON()
+ assert.Error(t, err)
+ s = NewUntrustedSigstorePayload("_", "")
+ _, err = s.MarshalJSON()
+ assert.Error(t, err)
+
+ // Success
+ // Use intermediate variables for these values so that we can take their addresses.
+ creatorID := "CREATOR"
+ timestamp := int64(1484683104)
+ for _, c := range []struct {
+ input UntrustedSigstorePayload
+ expected string
+ }{
+ {
+ UntrustedSigstorePayload{
+ untrustedDockerManifestDigest: "digest!@#",
+ untrustedDockerReference: "reference#@!",
+ untrustedCreatorID: &creatorID,
+ untrustedTimestamp: &timestamp,
+ },
+ "{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"cosign container image signature\"},\"optional\":{\"creator\":\"CREATOR\",\"timestamp\":1484683104}}",
+ },
+ {
+ UntrustedSigstorePayload{
+ untrustedDockerManifestDigest: "digest!@#",
+ untrustedDockerReference: "reference#@!",
+ },
+ "{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"cosign container image signature\"},\"optional\":{}}",
+ },
+ } {
+ marshaled, err := c.input.MarshalJSON()
+ require.NoError(t, err)
+ assert.Equal(t, []byte(c.expected), marshaled)
+
+ // Also call MarshalJSON through the JSON package.
+ marshaled, err = json.Marshal(c.input)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte(c.expected), marshaled)
+ }
+}
+
+// Verify that input can be unmarshaled as an UntrustedSigstorePayload.
+func successfullyUnmarshalUntrustedSigstorePayload(t *testing.T, input []byte) UntrustedSigstorePayload {
+ var s UntrustedSigstorePayload
+ err := json.Unmarshal(input, &s)
+ require.NoError(t, err, string(input))
+
+ return s
+}
+
+// Verify that input can't be unmarshaled as an UntrustedSigstorePayload.
+func assertUnmarshalUntrustedSigstorePayloadFails(t *testing.T, input []byte) {
+ var s UntrustedSigstorePayload
+ err := json.Unmarshal(input, &s)
+ assert.Error(t, err, string(input))
+}
+
+func TestUntrustedSigstorePayloadUnmarshalJSON(t *testing.T) {
+ // Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
+ // UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
+ assertUnmarshalUntrustedSigstorePayloadFails(t, []byte("&"))
+ var s UntrustedSigstorePayload
+ err := s.UnmarshalJSON([]byte("&"))
+ assert.Error(t, err)
+
+ // Not an object
+ assertUnmarshalUntrustedSigstorePayloadFails(t, []byte("1"))
+
+ // Start with a valid JSON.
+ validSig := NewUntrustedSigstorePayload("digest!@#", "reference#@!")
+ validJSON, err := validSig.MarshalJSON()
+ require.NoError(t, err)
+
+ // Success
+ s = successfullyUnmarshalUntrustedSigstorePayload(t, validJSON)
+ assert.Equal(t, validSig, s)
+
+ // A /usr/bin/cosign-generated payload is handled correctly
+ s = successfullyUnmarshalUntrustedSigstorePayload(t, []byte(`{"critical":{"identity":{"docker-reference":"192.168.64.2:5000/cosign-signed-multi"},"image":{"docker-manifest-digest":"sha256:43955d6857268cc948ae9b370b221091057de83c4962da0826f9a2bdc9bd6b44"},"type":"cosign container image signature"},"optional":null}`))
+ assert.Equal(t, UntrustedSigstorePayload{
+ untrustedDockerManifestDigest: "sha256:43955d6857268cc948ae9b370b221091057de83c4962da0826f9a2bdc9bd6b44",
+ untrustedDockerReference: "192.168.64.2:5000/cosign-signed-multi",
+ untrustedCreatorID: nil,
+ untrustedTimestamp: nil,
+ }, s)
+
+ // Various ways to corrupt the JSON
+ breakFns := []func(mSA){
+ // A top-level field is missing
+ func(v mSA) { delete(v, "critical") },
+ func(v mSA) { delete(v, "optional") },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // "critical" not an object
+ func(v mSA) { v["critical"] = 1 },
+ // "optional" not an object
+ func(v mSA) { v["optional"] = 1 },
+ // A field of "critical" is missing
+ func(v mSA) { delete(x(v, "critical"), "type") },
+ func(v mSA) { delete(x(v, "critical"), "image") },
+ func(v mSA) { delete(x(v, "critical"), "identity") },
+ // Extra field of "critical"
+ func(v mSA) { x(v, "critical")["unexpected"] = 1 },
+ // Invalid "type"
+ func(v mSA) { x(v, "critical")["type"] = 1 },
+ func(v mSA) { x(v, "critical")["type"] = "unexpected" },
+ // Invalid "image" object
+ func(v mSA) { x(v, "critical")["image"] = 1 },
+ func(v mSA) { delete(x(v, "critical", "image"), "docker-manifest-digest") },
+ func(v mSA) { x(v, "critical", "image")["unexpected"] = 1 },
+ // Invalid "docker-manifest-digest"
+ func(v mSA) { x(v, "critical", "image")["docker-manifest-digest"] = 1 },
+ // Invalid "identity" object
+ func(v mSA) { x(v, "critical")["identity"] = 1 },
+ func(v mSA) { delete(x(v, "critical", "identity"), "docker-reference") },
+ func(v mSA) { x(v, "critical", "identity")["unexpected"] = 1 },
+ // Invalid "docker-reference"
+ func(v mSA) { x(v, "critical", "identity")["docker-reference"] = 1 },
+ // Invalid "creator"
+ func(v mSA) { x(v, "optional")["creator"] = 1 },
+ // Invalid "timestamp"
+ func(v mSA) { x(v, "optional")["timestamp"] = "unexpected" },
+ func(v mSA) { x(v, "optional")["timestamp"] = 0.5 }, // Fractional input
+ }
+ for _, fn := range breakFns {
+ testJSON := modifiedJSON(t, validJSON, fn)
+ assertUnmarshalUntrustedSigstorePayloadFails(t, testJSON)
+ }
+
+ // Modifications to unrecognized fields in "optional" are allowed and ignored
+ allowedModificationFns := []func(mSA){
+ // Add an optional field
+ func(v mSA) { x(v, "optional")["unexpected"] = 1 },
+ }
+ for _, fn := range allowedModificationFns {
+ testJSON := modifiedJSON(t, validJSON, fn)
+ s := successfullyUnmarshalUntrustedSigstorePayload(t, testJSON)
+ assert.Equal(t, validSig, s)
+ }
+
+ // Optional fields can be missing
+ validSig = UntrustedSigstorePayload{
+ untrustedDockerManifestDigest: "digest!@#",
+ untrustedDockerReference: "reference#@!",
+ untrustedCreatorID: nil,
+ untrustedTimestamp: nil,
+ }
+ validJSON, err = validSig.MarshalJSON()
+ require.NoError(t, err)
+ s = successfullyUnmarshalUntrustedSigstorePayload(t, validJSON)
+ assert.Equal(t, validSig, s)
+}
+
+func TestVerifySigstorePayload(t *testing.T) {
+ publicKeyPEM, err := os.ReadFile("./testdata/cosign.pub")
+ require.NoError(t, err)
+ publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
+ require.NoError(t, err)
+
+ type acceptanceData struct {
+ signedDockerReference string
+ signedDockerManifestDigest digest.Digest
+ }
+ var wanted, recorded acceptanceData
+ // recordingRules is a plausible SigstorePayloadAcceptanceRules implementation, but, equally
+ // importantly, it records that we are passing the correct values to the rule callbacks.
+ recordingRules := SigstorePayloadAcceptanceRules{
+ ValidateSignedDockerReference: func(signedDockerReference string) error {
+ recorded.signedDockerReference = signedDockerReference
+ if signedDockerReference != wanted.signedDockerReference {
+ return errors.New("signedDockerReference mismatch")
+ }
+ return nil
+ },
+ ValidateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+ recorded.signedDockerManifestDigest = signedDockerManifestDigest
+ if signedDockerManifestDigest != wanted.signedDockerManifestDigest {
+ return errors.New("signedDockerManifestDigest mismatch")
+ }
+ return nil
+ },
+ }
+
+ sigBlob, err := os.ReadFile("./testdata/valid.signature")
+ require.NoError(t, err)
+ genericSig, err := signature.FromBlob(sigBlob)
+ require.NoError(t, err)
+ sigstoreSig, ok := genericSig.(signature.Sigstore)
+ require.True(t, ok)
+ cryptoBase64Sig, ok := sigstoreSig.UntrustedAnnotations()[signature.SigstoreSignatureAnnotationKey]
+ require.True(t, ok)
+ signatureData := acceptanceData{
+ signedDockerReference: TestSigstoreSignatureReference,
+ signedDockerManifestDigest: TestSigstoreManifestDigest,
+ }
+
+ // Successful verification
+ wanted = signatureData
+ recorded = acceptanceData{}
+ res, err := VerifySigstorePayload(publicKey, sigstoreSig.UntrustedPayload(), cryptoBase64Sig, recordingRules)
+ require.NoError(t, err)
+ assert.Equal(t, res, &UntrustedSigstorePayload{
+ untrustedDockerManifestDigest: TestSigstoreManifestDigest,
+ untrustedDockerReference: TestSigstoreSignatureReference,
+ untrustedCreatorID: nil,
+ untrustedTimestamp: nil,
+ })
+ assert.Equal(t, signatureData, recorded)
+
+ // For extra paranoia, test that we return a nil signature object on error.
+
+ // Invalid verifier
+ recorded = acceptanceData{}
+ invalidPublicKey := struct{}{} // crypto.PublicKey is, for some reason, just an any, so this is acceptable.
+ res, err = VerifySigstorePayload(invalidPublicKey, sigstoreSig.UntrustedPayload(), cryptoBase64Sig, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, res)
+ assert.Equal(t, acceptanceData{}, recorded)
+
+ // Invalid base64 encoding
+ for _, invalidBase64Sig := range []string{
+ "&", // Invalid base64 characters
+ cryptoBase64Sig + "=", // Extra padding
+ cryptoBase64Sig[:len(cryptoBase64Sig)-1], // Truncated base64 data
+ } {
+ recorded = acceptanceData{}
+ res, err = VerifySigstorePayload(publicKey, sigstoreSig.UntrustedPayload(), invalidBase64Sig, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, res)
+ assert.Equal(t, acceptanceData{}, recorded)
+ }
+
+ // Invalid signature
+ validSignatureBytes, err := base64.StdEncoding.DecodeString(cryptoBase64Sig)
+ require.NoError(t, err)
+ for _, invalidSig := range [][]byte{
+ {}, // Empty signature
+ []byte("invalid signature"),
+ append(slices.Clone(validSignatureBytes), validSignatureBytes...),
+ } {
+ recorded = acceptanceData{}
+ res, err = VerifySigstorePayload(publicKey, sigstoreSig.UntrustedPayload(), base64.StdEncoding.EncodeToString(invalidSig), recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, res)
+ assert.Equal(t, acceptanceData{}, recorded)
+ }
+
+ // Valid signature of non-JSON
+ recorded = acceptanceData{}
+ res, err = VerifySigstorePayload(publicKey, []byte("&"), "MEUCIARnnxZQPALBfqkB4aNAYXad79Qs6VehcrgIeZ8p7I2FAiEAzq2HXwXlz1iJeh+ucUR3L0zpjynQk6Rk0+/gXYp49RU=", recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, res)
+ assert.Equal(t, acceptanceData{}, recorded)
+
+ // Valid signature of an unacceptable JSON
+ recorded = acceptanceData{}
+ res, err = VerifySigstorePayload(publicKey, []byte("{}"), "MEUCIQDkySOBGxastVP0+koTA33NH5hXjwosFau4rxTPN6g48QIgb7eWKkGqfEpHMM3aT4xiqyP/170jEkdFuciuwN4mux4=", recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, res)
+ assert.Equal(t, acceptanceData{}, recorded)
+
+ // Valid signature with a mismatched manifest digest: the digest callback rejects it, so the reference callback is never invoked
+ wanted = signatureData
+ wanted.signedDockerManifestDigest = "invalid digest"
+ recorded = acceptanceData{}
+ res, err = VerifySigstorePayload(publicKey, sigstoreSig.UntrustedPayload(), cryptoBase64Sig, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, res)
+ assert.Equal(t, acceptanceData{
+ signedDockerManifestDigest: signatureData.signedDockerManifestDigest,
+ }, recorded)
+
+ // Valid signature with a wrong image reference
+ wanted = signatureData
+ wanted.signedDockerReference = "unexpected docker reference"
+ recorded = acceptanceData{}
+ res, err = VerifySigstorePayload(publicKey, sigstoreSig.UntrustedPayload(), cryptoBase64Sig, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, res)
+ assert.Equal(t, signatureData, recorded)
+}
diff --git a/signature/internal/testdata/cosign.pub b/signature/internal/testdata/cosign.pub
new file mode 120000
index 0000000..96b2879
--- /dev/null
+++ b/signature/internal/testdata/cosign.pub
@@ -0,0 +1 @@
+../../fixtures/cosign.pub \ No newline at end of file
diff --git a/signature/internal/testdata/rekor-cert b/signature/internal/testdata/rekor-cert
new file mode 100644
index 0000000..734b8bb
--- /dev/null
+++ b/signature/internal/testdata/rekor-cert
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICnTCCAiOgAwIBAgIUG45uaC2z8VvuOwzzm79RPfckoxwwCgYIKoZIzj0EAwMw
+NzEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MR4wHAYDVQQDExVzaWdzdG9yZS1pbnRl
+cm1lZGlhdGUwHhcNMjIxMjEyMTg0ODE4WhcNMjIxMjEyMTg1ODE4WjAAMFkwEwYH
+KoZIzj0CAQYIKoZIzj0DAQcDQgAEJBurkhSRmHukHVh3VA8nYuSWqZ4ltafDwyWl
+c3c9NaLovmu+NC4NiUtMLifL/P3nqedbnctKBuYmfISGiZlVyKOCAUIwggE+MA4G
+A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUAmvG
+4K6lNk/sk/f4aiahjYRqR+cwHwYDVR0jBBgwFoAU39Ppz1YkEZb5qNjpKFWixi4Y
+ZD8wHQYDVR0RAQH/BBMwEYEPbWl0ckByZWRoYXQuY29tMCwGCisGAQQBg78wAQEE
+Hmh0dHBzOi8vZ2l0aHViLmNvbS9sb2dpbi9vYXV0aDCBiQYKKwYBBAHWeQIEAgR7
+BHkAdwB1AN09MGrGxxEyYxkeHJlnNwKiSl643jyt/4eKcoAvKe6OAAABhQeqlwUA
+AAQDAEYwRAIgVi2SvN5ReGYiOShY8LFSRG5D6oATKqHb6kN/DJh2rAUCIANS6Aqp
+xaXiHwTHUr3xPSGa5i6hywmbbRC6N0kIeDM6MAoGCCqGSM49BAMDA2gAMGUCMCLu
+cZtESIN0lm64Co/bZ68CCRkWlktX4mmJiRhKi9c9QD62C5SuZhc0vvo6MOKmTQIx
+AIo/MozZeoU7rdNj0pZKuBeCmMqmJaDSsw19tKi/b1pEmuw+Sf2BI25GgIbKztG1
+9w==
+-----END CERTIFICATE-----
diff --git a/signature/internal/testdata/rekor-payload b/signature/internal/testdata/rekor-payload
new file mode 100644
index 0000000..b3cf6c4
--- /dev/null
+++ b/signature/internal/testdata/rekor-payload
@@ -0,0 +1 @@
+{"critical":{"identity":{"docker-reference":"192.168.64.2:5000/test-repo-2100826901021538654/alpine"},"image":{"docker-manifest-digest":"sha256:fa93b01658e3a5a1686dc3ae55f170d8de487006fb53a28efcd12ab0710a2e5f"},"type":"cosign container image signature"},"optional":null} \ No newline at end of file
diff --git a/signature/internal/testdata/rekor-set b/signature/internal/testdata/rekor-set
new file mode 100644
index 0000000..04ca1e5
--- /dev/null
+++ b/signature/internal/testdata/rekor-set
@@ -0,0 +1 @@
+{"SignedEntryTimestamp":"MEYCIQDdeujdGLpMTgFdew9wsSJ3WF7olX9PawgzGeX2RmJd8QIhAPxGJf+HjUFVpQc0hgPaUSK8LsONJ08fZFEBVKDeLj4S","Payload":{"body":"eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoiaGFzaGVkcmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiIwZmQxZTk4MzJjYzVhNWY1MDJlODAwZmU5Y2RlZWZiZDMxMzYyZGYxNmZlOGMyMjUwZDMwOGFlYTNmYjFmYzY5In19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FUUNJRUpjOTZlMDQxVkFoS0EwM1N2ZkNZYldvZElNSVFQeUF0V3lEUDRGblBxcEFpQWFJUzYwRWpoUkRoU2Fub0Zzb0l5OGZLcXFLZVc1cHMvdExYU0dwYXlpMmc9PSIsInB1YmxpY0tleSI6eyJjb250ZW50IjoiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTnVWRU5EUVdsUFowRjNTVUpCWjBsVlJ6UTFkV0ZETW5vNFZuWjFUM2Q2ZW0wM09WSlFabU5yYjNoM2QwTm5XVWxMYjFwSmVtb3dSVUYzVFhjS1RucEZWazFDVFVkQk1WVkZRMmhOVFdNeWJHNWpNMUoyWTIxVmRWcEhWakpOVWpSM1NFRlpSRlpSVVVSRmVGWjZZVmRrZW1SSE9YbGFVekZ3WW01U2JBcGpiVEZzV2tkc2FHUkhWWGRJYUdOT1RXcEplRTFxUlhsTlZHY3dUMFJGTkZkb1kwNU5ha2w0VFdwRmVVMVVaekZQUkVVMFYycEJRVTFHYTNkRmQxbElDa3R2V2tsNmFqQkRRVkZaU1V0dldrbDZhakJFUVZGalJGRm5RVVZLUW5WeWEyaFRVbTFJZFd0SVZtZ3pWa0U0YmxsMVUxZHhXalJzZEdGbVJIZDVWMndLWXpOak9VNWhURzkyYlhVclRrTTBUbWxWZEUxTWFXWk1MMUF6Ym5GbFpHSnVZM1JMUW5WWmJXWkpVMGRwV214V2VVdFBRMEZWU1hkblowVXJUVUUwUndwQk1WVmtSSGRGUWk5M1VVVkJkMGxJWjBSQlZFSm5UbFpJVTFWRlJFUkJTMEpuWjNKQ1owVkdRbEZqUkVGNlFXUkNaMDVXU0ZFMFJVWm5VVlZCYlhaSENqUkxObXhPYXk5emF5OW1OR0ZwWVdocVdWSnhVaXRqZDBoM1dVUldVakJxUWtKbmQwWnZRVlV6T1ZCd2VqRlphMFZhWWpWeFRtcHdTMFpYYVhocE5Ga0tXa1E0ZDBoUldVUldVakJTUVZGSUwwSkNUWGRGV1VWUVlsZHNNR05yUW5sYVYxSnZXVmhSZFZreU9YUk5RM2RIUTJselIwRlJVVUpuTnpoM1FWRkZSUXBJYldnd1pFaENlazlwT0haYU1td3dZVWhXYVV4dFRuWmlVemx6WWpKa2NHSnBPWFpaV0ZZd1lVUkRRbWxSV1V0TGQxbENRa0ZJVjJWUlNVVkJaMUkzQ2tKSWEwRmtkMEl4UVU0d09VMUhja2Q0ZUVWNVdYaHJaVWhLYkc1T2QwdHBVMncyTkROcWVYUXZOR1ZMWTI5QmRrdGxOazlCUVVGQ2FGRmxjV3gzVlVFS1FVRlJSRUZGV1hkU1FVbG5WbWt5VTNaT05WSmxSMWxwVDFOb1dUaE1SbE5TUnpWRU5tOUJWRXR4U0dJMmEwNHZSRXBvTW5KQlZVTkpRVTVUTmtGeGNBcDRZVmhwU0hkVVNGVnlNM2hRVTBkaE5XazJhSGwzYldKaVVrTTJUakJyU1dWRVRUWk5RVzlIUTBOeFIxTk5ORGxDUVUxRVFUSm5RVTFIVlVOTlEweDFDbU5hZEVWVFNVNHdiRzAyTkVOdkwySmFOamhEUTFKclYyeHJkRmcwYlcxS2FWSm9TMms1WXpsUlJEWXlRelZUZFZwb1l6QjJkbTgyVFU5TGJWUlJTWGdLUVVsdkwwMXZlbHBsYjFVM2NtUk9hakJ3V2t0MVFtVkRiVTF4YlVwaFJGTnpkekU1ZEV0cEwySXhjRVZ0ZFhjclUyWXlRa2t5TlVkblNXSkxlblJITVFvNWR6MDlDaTB0TFMwdFJVNUVJRU5GVWxSSlJrbERRVlJGTFMwdExTMEsifX19fQ==","integratedTime":1670870899,"logIndex":8949589,"logID":"c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d"}} \ No newline at end of file
diff --git a/signature/internal/testdata/rekor-sig b/signature/internal/testdata/rekor-sig
new file mode 100644
index 0000000..5735a23
--- /dev/null
+++ b/signature/internal/testdata/rekor-sig
@@ -0,0 +1 @@
+MEQCIEJc96e041VAhKA03SvfCYbWodIMIQPyAtWyDP4FnPqpAiAaIS60EjhRDhSanoFsoIy8fKqqKeW5ps/tLXSGpayi2g== \ No newline at end of file
diff --git a/signature/internal/testdata/rekor.pub b/signature/internal/testdata/rekor.pub
new file mode 100644
index 0000000..050ef60
--- /dev/null
+++ b/signature/internal/testdata/rekor.pub
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwr
+kBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==
+-----END PUBLIC KEY-----
diff --git a/signature/internal/testdata/valid.signature b/signature/internal/testdata/valid.signature
new file mode 120000
index 0000000..b720f8c
--- /dev/null
+++ b/signature/internal/testdata/valid.signature
@@ -0,0 +1 @@
+../../fixtures/dir-img-cosign-valid/signature-1 \ No newline at end of file
diff --git a/signature/mechanism.go b/signature/mechanism.go
new file mode 100644
index 0000000..1d3fe0f
--- /dev/null
+++ b/signature/mechanism.go
@@ -0,0 +1,97 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ // This code is used only to parse the data in an explicitly-untrusted
+ // code path, where cryptography is not relevant. For now, continue to
+ // use this frozen deprecated implementation. When mechanism_openpgp.go
+ // migrates to another implementation, this should migrate as well.
+ //lint:ignore SA1019 See above
+ "golang.org/x/crypto/openpgp" //nolint:staticcheck
+)
+
+// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
+// Each mechanism should eventually be closed by calling Close().
+type SigningMechanism interface {
+ // Close removes resources associated with the mechanism, if any.
+ Close() error
+ // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+ SupportsSigning() error
+ // Sign creates a (non-detached) signature of input using keyIdentity.
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing.
+ Sign(input []byte, keyIdentity string) ([]byte, error)
+ // Verify parses unverifiedSignature and returns the content and the signer's identity
+ Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
+ // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+ // along with a short identifier of the key used for signing.
+ // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+ // is NOT the same as a "key identity" used in other calls to this interface, and
+ // the values may have no recognizable relationship if the public key is not available.
+ UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
+}
+
+// signingMechanismWithPassphrase is an internal extension of SigningMechanism.
+type signingMechanismWithPassphrase interface {
+ SigningMechanism
+
+ // Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing.
+ SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error)
+}
+
+// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
+type SigningNotSupportedError string
+
+func (err SigningNotSupportedError) Error() string {
+ return string(err)
+}
+
+// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
+// GPG configuration ($GNUPGHOME / ~/.gnupg)
+// The caller must call .Close() on the returned SigningMechanism.
+func NewGPGSigningMechanism() (SigningMechanism, error) {
+ return newGPGSigningMechanismInDirectory("")
+}
+
+// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+ return newEphemeralGPGSigningMechanism([][]byte{blob})
+}
+
+// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
+ md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("The input is not a signature")
+ }
+ content, err := io.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: An error during reading the body can happen only if
+ // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
+ // to decrypt the contents anyway), or
+ // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
+ return nil, "", err
+ }
+
+ // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints
+ // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
+ return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil
+}
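
A minimal sketch (not part of the diff above) of an external caller using the exported API added in this file; verifyWithEphemeralKeyring and the file names are hypothetical and only illustrate the NewEphemeralGPGSigningMechanism / Verify flow:

package main

import (
    "fmt"
    "os"

    "github.com/containers/image/v5/signature"
)

// verifyWithEphemeralKeyring (hypothetical) verifies a signature using only the keys in keyPath.
func verifyWithEphemeralKeyring(keyPath, sigPath string) ([]byte, error) {
    keyBlob, err := os.ReadFile(keyPath)
    if err != nil {
        return nil, err
    }
    // The ephemeral mechanism recognizes only the keys from keyBlob and reports their identities.
    mech, keyIdentities, err := signature.NewEphemeralGPGSigningMechanism(keyBlob)
    if err != nil {
        return nil, err
    }
    defer mech.Close()

    sigBlob, err := os.ReadFile(sigPath)
    if err != nil {
        return nil, err
    }
    contents, keyIdentity, err := mech.Verify(sigBlob)
    if err != nil {
        return nil, err
    }
    // Double-check that the reported signer is one of the keys we just imported.
    found := false
    for _, id := range keyIdentities {
        if id == keyIdentity {
            found = true
            break
        }
    }
    if !found {
        return nil, fmt.Errorf("signature made by unexpected key %q", keyIdentity)
    }
    return contents, nil
}

func main() {
    contents, err := verifyWithEphemeralKeyring("public-key.gpg", "blob.signature")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Printf("signed contents: %q\n", contents)
}

Verification with the ephemeral mechanism already fails for keys not present in keyBlob, so the explicit identity check above is only an extra guard.
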
diff --git a/signature/mechanism_gpgme.go b/signature/mechanism_gpgme.go
new file mode 100644
index 0000000..2b2a7ad
--- /dev/null
+++ b/signature/mechanism_gpgme.go
@@ -0,0 +1,207 @@
+//go:build !containers_image_openpgp
+// +build !containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/proglottis/gpgme"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using gpgme.
+type gpgmeSigningMechanism struct {
+ ctx *gpgme.Context
+ ephemeralDir string // If not "", a directory to be removed on Close()
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
+ ctx, err := newGPGMEContext(optionalDir)
+ if err != nil {
+ return nil, err
+ }
+ return &gpgmeSigningMechanism{
+ ctx: ctx,
+ ephemeralDir: "",
+ }, nil
+}
+
+// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blobs, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
+ dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-")
+ if err != nil {
+ return nil, nil, err
+ }
+ removeDir := true
+ defer func() {
+ if removeDir {
+ os.RemoveAll(dir)
+ }
+ }()
+ ctx, err := newGPGMEContext(dir)
+ if err != nil {
+ return nil, nil, err
+ }
+ mech := &gpgmeSigningMechanism{
+ ctx: ctx,
+ ephemeralDir: dir,
+ }
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := mech.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
+ }
+
+ removeDir = false
+ return mech, keyIdentities, nil
+}
+
+// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty.
+func newGPGMEContext(optionalDir string) (*gpgme.Context, error) {
+ ctx, err := gpgme.New()
+ if err != nil {
+ return nil, err
+ }
+ if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
+ return nil, err
+ }
+ if optionalDir != "" {
+ err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
+ if err != nil {
+ return nil, err
+ }
+ }
+ ctx.SetArmor(false)
+ ctx.SetTextMode(false)
+ return ctx, nil
+}
+
+func (m *gpgmeSigningMechanism) Close() error {
+ if m.ephemeralDir != "" {
+ os.RemoveAll(m.ephemeralDir) // Ignore an error, if any
+ }
+ return nil
+}
+
+// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
+// The blob is assumed to have an appropriate format (the caller is expected to know which one).
+// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism);
+// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism.
+func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
+ inputData, err := gpgme.NewDataBytes(blob)
+ if err != nil {
+ return nil, err
+ }
+ res, err := m.ctx.Import(inputData)
+ if err != nil {
+ return nil, err
+ }
+ keyIdentities := []string{}
+ for _, i := range res.Imports {
+ if i.Result == nil {
+ keyIdentities = append(keyIdentities, i.Fingerprint)
+ }
+ }
+ return keyIdentities, nil
+}
+
+// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+func (m *gpgmeSigningMechanism) SupportsSigning() error {
+ return nil
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
+ key, err := m.ctx.GetKey(keyIdentity, true)
+ if err != nil {
+ return nil, err
+ }
+ inputData, err := gpgme.NewDataBytes(input)
+ if err != nil {
+ return nil, err
+ }
+ var sigBuffer bytes.Buffer
+ sigData, err := gpgme.NewDataWriter(&sigBuffer)
+ if err != nil {
+ return nil, err
+ }
+
+ if passphrase != "" {
+ // Callback to write the passphrase to the specified file descriptor.
+ callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error {
+ if prevWasBad {
+ return errors.New("bad passphrase")
+ }
+ _, err := gpgmeFD.WriteString(passphrase + "\n")
+ return err
+ }
+ if err := m.ctx.SetCallback(callback); err != nil {
+ return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err)
+ }
+
+ // Loopback mode will use the callback instead of prompting the user.
+ if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
+ return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err)
+ }
+ }
+
+ if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
+ return nil, err
+ }
+ return sigBuffer.Bytes(), nil
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ signedBuffer := bytes.Buffer{}
+ signedData, err := gpgme.NewDataWriter(&signedBuffer)
+ if err != nil {
+ return nil, "", err
+ }
+ unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
+ if err != nil {
+ return nil, "", err
+ }
+ _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
+ if err != nil {
+ return nil, "", err
+ }
+ if len(sigs) != 1 {
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs)))
+ }
+ sig := sigs[0]
+ // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
+ if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
+ // FIXME: Better error reporting eventually
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig))
+ }
+ return signedBuffer.Bytes(), sig.Fingerprint, nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
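
A minimal sign-and-verify round trip (not part of the diff above), assuming the default gpgme-backed build and an existing GPG home directory; signAndVerify and keyFingerprint are hypothetical names used only for illustration:

package main

import (
    "fmt"

    "github.com/containers/image/v5/signature"
)

// signAndVerify (hypothetical) signs payload with keyFingerprint and verifies the result.
func signAndVerify(payload []byte, keyFingerprint string) error {
    // Uses the user's default GPG configuration ($GNUPGHOME / ~/.gnupg).
    mech, err := signature.NewGPGSigningMechanism()
    if err != nil {
        return err
    }
    defer mech.Close()

    // The gpgme implementation returns nil here; the openpgp build returns a SigningNotSupportedError.
    if err := mech.SupportsSigning(); err != nil {
        return err
    }
    sig, err := mech.Sign(payload, keyFingerprint)
    if err != nil {
        return err
    }
    contents, signer, err := mech.Verify(sig)
    if err != nil {
        return err
    }
    fmt.Printf("verified %d bytes signed by %s\n", len(contents), signer)
    return nil
}

As seen in the implementation above, Sign delegates to SignWithPassphrase with an empty passphrase; the passphrase-aware variant is only reachable through the unexported signingMechanismWithPassphrase interface.
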
diff --git a/signature/mechanism_gpgme_test.go b/signature/mechanism_gpgme_test.go
new file mode 100644
index 0000000..82ca998
--- /dev/null
+++ b/signature/mechanism_gpgme_test.go
@@ -0,0 +1,49 @@
+//go:build !containers_image_openpgp
+// +build !containers_image_openpgp
+
+package signature
+
+import (
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/internal/testing/gpgagent"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// Ensure we don’t leave around GPG agent processes.
+func TestMain(m *testing.M) {
+ code := m.Run()
+ if err := gpgagent.KillGPGAgent(testGPGHomeDirectory); err != nil {
+ logrus.Warnf("Error killing GPG agent: %v", err)
+ }
+ os.Exit(code)
+}
+
+func TestGPGMESigningMechanismClose(t *testing.T) {
+ // Closing an ephemeral mechanism removes the directory.
+ // (The non-ephemeral case is tested in the common TestGPGSigningMechanismClose)
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+ require.NoError(t, err)
+ gpgMech, ok := mech.(*gpgmeSigningMechanism)
+ require.True(t, ok)
+ dir := gpgMech.ephemeralDir
+ assert.NotEmpty(t, dir)
+ _, err = os.Lstat(dir)
+ require.NoError(t, err)
+ err = mech.Close()
+ assert.NoError(t, err)
+ _, err = os.Lstat(dir)
+ require.Error(t, err)
+ assert.True(t, os.IsNotExist(err))
+}
+
+func TestGPGMESigningMechanismSupportsSigning(t *testing.T) {
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+ require.NoError(t, err)
+ defer mech.Close()
+ err = mech.SupportsSigning()
+ assert.NoError(t, err)
+}
diff --git a/signature/mechanism_openpgp.go b/signature/mechanism_openpgp.go
new file mode 100644
index 0000000..5d6c1ac
--- /dev/null
+++ b/signature/mechanism_openpgp.go
@@ -0,0 +1,179 @@
+//go:build containers_image_openpgp
+// +build containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/containers/storage/pkg/homedir"
+ // This is fallback code; the primary recommendation is to use the gpgme mechanism
+ // implementation, which is out-of-process and more appropriate for handling long-term private key material
+ // than any Go implementation.
+ // For this verify-only fallback, we haven't reviewed any of the
+ // existing alternatives to choose; so, for now, continue to
+ // use this frozen deprecated implementation.
+ //lint:ignore SA1019 See above
+ "golang.org/x/crypto/openpgp" //nolint:staticcheck
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
+type openpgpSigningMechanism struct {
+ keyring openpgp.EntityList
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
+ m := &openpgpSigningMechanism{
+ keyring: openpgp.EntityList{},
+ }
+
+ gpgHome := optionalDir
+ if gpgHome == "" {
+ gpgHome = os.Getenv("GNUPGHOME")
+ if gpgHome == "" {
+ gpgHome = path.Join(homedir.Get(), ".gnupg")
+ }
+ }
+
+ pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ } else {
+ _, err := m.importKeysFromBytes(pubring)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
+
+// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
+ m := &openpgpSigningMechanism{
+ keyring: openpgp.EntityList{},
+ }
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := m.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
+ }
+
+ return m, keyIdentities, nil
+}
+
+func (m *openpgpSigningMechanism) Close() error {
+ return nil
+}
+
+// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
+// The blob is assumed to have an appropriate format (the caller is expected to know which one).
+func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
+ keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob))
+ if err != nil {
+ k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob))
+ if e2 != nil {
+ return nil, err // The original error -- FIXME: is this better?
+ }
+ keyring = k
+ }
+
+ keyIdentities := []string{}
+ for _, entity := range keyring {
+ if entity.PrimaryKey == nil {
+ // Coverage: This should never happen, openpgp.ReadEntity fails with a
+ // openpgp.errors.StructuralError instead of returning an entity with this
+ // field set to nil.
+ continue
+ }
+ // Uppercase the fingerprint to be compatible with gpgme
+ keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint)))
+ m.keyring = append(m.keyring, entity)
+ }
+ return keyIdentities, nil
+}
+
+// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+func (m *openpgpSigningMechanism) SupportsSigning() error {
+ return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
+ return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("not signed")
+ }
+ content, err := io.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
+ // (and possibly also signed, but it _must_ be encrypted) and the signing
+ // “modification detection code” detects a mismatch. But in that case,
+ // we would expect the signature verification to fail as well, and that is checked
+ // first. Besides, we are not supplying any decryption keys, so we really
+ // can never reach this “encrypted data MDC mismatch” path.
+ return nil, "", err
+ }
+ if md.SignatureError != nil {
+ return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
+ }
+ if md.SignedBy == nil {
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", md.Signature))
+ }
+ if md.Signature != nil {
+ if md.Signature.SigLifetimeSecs != nil {
+ expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+ if time.Now().After(expiry) {
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry))
+ }
+ }
+ } else if md.SignatureV3 == nil {
+ // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+ // or sets md.SignatureError.
+ return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set")
+ }
+
+ // Uppercase the fingerprint to be compatible with gpgme
+ return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/signature/mechanism_openpgp_test.go b/signature/mechanism_openpgp_test.go
new file mode 100644
index 0000000..b57eb70
--- /dev/null
+++ b/signature/mechanism_openpgp_test.go
@@ -0,0 +1,29 @@
+//go:build containers_image_openpgp
+// +build containers_image_openpgp
+
+package signature
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOpenpgpSigningMechanismSupportsSigning(t *testing.T) {
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+ require.NoError(t, err)
+ defer mech.Close()
+ err = mech.SupportsSigning()
+ assert.Error(t, err)
+ assert.IsType(t, SigningNotSupportedError(""), err)
+}
+
+func TestOpenpgpSigningMechanismSign(t *testing.T) {
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+ require.NoError(t, err)
+ defer mech.Close()
+ _, err = mech.Sign([]byte{}, TestKeyFingerprint)
+ assert.Error(t, err)
+ assert.IsType(t, SigningNotSupportedError(""), err)
+}
diff --git a/signature/mechanism_test.go b/signature/mechanism_test.go
new file mode 100644
index 0000000..ef67db6
--- /dev/null
+++ b/signature/mechanism_test.go
@@ -0,0 +1,330 @@
+package signature
+
+// These tests are expected to pass unmodified for _both_ mechanism_gpgme.go and mechanism_openpgp.go.
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ testGPGHomeDirectory = "./fixtures"
+)
+
+// Many of the tests use two fixtures: V4 signature packets (*.signature), and V3 signature packets (*.signature-v3)
+
+// fixtureVariants loads V3 and V4 signature fixture variants based on the v4 fixture path, and returns a map which makes it easy to test both.
+func fixtureVariants(t *testing.T, v4Path string) map[string][]byte {
+ v4, err := os.ReadFile(v4Path)
+ require.NoError(t, err)
+ v3Path := v4Path + "-v3"
+ v3, err := os.ReadFile(v3Path)
+ require.NoError(t, err)
+ return map[string][]byte{v4Path: v4, v3Path: v3}
+}
+
+func TestSigningNotSupportedError(t *testing.T) {
+ // A stupid test just to keep code coverage
+ s := "test"
+ err := SigningNotSupportedError(s)
+ assert.Equal(t, s, err.Error())
+}
+
+func TestNewGPGSigningMechanism(t *testing.T) {
+ // A dumb test just for code coverage. We test more with newGPGSigningMechanismInDirectory().
+ mech, err := NewGPGSigningMechanism()
+ assert.NoError(t, err)
+ mech.Close()
+}
+
+func TestNewGPGSigningMechanismInDirectory(t *testing.T) {
+ // A dumb test just for code coverage.
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ assert.NoError(t, err)
+ mech.Close()
+ // The various GPG failure cases are not obviously easy to reach.
+
+ // Test that using the default directory (presumably in user’s home)
+ // cannot use TestKeyFingerprint.
+ signatures := fixtureVariants(t, "./fixtures/invalid-blob.signature")
+ mech, err = newGPGSigningMechanismInDirectory("")
+ require.NoError(t, err)
+ defer mech.Close()
+ for version, signature := range signatures {
+ _, _, err := mech.Verify(signature)
+ assert.Error(t, err, version)
+ }
+
+ // Similarly, using a newly created empty directory makes TestKeyFingerprint
+ // unavailable
+ emptyDir := t.TempDir()
+ mech, err = newGPGSigningMechanismInDirectory(emptyDir)
+ require.NoError(t, err)
+ defer mech.Close()
+ for version, signature := range signatures {
+ _, _, err := mech.Verify(signature)
+ assert.Error(t, err, version)
+ }
+
+ // If pubring.gpg is unreadable in the directory, either initializing
+ // the mechanism fails (with openpgp), or it succeeds (sadly, gpgme) and
+ // later verification fails.
+ unreadableDir := t.TempDir()
+ f, err := os.OpenFile(filepath.Join(unreadableDir, "pubring.gpg"), os.O_RDONLY|os.O_CREATE, 0000)
+ require.NoError(t, err)
+ f.Close()
+ mech, err = newGPGSigningMechanismInDirectory(unreadableDir)
+ if err == nil {
+ defer mech.Close()
+ for version, signature := range signatures {
+ _, _, err := mech.Verify(signature)
+ assert.Error(t, err, version)
+ }
+ }
+
+ // Setting the directory parameter to testGPGHomeDirectory makes the key available.
+ mech, err = newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+ for version, signature := range signatures {
+ _, _, err := mech.Verify(signature)
+ assert.NoError(t, err, version)
+ }
+
+ // If we use the default directory mechanism, GNUPGHOME is respected.
+ t.Setenv("GNUPGHOME", testGPGHomeDirectory)
+ mech, err = newGPGSigningMechanismInDirectory("")
+ require.NoError(t, err)
+ defer mech.Close()
+ for version, signature := range signatures {
+ _, _, err := mech.Verify(signature)
+ assert.NoError(t, err, version)
+ }
+}
+
+func TestNewEphemeralGPGSigningMechanism(t *testing.T) {
+ // Empty input: This is accepted anyway by GPG, just returns no keys.
+ mech, keyIdentities, err := NewEphemeralGPGSigningMechanism([]byte{})
+ require.NoError(t, err)
+ defer mech.Close()
+ assert.Empty(t, keyIdentities)
+ // Try validating a signature when the key is unknown.
+ signatures := fixtureVariants(t, "./fixtures/invalid-blob.signature")
+ for version, signature := range signatures {
+ _, _, err := mech.Verify(signature)
+ require.Error(t, err, version)
+ }
+
+ // Successful import
+ keyBlob, err := os.ReadFile("./fixtures/public-key.gpg")
+ require.NoError(t, err)
+ mech, keyIdentities, err = NewEphemeralGPGSigningMechanism(keyBlob)
+ require.NoError(t, err)
+ defer mech.Close()
+ assert.Equal(t, []string{TestKeyFingerprint}, keyIdentities)
+ // After import, the signature should validate.
+ for version, signature := range signatures {
+ content, signingFingerprint, err := mech.Verify(signature)
+ require.NoError(t, err, version)
+ assert.Equal(t, []byte("This is not JSON\n"), content, version)
+ assert.Equal(t, TestKeyFingerprint, signingFingerprint, version)
+ }
+
+ // Two keys in a keyring: Read the binary-format pubring.gpg, and concatenate it twice.
+ // (Using two copies of public-key.gpg, in the ASCII-armored format, works with
+ // gpgmeSigningMechanism but not openpgpSigningMechanism.)
+ keyBlob, err = os.ReadFile("./fixtures/pubring.gpg")
+ require.NoError(t, err)
+ mech, keyIdentities, err = NewEphemeralGPGSigningMechanism(bytes.Join([][]byte{keyBlob, keyBlob}, nil))
+ require.NoError(t, err)
+ defer mech.Close()
+ assert.Equal(t, []string{TestKeyFingerprint, TestKeyFingerprintWithPassphrase, TestKeyFingerprint, TestKeyFingerprintWithPassphrase}, keyIdentities)
+
+ // Two keys from two blobs:
+ keyBlob1, err := os.ReadFile("./fixtures/public-key-1.gpg")
+ require.NoError(t, err)
+ keyBlob2, err := os.ReadFile("./fixtures/public-key-2.gpg")
+ require.NoError(t, err)
+ mech, keyIdentities, err = newEphemeralGPGSigningMechanism([][]byte{keyBlob1, keyBlob2})
+ require.NoError(t, err)
+ defer mech.Close()
+ assert.Equal(t, []string{TestKeyFingerprint, TestKeyFingerprintWithPassphrase}, keyIdentities)
+
+ // Invalid input: This is, sadly, accepted anyway by GPG, just returns no keys.
+ // For openpgpSigningMechanism we can detect this and fail.
+ mech, keyIdentities, err = NewEphemeralGPGSigningMechanism([]byte("This is invalid"))
+ assert.True(t, err != nil || len(keyIdentities) == 0)
+ if err == nil {
+ mech.Close()
+ }
+ assert.Empty(t, keyIdentities)
+ // The various GPG/GPGME failures cases are not obviously easy to reach.
+}
+
+func TestGPGSigningMechanismClose(t *testing.T) {
+ // Closing a non-ephemeral mechanism does not remove anything in the directory.
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ err = mech.Close()
+ assert.NoError(t, err)
+ _, err = os.Lstat(testGPGHomeDirectory)
+ assert.NoError(t, err)
+ _, err = os.Lstat(filepath.Join(testGPGHomeDirectory, "pubring.gpg"))
+ assert.NoError(t, err)
+}
+
+func TestGPGSigningMechanismSign(t *testing.T) {
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+
+ if err := mech.SupportsSigning(); err != nil {
+ t.Skipf("Signing not supported: %v", err)
+ }
+
+ // Successful signing
+ content := []byte("content")
+ signature, err := mech.Sign(content, TestKeyFingerprint)
+ require.NoError(t, err)
+
+ signedContent, signingFingerprint, err := mech.Verify(signature)
+ require.NoError(t, err)
+ assert.EqualValues(t, content, signedContent)
+ assert.Equal(t, TestKeyFingerprint, signingFingerprint)
+
+ // Error signing
+ _, err = mech.Sign(content, "this fingerprint doesn't exist")
+ assert.Error(t, err)
+ // The various GPG/GPGME failures cases are not obviously easy to reach.
+}
+
+func assertSigningError(t *testing.T, content []byte, fingerprint string, err error, msgAndArgs ...any) {
+ assert.Error(t, err, msgAndArgs...)
+ assert.Nil(t, content, msgAndArgs...)
+ assert.Empty(t, fingerprint, msgAndArgs...)
+}
+
+func TestGPGSigningMechanismVerify(t *testing.T) {
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+
+ // Successful verification
+ signatures := fixtureVariants(t, "./fixtures/invalid-blob.signature")
+ for variant, signature := range signatures {
+ content, signingFingerprint, err := mech.Verify(signature)
+ require.NoError(t, err, variant)
+ assert.Equal(t, []byte("This is not JSON\n"), content, variant)
+ assert.Equal(t, TestKeyFingerprint, signingFingerprint, variant)
+ }
+
+ // For extra paranoia, test that we return nil data on error.
+
+ // Completely invalid signature.
+ content, signingFingerprint, err := mech.Verify([]byte{})
+ assertSigningError(t, content, signingFingerprint, err)
+
+ content, signingFingerprint, err = mech.Verify([]byte("invalid signature"))
+ assertSigningError(t, content, signingFingerprint, err)
+
+ // Literal packet, not a signature
+ signature, err := os.ReadFile("./fixtures/unsigned-literal.signature") // Not fixtureVariants, the “literal data” packet does not have V3/V4 versions.
+ require.NoError(t, err)
+ content, signingFingerprint, err = mech.Verify(signature)
+ assertSigningError(t, content, signingFingerprint, err)
+
+ // Encrypted data, not a signature.
+ signature, err = os.ReadFile("./fixtures/unsigned-encrypted.signature") // Not fixtureVariants, the “public-key encrypted session key” does not have V3/V4 versions.
+ require.NoError(t, err)
+ content, signingFingerprint, err = mech.Verify(signature)
+ assertSigningError(t, content, signingFingerprint, err)
+
+ // FIXME? Is there a way to create a multi-signature so that gpgme_op_verify returns multiple signatures?
+
+ // Expired signature
+ signature, err = os.ReadFile("./fixtures/expired.signature") // Not fixtureVariants, V3 signature packets don’t support expiration.
+ require.NoError(t, err)
+ content, signingFingerprint, err = mech.Verify(signature)
+ assertSigningError(t, content, signingFingerprint, err)
+
+ // Corrupt signature
+ signatures = fixtureVariants(t, "./fixtures/corrupt.signature")
+ for version, signature := range signatures {
+ content, signingFingerprint, err := mech.Verify(signature)
+ assertSigningError(t, content, signingFingerprint, err, version)
+ }
+
+ // Valid signature with an unknown key
+ signatures = fixtureVariants(t, "./fixtures/unknown-key.signature")
+ for version, signature := range signatures {
+ content, signingFingerprint, err := mech.Verify(signature)
+ assertSigningError(t, content, signingFingerprint, err, version)
+ }
+
+ // The various GPG/GPGME failure cases are not obviously easy to reach.
+}
+
+func TestGPGSigningMechanismUntrustedSignatureContents(t *testing.T) {
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+ require.NoError(t, err)
+ defer mech.Close()
+
+ // A valid signature
+ signatures := fixtureVariants(t, "./fixtures/invalid-blob.signature")
+ for version, signature := range signatures {
+ content, shortKeyID, err := mech.UntrustedSignatureContents(signature)
+ require.NoError(t, err, version)
+ assert.Equal(t, []byte("This is not JSON\n"), content, version)
+ assert.Equal(t, TestKeyShortID, shortKeyID, version)
+ }
+
+ // Completely invalid signature.
+ _, _, err = mech.UntrustedSignatureContents([]byte{})
+ assert.Error(t, err)
+
+ _, _, err = mech.UntrustedSignatureContents([]byte("invalid signature"))
+ assert.Error(t, err)
+
+ // Literal packet, not a signature
+ signature, err := os.ReadFile("./fixtures/unsigned-literal.signature") // Not fixtureVariants, the “literal data” packet does not have V3/V4 versions.
+ require.NoError(t, err)
+ _, _, err = mech.UntrustedSignatureContents(signature)
+ assert.Error(t, err)
+
+ // Encrypted data, not a signature.
+ signature, err = os.ReadFile("./fixtures/unsigned-encrypted.signature") // Not fixtureVariants, the “public-key encrypted session key” does not have V3/V4 versions.
+ require.NoError(t, err)
+ _, _, err = mech.UntrustedSignatureContents(signature)
+ assert.Error(t, err)
+
+ // Expired signature
+ signature, err = os.ReadFile("./fixtures/expired.signature") // Not fixtureVariants, V3 signature packets don’t support expiration.
+ require.NoError(t, err)
+ content, shortKeyID, err := mech.UntrustedSignatureContents(signature)
+ require.NoError(t, err)
+ assert.Equal(t, []byte("This signature is expired.\n"), content)
+ assert.Equal(t, TestKeyShortID, shortKeyID)
+
+ // Corrupt signature
+ signatures = fixtureVariants(t, "./fixtures/corrupt.signature")
+ for version, signature := range signatures {
+ content, shortKeyID, err := mech.UntrustedSignatureContents(signature)
+ require.NoError(t, err, version)
+ assert.Equal(t, []byte(`{"critical":{"identity":{"docker-reference":"testing/manifest"},"image":{"docker-manifest-digest":"sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"},"type":"atomic container signature"},"optional":{"creator":"atomic ","timestamp":1458239713}}`), content, version)
+ assert.Equal(t, TestKeyShortID, shortKeyID, version)
+ }
+
+ // Valid signature with an unknown key
+ signatures = fixtureVariants(t, "./fixtures/unknown-key.signature")
+ for version, signature := range signatures {
+ content, shortKeyID, err := mech.UntrustedSignatureContents(signature)
+ require.NoError(t, err, version)
+ assert.Equal(t, []byte(`{"critical":{"identity":{"docker-reference":"testing/manifest"},"image":{"docker-manifest-digest":"sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"},"type":"atomic container signature"},"optional":{"creator":"atomic 0.1.13-dev","timestamp":1464633474}}`), content, version)
+ assert.Equal(t, "BB75E91990DF8F7E", shortKeyID, version)
+ }
+}
diff --git a/signature/policy_config.go b/signature/policy_config.go
new file mode 100644
index 0000000..7eb5cab
--- /dev/null
+++ b/signature/policy_config.go
@@ -0,0 +1,799 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by constructing them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for a security policy.
+
+// FIXME? This is by no means a user-friendly parser: no location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/containers/storage/pkg/regexp"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// userPolicyFile is the path to the per user policy path.
+var userPolicyFile = filepath.FromSlash(".config/containers/policy.json")
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+ return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// sys should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
+ return NewPolicyFromFile(defaultPolicyPath(sys))
+}
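+
+// Illustrative sketch (not part of the API surface): a typical caller loads
+// the system policy and treats a failure as fatal, rather than falling back
+// to a hard-coded policy:
+//
+//	policy, err := signature.DefaultPolicy(nil)
+//	if err != nil {
+//		return fmt.Errorf("loading signature policy: %w", err)
+//	}
+//	_ = policy // evaluated elsewhere via this package's PolicyContext machinery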
+
+// defaultPolicyPath returns a path to the default policy of the system.
+func defaultPolicyPath(sys *types.SystemContext) string {
+ return defaultPolicyPathWithHomeDir(sys, homedir.Get())
+}
+
+// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath,
+// it exists only to allow testing it with an artificial home directory.
+func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
+ if sys != nil && sys.SignaturePolicyPath != "" {
+ return sys.SignaturePolicyPath
+ }
+ userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
+ if _, err := os.Stat(userPolicyFilePath); err == nil {
+ return userPolicyFilePath
+ }
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+ }
+ return systemDefaultPolicyPath
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+ contents, err := os.ReadFile(fileName)
+ if err != nil {
+ return nil, err
+ }
+ policy, err := NewPolicyFromBytes(contents)
+ if err != nil {
+ return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err)
+ }
+ return policy, nil
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+ p := Policy{}
+ if err := json.Unmarshal(data, &p); err != nil {
+ return nil, InvalidPolicyFormatError(err.Error())
+ }
+ return &p, nil
+}
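+
+// An illustrative policy blob accepted by NewPolicyFromBytes (the repository
+// and key path below are hypothetical examples, not recommendations):
+//
+//	{
+//	    "default": [{"type": "reject"}],
+//	    "transports": {
+//	        "docker": {
+//	            "docker.io/library/busybox": [
+//	                {"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/containers/pubkey.gpg"}
+//	            ]
+//	        }
+//	    }
+//	}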
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (p *Policy) UnmarshalJSON(data []byte) error {
+ *p = Policy{}
+ transports := policyTransportsMap{}
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ switch key {
+ case "default":
+ return &p.Default
+ case "transports":
+ return &transports
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if p.Default == nil {
+ return InvalidPolicyFormatError("Default policy is missing")
+ }
+ p.Transports = map[string]PolicyTransportScopes(transports)
+ return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
+// Compile-time check that policyTransportsMap implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers and convert.
+ tmpMap := map[string]*PolicyTransportScopes{}
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ // transport can be nil
+ transport := transports.Get(key)
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ ptsWithTransport := policyTransportScopesWithTransport{
+ transport: transport,
+ dest: &PolicyTransportScopes{}, // This allocates a new instance on each call.
+ }
+ tmpMap[key] = ptsWithTransport.dest
+ return &ptsWithTransport
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m)[key] = *ptr
+ }
+ return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler;
+// we want to only use policyTransportScopesWithTransport.
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+ return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport if not nil.
+type policyTransportScopesWithTransport struct {
+ transport types.ImageTransport
+ dest *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers-to-slices and convert.
+ tmpMap := map[string]*PolicyRequirements{}
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ if key != "" && m.transport != nil {
+ if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
+ return nil
+ }
+ }
+ ptr := &PolicyRequirements{} // This allocates a new instance on each call.
+ tmpMap[key] = ptr
+ return ptr
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m.dest)[key] = *ptr
+ }
+ return nil
+}
+
+// Compile-time check that PolicyRequirements implements json.Unmarshaler.
+var _ json.Unmarshaler = (*PolicyRequirements)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyRequirements) UnmarshalJSON(data []byte) error {
+ reqJSONs := []json.RawMessage{}
+ if err := json.Unmarshal(data, &reqJSONs); err != nil {
+ return err
+ }
+ if len(reqJSONs) == 0 {
+ return InvalidPolicyFormatError("List of verification policy requirements must not be empty")
+ }
+ res := make([]PolicyRequirement, len(reqJSONs))
+ for i, reqJSON := range reqJSONs {
+ req, err := newPolicyRequirementFromJSON(reqJSON)
+ if err != nil {
+ return err
+ }
+ res[i] = req
+ }
+ *m = res
+ return nil
+}
+
+// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation.
+func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
+ var typeField prCommon
+ if err := json.Unmarshal(data, &typeField); err != nil {
+ return nil, err
+ }
+ var res PolicyRequirement
+ switch typeField.Type {
+ case prTypeInsecureAcceptAnything:
+ res = &prInsecureAcceptAnything{}
+ case prTypeReject:
+ res = &prReject{}
+ case prTypeSignedBy:
+ res = &prSignedBy{}
+ case prTypeSignedBaseLayer:
+ res = &prSignedBaseLayer{}
+ case prTypeSigstoreSigned:
+ res = &prSigstoreSigned{}
+ default:
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
+ }
+ if err := json.Unmarshal(data, &res); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type.
+func newPRInsecureAcceptAnything() *prInsecureAcceptAnything {
+ return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}}
+}
+
+// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement.
+func NewPRInsecureAcceptAnything() PolicyRequirement {
+ return newPRInsecureAcceptAnything()
+}
+
+// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
+ *pr = prInsecureAcceptAnything{}
+ var tmp prInsecureAcceptAnything
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeInsecureAcceptAnything {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *pr = *newPRInsecureAcceptAnything()
+ return nil
+}
+
+// newPRReject is NewPRReject, except it returns the private type.
+func newPRReject() *prReject {
+ return &prReject{prCommon{Type: prTypeReject}}
+}
+
+// NewPRReject returns a new "reject" PolicyRequirement.
+func NewPRReject() PolicyRequirement {
+ return newPRReject()
+}
+
+// Compile-time check that prReject implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prReject)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prReject) UnmarshalJSON(data []byte) error {
+ *pr = prReject{}
+ var tmp prReject
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeReject {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *pr = *newPRReject()
+ return nil
+}
+
+// newPRSignedBy returns a new prSignedBy if parameters are valid.
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ if !keyType.IsValid() {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+ }
+ keySources := 0
+ if keyPath != "" {
+ keySources++
+ }
+ if keyPaths != nil {
+ keySources++
+ }
+ if keyData != nil {
+ keySources++
+ }
+ if keySources != 1 {
+ return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified")
+ }
+ if signedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+ return &prSignedBy{
+ prCommon: prCommon{Type: prTypeSignedBy},
+ KeyType: keyType,
+ KeyPath: keyPath,
+ KeyPaths: keyPaths,
+ KeyData: keyData,
+ SignedIdentity: signedIdentity,
+ }, nil
+}
+
+// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
+func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
+func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+}
+
+// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type.
+func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths
+func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity)
+}
+
+// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, "", nil, keyData, signedIdentity)
+}
+
+// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using KeyData
+func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyData(keyType, keyData, signedIdentity)
+}
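+
+// Illustrative sketch: building the same kind of requirement programmatically
+// instead of via JSON (the key path is hypothetical):
+//
+//	req, err := signature.NewPRSignedByKeyPath(
+//		signature.SBKeyTypeGPGKeys,
+//		"/etc/pki/containers/pubkey.gpg",
+//		signature.NewPRMMatchRepoDigestOrExact(),
+//	)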
+
+// Compile-time check that prSignedBy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
+ *pr = prSignedBy{}
+ var tmp prSignedBy
+ var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false
+ var signedIdentity json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "keyType":
+ return &tmp.KeyType
+ case "keyPath":
+ gotKeyPath = true
+ return &tmp.KeyPath
+ case "keyPaths":
+ gotKeyPaths = true
+ return &tmp.KeyPaths
+ case "keyData":
+ gotKeyData = true
+ return &tmp.KeyData
+ case "signedIdentity":
+ return &signedIdentity
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSignedBy {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ if signedIdentity == nil {
+ tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+ } else {
+ si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+ if err != nil {
+ return err
+ }
+ tmp.SignedIdentity = si
+ }
+
+ var res *prSignedBy
+ var err error
+ switch {
+ case gotKeyPath && !gotKeyPaths && !gotKeyData:
+ res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
+ case !gotKeyPath && gotKeyPaths && !gotKeyData:
+ res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyPaths && gotKeyData:
+ res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyPaths && !gotKeyData:
+ return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present")
+ default:
+ return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present")
+ }
+ if err != nil {
+ return err
+ }
+ *pr = *res
+
+ return nil
+}
+
+// IsValid returns true iff kt is a recognized value
+func (kt sbKeyType) IsValid() bool {
+ switch kt {
+ case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
+ SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+ return true
+ default:
+ return false
+ }
+}
+
+// Compile-time check that sbKeyType implements json.Unmarshaler.
+var _ json.Unmarshaler = (*sbKeyType)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
+ *kt = sbKeyType("")
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ if !sbKeyType(s).IsValid() {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+ }
+ *kt = sbKeyType(s)
+ return nil
+}
+
+// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
+func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
+ if baseLayerIdentity == nil {
+ return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
+ }
+ return &prSignedBaseLayer{
+ prCommon: prCommon{Type: prTypeSignedBaseLayer},
+ BaseLayerIdentity: baseLayerIdentity,
+ }, nil
+}
+
+// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
+func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedBaseLayer(baseLayerIdentity)
+}
+
+// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
+ *pr = prSignedBaseLayer{}
+ var tmp prSignedBaseLayer
+ var baseLayerIdentity json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ "baseLayerIdentity": &baseLayerIdentity,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSignedBaseLayer {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
+ if err != nil {
+ return err
+ }
+ res, err := newPRSignedBaseLayer(bli)
+ if err != nil {
+ // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
+ return err
+ }
+ *pr = *res
+ return nil
+}
+
+// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
+func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
+ var typeField prmCommon
+ if err := json.Unmarshal(data, &typeField); err != nil {
+ return nil, err
+ }
+ var res PolicyReferenceMatch
+ switch typeField.Type {
+ case prmTypeMatchExact:
+ res = &prmMatchExact{}
+ case prmTypeMatchRepoDigestOrExact:
+ res = &prmMatchRepoDigestOrExact{}
+ case prmTypeMatchRepository:
+ res = &prmMatchRepository{}
+ case prmTypeExactReference:
+ res = &prmExactReference{}
+ case prmTypeExactRepository:
+ res = &prmExactRepository{}
+ case prmTypeRemapIdentity:
+ res = &prmRemapIdentity{}
+ default:
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+ }
+ if err := json.Unmarshal(data, &res); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
+func newPRMMatchExact() *prmMatchExact {
+ return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
+}
+
+// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
+func NewPRMMatchExact() PolicyReferenceMatch {
+ return newPRMMatchExact()
+}
+
+// Compile-time check that prmMatchExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchExact{}
+ var tmp prmMatchExact
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchExact {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchExact()
+ return nil
+}
+
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
+ return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
+}
+
+// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
+func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
+ return newPRMMatchRepoDigestOrExact()
+}
+
+// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchRepoDigestOrExact{}
+ var tmp prmMatchRepoDigestOrExact
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchRepoDigestOrExact {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchRepoDigestOrExact()
+ return nil
+}
+
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
+func newPRMMatchRepository() *prmMatchRepository {
+ return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
+}
+
+// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
+func NewPRMMatchRepository() PolicyReferenceMatch {
+ return newPRMMatchRepository()
+}
+
+// Compile-time check that prmMatchRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchRepository{}
+ var tmp prmMatchRepository
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchRepository()
+ return nil
+}
+
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
+func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
+ ref, err := reference.ParseNormalizedNamed(dockerReference)
+ if err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+ }
+ if reference.IsNameOnly(ref) {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+ }
+ return &prmExactReference{
+ prmCommon: prmCommon{Type: prmTypeExactReference},
+ DockerReference: dockerReference,
+ }, nil
+}
+
+// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+ return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+ *prm = prmExactReference{}
+ var tmp prmExactReference
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ "dockerReference": &tmp.DockerReference,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactReference {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactReference(tmp.DockerReference)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
+
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+ if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+ }
+ return &prmExactRepository{
+ prmCommon: prmCommon{Type: prmTypeExactRepository},
+ DockerRepository: dockerRepository,
+ }, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+ return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmExactRepository{}
+ var tmp prmExactRepository
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ "dockerRepository": &tmp.DockerRepository,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactRepository(tmp.DockerRepository)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
+
+// Private objects for validateIdentityRemappingPrefix
+var (
+ // remapIdentityDomainRegexp matches exactly a reference domain (name[:port])
+ remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
+ // remapIdentityDomainPrefixRegexp matches a reference that starts with a domain;
+ // we need this because reference.NameRegexp accepts short names with docker.io implied.
+ remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/")
+ // remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
+ remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$")
+)
+
+// validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
+// for the Prefix or SignedPrefix values of prmRemapIdentity.
+// Note that it may not recognize _all_ invalid values.
+func validateIdentityRemappingPrefix(s string) error {
+ if remapIdentityDomainRegexp.MatchString(s) ||
+ (remapIdentityNameRegexp.MatchString(s) && remapIdentityDomainPrefixRegexp.MatchString(s)) {
+ // FIXME? This does not reject "shortname" nor "ns/shortname", because docker/reference
+ // does not provide an API for the short vs. long name logic.
+ // It will either not match, or fail in the ParseNamed call of
+ // prmRemapIdentity.remapReferencePrefix when trying to use such a prefix.
+ return nil
+ }
+ return InvalidPolicyFormatError(fmt.Sprintf("prefix %q is not valid", s))
+}
+
+// newPRMRemapIdentity is NewPRMRemapIdentity, except it returns the private type.
+func newPRMRemapIdentity(prefix, signedPrefix string) (*prmRemapIdentity, error) {
+ if err := validateIdentityRemappingPrefix(prefix); err != nil {
+ return nil, err
+ }
+ if err := validateIdentityRemappingPrefix(signedPrefix); err != nil {
+ return nil, err
+ }
+ return &prmRemapIdentity{
+ prmCommon: prmCommon{Type: prmTypeRemapIdentity},
+ Prefix: prefix,
+ SignedPrefix: signedPrefix,
+ }, nil
+}
+
+// NewPRMRemapIdentity returns a new "remapIdentity" PolicyReferenceMatch.
+func NewPRMRemapIdentity(prefix, signedPrefix string) (PolicyReferenceMatch, error) {
+ return newPRMRemapIdentity(prefix, signedPrefix)
+}
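+
+// Illustrative sketch: matching signatures made for an upstream identity when
+// pulling from a mirror (both prefixes are hypothetical):
+//
+//	prm, err := signature.NewPRMRemapIdentity(
+//		"mirror.example.com/vendor",   // what local references start with
+//		"registry.example.com/vendor", // what the signatures were made for
+//	)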
+
+// Compile-time check that prmRemapIdentity implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
+ *prm = prmRemapIdentity{}
+ var tmp prmRemapIdentity
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ "prefix": &tmp.Prefix,
+ "signedPrefix": &tmp.SignedPrefix,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeRemapIdentity {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
diff --git a/signature/policy_config_sigstore.go b/signature/policy_config_sigstore.go
new file mode 100644
index 0000000..d8c6a97
--- /dev/null
+++ b/signature/policy_config_sigstore.go
@@ -0,0 +1,343 @@
+package signature
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/containers/image/v5/signature/internal"
+)
+
+// PRSigstoreSignedOption is a way to pass values to NewPRSigstoreSigned.
+type PRSigstoreSignedOption func(*prSigstoreSigned) error
+
+// PRSigstoreSignedWithKeyPath specifies a value for the "keyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyPath != "" {
+ return errors.New(`"keyPath" already specified`)
+ }
+ pr.KeyPath = keyPath
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyData != nil {
+ return errors.New(`"keyData" already specified`)
+ }
+ pr.KeyData = keyData
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.Fulcio != nil {
+ return errors.New(`"fulcio" already specified`)
+ }
+ pr.Fulcio = fulcio
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyPath != "" {
+ return errors.New(`"rekorPublicKeyPath" already specified`)
+ }
+ pr.RekorPublicKeyPath = rekorPublicKeyPath
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyData != nil {
+ return errors.New(`"rekorPublicKeyData" already specified`)
+ }
+ pr.RekorPublicKeyData = rekorPublicKeyData
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.SignedIdentity != nil {
+ return errors.New(`"signedIdentity" already specified`)
+ }
+ pr.SignedIdentity = signedIdentity
+ return nil
+ }
+}
+
+// newPRSigstoreSigned is NewPRSigstoreSigned, except it returns the private type.
+func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, error) {
+ res := prSigstoreSigned{
+ prCommon: prCommon{Type: prTypeSigstoreSigned},
+ }
+ for _, o := range options {
+ if err := o(&res); err != nil {
+ return nil, err
+ }
+ }
+
+ keySources := 0
+ if res.KeyPath != "" {
+ keySources++
+ }
+ if res.KeyData != nil {
+ keySources++
+ }
+ if res.Fulcio != nil {
+ keySources++
+ }
+ if keySources != 1 {
+ return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified")
+ }
+
+ if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil {
+ return nil, InvalidPolicyFormatError("rekorPublickeyType and rekorPublickeyData cannot be used simultaneously")
+ }
+ if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil {
+ return nil, InvalidPolicyFormatError("At least one of RekorPublickeyPath and RekorPublickeyData must be specified if fulcio is used")
+ }
+
+ if res.SignedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+
+ return &res, nil
+}
+
+// NewPRSigstoreSigned returns a new "sigstoreSigned" PolicyRequirement based on options.
+func NewPRSigstoreSigned(options ...PRSigstoreSignedOption) (PolicyRequirement, error) {
+ return newPRSigstoreSigned(options...)
+}
+
+// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
+func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath(keyPath),
+ PRSigstoreSignedWithSignedIdentity(signedIdentity),
+ )
+}
+
+// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using KeyData
+func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyData(keyData),
+ PRSigstoreSignedWithSignedIdentity(signedIdentity),
+ )
+}
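+
+// Illustrative sketch: a key-based "sigstoreSigned" requirement built from
+// options (the public key path is hypothetical):
+//
+//	req, err := signature.NewPRSigstoreSigned(
+//		signature.PRSigstoreSignedWithKeyPath("/etc/pki/containers/cosign.pub"),
+//		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMMatchRepoDigestOrExact()),
+//	)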
+
+// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
+ *pr = prSigstoreSigned{}
+ var tmp prSigstoreSigned
+ var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool
+ var fulcio prSigstoreSignedFulcio
+ var signedIdentity json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "keyPath":
+ gotKeyPath = true
+ return &tmp.KeyPath
+ case "keyData":
+ gotKeyData = true
+ return &tmp.KeyData
+ case "fulcio":
+ gotFulcio = true
+ return &fulcio
+ case "rekorPublicKeyPath":
+ gotRekorPublicKeyPath = true
+ return &tmp.RekorPublicKeyPath
+ case "rekorPublicKeyData":
+ gotRekorPublicKeyData = true
+ return &tmp.RekorPublicKeyData
+ case "signedIdentity":
+ return &signedIdentity
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSigstoreSigned {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ if signedIdentity == nil {
+ tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+ } else {
+ si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+ if err != nil {
+ return err
+ }
+ tmp.SignedIdentity = si
+ }
+
+ var opts []PRSigstoreSignedOption
+ if gotKeyPath {
+ opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath))
+ }
+ if gotKeyData {
+ opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData))
+ }
+ if gotFulcio {
+ opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio))
+ }
+ if gotRekorPublicKeyPath {
+ opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath))
+ }
+ if gotRekorPublicKeyData {
+ opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData))
+ }
+ opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity))
+
+ res, err := newPRSigstoreSigned(opts...)
+ if err != nil {
+ return err
+ }
+ *pr = *res
+ return nil
+}
+
+// PRSigstoreSignedFulcioOption is a way to pass values to NewPRSigstoreSignedFulcio
+type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error
+
+// PRSigstoreSignedFulcioWithCAPath specifies a value for the "caPath" field when calling NewPRSigstoreSignedFulcio
+func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption {
+ return func(f *prSigstoreSignedFulcio) error {
+ if f.CAPath != "" {
+ return errors.New(`"caPath" already specified`)
+ }
+ f.CAPath = caPath
+ return nil
+ }
+}
+
+// PRSigstoreSignedFulcioWithCAData specifies a value for the "caData" field when calling NewPRSigstoreSignedFulcio
+func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption {
+ return func(f *prSigstoreSignedFulcio) error {
+ if f.CAData != nil {
+ return errors.New(`"caData" already specified`)
+ }
+ f.CAData = caData
+ return nil
+ }
+}
+
+// PRSigstoreSignedFulcioWithOIDCIssuer specifies a value for the "oidcIssuer" field when calling NewPRSigstoreSignedFulcio
+func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption {
+ return func(f *prSigstoreSignedFulcio) error {
+ if f.OIDCIssuer != "" {
+ return errors.New(`"oidcIssuer" already specified`)
+ }
+ f.OIDCIssuer = oidcIssuer
+ return nil
+ }
+}
+
+// PRSigstoreSignedFulcioWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedFulcio
+func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption {
+ return func(f *prSigstoreSignedFulcio) error {
+ if f.SubjectEmail != "" {
+ return errors.New(`"subjectEmail" already specified`)
+ }
+ f.SubjectEmail = subjectEmail
+ return nil
+ }
+}
+
+// newPRSigstoreSignedFulcio is NewPRSigstoreSignedFulcio, except it returns the private type
+func newPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (*prSigstoreSignedFulcio, error) {
+ res := prSigstoreSignedFulcio{}
+ for _, o := range options {
+ if err := o(&res); err != nil {
+ return nil, err
+ }
+ }
+
+ if res.CAPath != "" && res.CAData != nil {
+ return nil, InvalidPolicyFormatError("caPath and caData cannot be used simultaneously")
+ }
+ if res.CAPath == "" && res.CAData == nil {
+ return nil, InvalidPolicyFormatError("At least one of caPath and caData must be specified")
+ }
+ if res.OIDCIssuer == "" {
+ return nil, InvalidPolicyFormatError("oidcIssuer not specified")
+ }
+ if res.SubjectEmail == "" {
+ return nil, InvalidPolicyFormatError("subjectEmail not specified")
+ }
+
+ return &res, nil
+}
+
+// NewPRSigstoreSignedFulcio returns a PRSigstoreSignedFulcio based on options.
+func NewPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (PRSigstoreSignedFulcio, error) {
+ return newPRSigstoreSignedFulcio(options...)
+}
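+
+// Illustrative sketch: a Fulcio-based requirement must also pin a Rekor public
+// key, per newPRSigstoreSigned (the issuer, e-mail and paths are hypothetical):
+//
+//	fulcio, err := signature.NewPRSigstoreSignedFulcio(
+//		signature.PRSigstoreSignedFulcioWithCAPath("/etc/pki/containers/fulcio_v1.crt.pem"),
+//		signature.PRSigstoreSignedFulcioWithOIDCIssuer("https://oauth2.sigstore.dev/auth"),
+//		signature.PRSigstoreSignedFulcioWithSubjectEmail("release@example.com"),
+//	)
+//	// ... handle err ...
+//	req, err := signature.NewPRSigstoreSigned(
+//		signature.PRSigstoreSignedWithFulcio(fulcio),
+//		signature.PRSigstoreSignedWithRekorPublicKeyPath("/etc/pki/containers/rekor.pub"),
+//		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMMatchRepoDigestOrExact()),
+//	)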
+
+// Compile-time check that prSigstoreSignedFulcio implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSignedFulcio)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error {
+ *f = prSigstoreSignedFulcio{}
+ var tmp prSigstoreSignedFulcio
+ var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false...
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ switch key {
+ case "caPath":
+ gotCAPath = true
+ return &tmp.CAPath
+ case "caData":
+ gotCAData = true
+ return &tmp.CAData
+ case "oidcIssuer":
+ gotOIDCIssuer = true
+ return &tmp.OIDCIssuer
+ case "subjectEmail":
+ gotSubjectEmail = true
+ return &tmp.SubjectEmail
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ var opts []PRSigstoreSignedFulcioOption
+ if gotCAPath {
+ opts = append(opts, PRSigstoreSignedFulcioWithCAPath(tmp.CAPath))
+ }
+ if gotCAData {
+ opts = append(opts, PRSigstoreSignedFulcioWithCAData(tmp.CAData))
+ }
+ if gotOIDCIssuer {
+ opts = append(opts, PRSigstoreSignedFulcioWithOIDCIssuer(tmp.OIDCIssuer))
+ }
+ if gotSubjectEmail {
+ opts = append(opts, PRSigstoreSignedFulcioWithSubjectEmail(tmp.SubjectEmail))
+ }
+
+ res, err := newPRSigstoreSignedFulcio(opts...)
+ if err != nil {
+ return err
+ }
+
+ *f = *res
+ return nil
+}
diff --git a/signature/policy_config_sigstore_test.go b/signature/policy_config_sigstore_test.go
new file mode 100644
index 0000000..d71ae5f
--- /dev/null
+++ b/signature/policy_config_sigstore_test.go
@@ -0,0 +1,502 @@
+package signature
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// xNewPRSigstoreSigned is like NewPRSigstoreSigned, except it must not fail.
+func xNewPRSigstoreSigned(options ...PRSigstoreSignedOption) PolicyRequirement {
+ pr, err := NewPRSigstoreSigned(options...)
+ if err != nil {
+ panic("xNewPRSigstoreSigned failed")
+ }
+ return pr
+}
+
+func TestNewPRSigstoreSigned(t *testing.T) {
+ const testKeyPath = "/foo/bar"
+ testKeyData := []byte("abc")
+ testFulcio, err := NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAPath("fixtures/fulcio_v1.crt.pem"),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("mitr@redhat.com"),
+ )
+ require.NoError(t, err)
+ const testRekorKeyPath = "/foo/baz"
+ testRekorKeyData := []byte("def")
+ testIdentity := NewPRMMatchRepoDigestOrExact()
+
+ // Success: combinations of key source and Rekor usage
+ for _, c := range []struct {
+ options []PRSigstoreSignedOption
+ requiresRekor bool
+ expected prSigstoreSigned
+ }{
+ {
+ options: []PRSigstoreSignedOption{
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ expected: prSigstoreSigned{
+ prCommon: prCommon{prTypeSigstoreSigned},
+ KeyPath: testKeyPath,
+ KeyData: nil,
+ Fulcio: nil,
+ SignedIdentity: testIdentity,
+ },
+ },
+ {
+ options: []PRSigstoreSignedOption{
+ PRSigstoreSignedWithKeyData(testKeyData),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ expected: prSigstoreSigned{
+ prCommon: prCommon{prTypeSigstoreSigned},
+ KeyPath: "",
+ KeyData: testKeyData,
+ Fulcio: nil,
+ SignedIdentity: testIdentity,
+ },
+ },
+ {
+ options: []PRSigstoreSignedOption{
+ PRSigstoreSignedWithFulcio(testFulcio),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ requiresRekor: true,
+ expected: prSigstoreSigned{
+ prCommon: prCommon{prTypeSigstoreSigned},
+ KeyPath: "",
+ KeyData: nil,
+ Fulcio: testFulcio,
+ SignedIdentity: testIdentity,
+ },
+ },
+ } {
+ for _, c2 := range []struct {
+ rekorOptions []PRSigstoreSignedOption
+ rekorExpected prSigstoreSigned
+ }{
+ { // No Rekor
+ rekorOptions: []PRSigstoreSignedOption{},
+ rekorExpected: prSigstoreSigned{},
+ },
+ {
+ rekorOptions: []PRSigstoreSignedOption{
+ PRSigstoreSignedWithRekorPublicKeyPath(testRekorKeyPath),
+ },
+ rekorExpected: prSigstoreSigned{
+ RekorPublicKeyPath: testRekorKeyPath,
+ },
+ },
+ {
+ rekorOptions: []PRSigstoreSignedOption{
+ PRSigstoreSignedWithRekorPublicKeyData(testRekorKeyData),
+ },
+ rekorExpected: prSigstoreSigned{
+ RekorPublicKeyData: testRekorKeyData,
+ },
+ },
+ } {
+ // Special-case this rejected combination:
+ if c.requiresRekor && len(c2.rekorOptions) == 0 {
+ continue
+ }
+ pr, err := newPRSigstoreSigned(append(c.options, c2.rekorOptions...)...)
+ require.NoError(t, err)
+ expected := c.expected // A shallow copy
+ expected.RekorPublicKeyPath = c2.rekorExpected.RekorPublicKeyPath
+ expected.RekorPublicKeyData = c2.rekorExpected.RekorPublicKeyData
+ assert.Equal(t, &expected, pr)
+ }
+ }
+
+ testFulcio2, err := NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAPath("fixtures/fulcio_v1.crt.pem"),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("test-user@example.com"),
+ )
+ require.NoError(t, err)
+ for _, c := range [][]PRSigstoreSignedOption{
+ {}, // None of keyPath, keyData, or fulcio specified
+ { // Both keyPath and keyData specified
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ PRSigstoreSignedWithKeyData(testKeyData),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // both keyPath and fulcio specified
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ PRSigstoreSignedWithFulcio(testFulcio),
+ PRSigstoreSignedWithRekorPublicKeyPath(testRekorKeyPath),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // both keyData and fulcio specified
+ PRSigstoreSignedWithKeyData(testKeyData),
+ PRSigstoreSignedWithFulcio(testFulcio),
+ PRSigstoreSignedWithRekorPublicKeyPath(testRekorKeyPath),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // Duplicate keyPath
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ PRSigstoreSignedWithKeyPath(testKeyPath + "1"),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // Duplicate keyData
+ PRSigstoreSignedWithKeyData(testKeyData),
+ PRSigstoreSignedWithKeyData([]byte("def")),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // Duplicate fulcio
+ PRSigstoreSignedWithFulcio(testFulcio),
+ PRSigstoreSignedWithFulcio(testFulcio2),
+ PRSigstoreSignedWithRekorPublicKeyPath(testRekorKeyPath),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // fulcio without Rekor
+ PRSigstoreSignedWithFulcio(testFulcio),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // Both rekorKeyPath and rekorKeyData specified
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ PRSigstoreSignedWithRekorPublicKeyPath(testRekorKeyPath),
+ PRSigstoreSignedWithRekorPublicKeyData(testRekorKeyData),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // Duplicate rekorKeyPath
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ PRSigstoreSignedWithRekorPublicKeyPath(testRekorKeyPath),
+ PRSigstoreSignedWithRekorPublicKeyPath(testRekorKeyPath + "1"),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // Duplicate rekorPublicKeyData
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ PRSigstoreSignedWithRekorPublicKeyData(testRekorKeyData),
+ PRSigstoreSignedWithRekorPublicKeyData([]byte("def")),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ },
+ { // Missing signedIdentity
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ },
+ { // Duplicate signedIdentity
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ PRSigstoreSignedWithSignedIdentity(testIdentity),
+ PRSigstoreSignedWithSignedIdentity(newPRMMatchRepository()),
+ },
+ } {
+ _, err = newPRSigstoreSigned(c...)
+ assert.Error(t, err)
+ }
+}
+
+func TestNewPRSigstoreSignedKeyPath(t *testing.T) {
+ const testPath = "/foo/bar"
+ signedIdentity := NewPRMMatchRepoDigestOrExact()
+ _pr, err := NewPRSigstoreSignedKeyPath(testPath, signedIdentity)
+ require.NoError(t, err)
+ pr, ok := _pr.(*prSigstoreSigned)
+ require.True(t, ok)
+ assert.Equal(t, &prSigstoreSigned{
+ prCommon: prCommon{Type: prTypeSigstoreSigned},
+ KeyPath: testPath,
+ SignedIdentity: NewPRMMatchRepoDigestOrExact(),
+ }, pr)
+}
+
+func TestNewPRSigstoreSignedKeyData(t *testing.T) {
+ testData := []byte("abc")
+ signedIdentity := NewPRMMatchRepoDigestOrExact()
+ _pr, err := NewPRSigstoreSignedKeyData(testData, signedIdentity)
+ require.NoError(t, err)
+ pr, ok := _pr.(*prSigstoreSigned)
+ require.True(t, ok)
+ assert.Equal(t, &prSigstoreSigned{
+ prCommon: prCommon{Type: prTypeSigstoreSigned},
+ KeyData: testData,
+ SignedIdentity: NewPRMMatchRepoDigestOrExact(),
+ }, pr)
+}
+
+// Return the result of modifying validJSON with fn and unmarshaling it into *pr
+func tryUnmarshalModifiedSigstoreSigned(t *testing.T, pr *prSigstoreSigned, validJSON []byte, modifyFn func(mSA)) error {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ modifyFn(tmp)
+
+ *pr = prSigstoreSigned{}
+ return jsonUnmarshalFromObject(t, tmp, &pr)
+}
+
+func TestPRSigstoreSignedUnmarshalJSON(t *testing.T) {
+ keyDataTests := policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prSigstoreSigned{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRSigstoreSignedKeyData([]byte("abc"), NewPRMMatchRepoDigestOrExact())
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ breakFns: []func(mSA){
+ // The "type" field is missing
+ func(v mSA) { delete(v, "type") },
+ // Wrong "type" field
+ func(v mSA) { v["type"] = 1 },
+ func(v mSA) { v["type"] = "this is invalid" },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // All of "keyPath" and "keyData", and "fulcio" is missing
+ func(v mSA) { delete(v, "keyData") },
+ // Both "keyPath" and "keyData" is present
+ func(v mSA) { v["keyPath"] = "/foo/bar" },
+ // Both "keyData" and "fulcio" is present
+ func(v mSA) {
+ v["fulcio"] = mSA{
+ "caPath": "/foo/baz",
+ "oidcIssuer": "https://example.com",
+ "subjectEmail": "test@example.com",
+ }
+ },
+ // Invalid "keyPath" field
+ func(v mSA) { delete(v, "keyData"); v["keyPath"] = 1 },
+ // Invalid "keyData" field
+ func(v mSA) { v["keyData"] = 1 },
+ func(v mSA) { v["keyData"] = "this is invalid base64" },
+ // Invalid "fulcio" field
+ func(v mSA) { v["fulcio"] = 1 },
+ func(v mSA) { v["fulcio"] = mSA{} },
+ // "fulcio" is explicit nil
+ func(v mSA) { v["fulcio"] = nil },
+ // Both "rekorKeyPath" and "rekorKeyData" is present
+ func(v mSA) {
+ v["rekorPublicKeyPath"] = "/foo/baz"
+ v["rekorPublicKeyData"] = ""
+ },
+ // Invalid "rekorPublicKeyPath" field
+ func(v mSA) { v["rekorPublicKeyPath"] = 1 },
+ // Invalid "rekorPublicKeyData" field
+ func(v mSA) { v["rekorPublicKeyData"] = 1 },
+ func(v mSA) { v["rekorPublicKeyData"] = "this is invalid base64" },
+ // Invalid "signedIdentity" field
+ func(v mSA) { v["signedIdentity"] = "this is invalid" },
+ // "signedIdentity" an explicit nil
+ func(v mSA) { v["signedIdentity"] = nil },
+ },
+ duplicateFields: []string{"type", "keyData", "signedIdentity"},
+ }
+ keyDataTests.run(t)
+ // Test keyPath-specific duplicate fields
+ policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prSigstoreSigned{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRSigstoreSignedKeyPath("/foo/bar", NewPRMMatchRepoDigestOrExact())
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ duplicateFields: []string{"type", "keyPath", "signedIdentity"},
+ }.run(t)
+ // Test Fulcio and rekorPublicKeyPath duplicate fields
+ testFulcio, err := NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAPath("fixtures/fulcio_v1.crt.pem"),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("mitr@redhat.com"),
+ )
+ require.NoError(t, err)
+ policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prSigstoreSigned{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRSigstoreSigned(
+ PRSigstoreSignedWithFulcio(testFulcio),
+ PRSigstoreSignedWithRekorPublicKeyPath("/foo/rekor"),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchRepoDigestOrExact()),
+ )
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ duplicateFields: []string{"type", "fulcio", "rekorPublicKeyPath", "signedIdentity"},
+ }.run(t)
+ // Test rekorPublicKeyData duplicate fields
+ policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prSigstoreSigned{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("/foo/bar"),
+ PRSigstoreSignedWithRekorPublicKeyData([]byte("foo")),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchRepoDigestOrExact()),
+ )
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ duplicateFields: []string{"type", "keyPath", "rekorPublicKeyData", "signedIdentity"},
+ }.run(t)
+
+ var pr prSigstoreSigned
+
+ // Start with a valid JSON.
+ _, validJSON := keyDataTests.validObjectAndJSON(t)
+
+ // Various allowed modifications to the requirement
+ allowedModificationFns := []func(mSA){
+ // Delete the signedIdentity field
+ func(v mSA) { delete(v, "signedIdentity") },
+ }
+ for _, fn := range allowedModificationFns {
+ err := tryUnmarshalModifiedSigstoreSigned(t, &pr, validJSON, fn)
+ require.NoError(t, err)
+ }
+
+ // Various ways to set signedIdentity to the default value
+ signedIdentityDefaultFns := []func(mSA){
+ // Set signedIdentity to the default explicitly
+ func(v mSA) { v["signedIdentity"] = NewPRMMatchRepoDigestOrExact() },
+ // Delete the signedIdentity field
+ func(v mSA) { delete(v, "signedIdentity") },
+ }
+ for _, fn := range signedIdentityDefaultFns {
+ err := tryUnmarshalModifiedSigstoreSigned(t, &pr, validJSON, fn)
+ require.NoError(t, err)
+ assert.Equal(t, NewPRMMatchRepoDigestOrExact(), pr.SignedIdentity)
+ }
+}
+
+func TestNewPRSigstoreSignedFulcio(t *testing.T) {
+ const testCAPath = "/foo/bar"
+ testCAData := []byte("abc")
+ const testOIDCIssuer = "https://example.com"
+ const testSubjectEmail = "test@example.com"
+
+ // Success:
+ for _, c := range []struct {
+ options []PRSigstoreSignedFulcioOption
+ expected prSigstoreSignedFulcio
+ }{
+ {
+ options: []PRSigstoreSignedFulcioOption{
+ PRSigstoreSignedFulcioWithCAPath(testCAPath),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ expected: prSigstoreSignedFulcio{
+ CAPath: testCAPath,
+ OIDCIssuer: testOIDCIssuer,
+ SubjectEmail: testSubjectEmail,
+ },
+ },
+ {
+ options: []PRSigstoreSignedFulcioOption{
+ PRSigstoreSignedFulcioWithCAData(testCAData),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ expected: prSigstoreSignedFulcio{
+ CAData: testCAData,
+ OIDCIssuer: testOIDCIssuer,
+ SubjectEmail: testSubjectEmail,
+ },
+ },
+ } {
+ pr, err := newPRSigstoreSignedFulcio(c.options...)
+ require.NoError(t, err)
+ assert.Equal(t, &c.expected, pr)
+ }
+
+ for _, c := range [][]PRSigstoreSignedFulcioOption{
+ { // Neither caPath nor caData specified
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ { // Both caPath and caData specified
+ PRSigstoreSignedFulcioWithCAPath(testCAPath),
+ PRSigstoreSignedFulcioWithCAData(testCAData),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ { // Duplicate caPath
+ PRSigstoreSignedFulcioWithCAPath(testCAPath),
+ PRSigstoreSignedFulcioWithCAPath(testCAPath + "1"),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ { // Duplicate caData
+ PRSigstoreSignedFulcioWithCAData(testCAData),
+ PRSigstoreSignedFulcioWithCAData([]byte("def")),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ { // Missing oidcIssuer
+ PRSigstoreSignedFulcioWithCAPath(testCAPath),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ { // Duplicate oidcIssuer
+ PRSigstoreSignedFulcioWithCAPath(testCAPath),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer + "1"),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ { // Missing subjectEmail
+ PRSigstoreSignedFulcioWithCAPath(testCAPath),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ },
+ { // Duplicate subjectEmail
+ PRSigstoreSignedFulcioWithCAPath(testCAPath),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ PRSigstoreSignedFulcioWithSubjectEmail("1" + testSubjectEmail),
+ },
+ } {
+ _, err := newPRSigstoreSignedFulcio(c...)
+ logrus.Errorf("%#v", err)
+ assert.Error(t, err)
+ }
+}
+
+func TestPRSigstoreSignedFulcioUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PRSigstoreSignedFulcio]{
+ newDest: func() json.Unmarshaler { return &prSigstoreSignedFulcio{} },
+ newValidObject: func() (PRSigstoreSignedFulcio, error) {
+ return NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAPath("fixtures/fulcio_v1.crt.pem"),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("mitr@redhat.com"),
+ )
+ },
+ otherJSONParser: nil,
+ breakFns: []func(mSA){
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // Both of "caPath" and "caData" are missing
+ func(v mSA) { delete(v, "caPath") },
+ // Both "caPath" and "caData" is present
+ func(v mSA) { v["caData"] = "" },
+ // Invalid "caPath" field
+ func(v mSA) { v["caPath"] = 1 },
+ // Invalid "oidcIssuer" field
+ func(v mSA) { v["oidcIssuer"] = 1 },
+ // "oidcIssuer" is missing
+ func(v mSA) { delete(v, "oidcIssuer") },
+ // Invalid "subjectEmail" field
+ func(v mSA) { v["subjectEmail"] = 1 },
+ // "subjectEmail" is missing
+ func(v mSA) { delete(v, "subjectEmail") },
+ },
+ duplicateFields: []string{"caPath", "oidcIssuer", "subjectEmail"},
+ }.run(t)
+ // Test caData specifics
+ policyJSONUmarshallerTests[PRSigstoreSignedFulcio]{
+ newDest: func() json.Unmarshaler { return &prSigstoreSignedFulcio{} },
+ newValidObject: func() (PRSigstoreSignedFulcio, error) {
+ return NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAData([]byte("abc")),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("mitr@redhat.com"),
+ )
+ },
+ otherJSONParser: nil,
+ breakFns: []func(mSA){
+ // Invalid "caData" field
+ func(v mSA) { v["caData"] = 1 },
+ func(v mSA) { v["caData"] = "this is invalid base64" },
+ },
+ duplicateFields: []string{"caData", "oidcIssuer", "subjectEmail"},
+ }.run(t)
+}
diff --git a/signature/policy_config_test.go b/signature/policy_config_test.go
new file mode 100644
index 0000000..1af6dc4
--- /dev/null
+++ b/signature/policy_config_test.go
@@ -0,0 +1,1341 @@
+package signature
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/directory"
+ "github.com/containers/image/v5/docker"
+ "golang.org/x/exp/maps"
+
+ // this import is needed where we use the "atomic" transport in TestPolicyUnmarshalJSON
+ _ "github.com/containers/image/v5/openshift"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type mSA map[string]any // To minimize typing the long name
+
+// A short-hand way to get a JSON object field value or panic. No error handling done, we know
+// what we are working with, a panic in a test is good enough, and fitting test cases on a single line
+// is a priority.
+func x(m mSA, fields ...string) mSA {
+ for _, field := range fields {
+ // Not .(mSA) because type assertion of an unnamed type to a named type always fails (the types
+ // are not "identical"), but the assignment is fine because they are "assignable".
+ m = m[field].(map[string]any)
+ }
+ return m
+}
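+
+// Illustrative sketch only (not used by the tests below): for a policy document
+// already parsed into an mSA named, say, "parsed",
+//
+//	dockerScopes := x(parsed, "transports", "docker")
+//
+// walks two levels of nested objects and panics if either level is missing or is
+// not a JSON object.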
+
+// policyFixtureContents is a data structure equal to the contents of "fixtures/policy.json"
+var policyFixtureContents = &Policy{
+ Default: PolicyRequirements{NewPRReject()},
+ Transports: map[string]PolicyTransportScopes{
+ "dir": {
+ "": PolicyRequirements{NewPRInsecureAcceptAnything()},
+ },
+ "docker": {
+ "example.com/playground": {
+ NewPRInsecureAcceptAnything(),
+ },
+ "example.com/production": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys,
+ "/keys/employee-gpg-keyring",
+ NewPRMMatchRepoDigestOrExact()),
+ },
+ "example.com/hardened": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys,
+ "/keys/employee-gpg-keyring",
+ NewPRMMatchRepository()),
+ xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys,
+ "/keys/public-key-signing-gpg-keyring",
+ NewPRMMatchExact()),
+ xNewPRSignedBaseLayer(xNewPRMExactRepository("registry.access.redhat.com/rhel7/rhel")),
+ },
+ "example.com/hardened-x509": {
+ xNewPRSignedByKeyPath(SBKeyTypeX509Certificates,
+ "/keys/employee-cert-file",
+ NewPRMMatchRepository()),
+ xNewPRSignedByKeyPath(SBKeyTypeSignedByX509CAs,
+ "/keys/public-key-signing-ca-file",
+ NewPRMMatchRepoDigestOrExact()),
+ },
+ "registry.access.redhat.com": {
+ xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys,
+ "/keys/RH-key-signing-key-gpg-keyring",
+ NewPRMMatchRepoDigestOrExact()),
+ },
+ "registry.redhat.io/beta": {
+ xNewPRSignedByKeyPaths(SBKeyTypeGPGKeys,
+ []string{"/keys/RH-production-signing-key-gpg-keyring", "/keys/RH-beta-signing-key-gpg-keyring"},
+ newPRMMatchRepoDigestOrExact()),
+ },
+ "private-mirror:5000/vendor-mirror": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys,
+ "/keys/vendor-gpg-keyring",
+ xNewPRMRemapIdentity("private-mirror:5000/vendor-mirror", "vendor.example.com")),
+ },
+ "*.access.redhat.com": {
+ xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys,
+ "/keys/RH-key-signing-key-gpg-keyring",
+ NewPRMMatchRepoDigestOrExact()),
+ },
+ "*.redhat.com": {
+ xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys,
+ "/keys/RH-key-signing-key-gpg-keyring",
+ NewPRMMatchRepoDigestOrExact()),
+ },
+ "*.com": {
+ xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys,
+ "/keys/RH-key-signing-key-gpg-keyring",
+ NewPRMMatchRepoDigestOrExact()),
+ },
+ "bogus/key-data-example": {
+ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys,
+ []byte("nonsense"),
+ NewPRMMatchRepoDigestOrExact()),
+ },
+ "bogus/signed-identity-example": {
+ xNewPRSignedBaseLayer(xNewPRMExactReference("registry.access.redhat.com/rhel7/rhel:latest")),
+ },
+ "example.com/sigstore/key-data-example": {
+ xNewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyData([]byte("nonsense")),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchRepoDigestOrExact()),
+ ),
+ },
+ "example.com/sigstore/key-path-example": {
+ xNewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("/keys/public-key"),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchRepository()),
+ ),
+ },
+ },
+ },
+}
+
+func TestInvalidPolicyFormatError(t *testing.T) {
+ // A stupid test just to keep code coverage
+ s := "test"
+ err := InvalidPolicyFormatError(s)
+ assert.Equal(t, s, err.Error())
+}
+
+func TestDefaultPolicy(t *testing.T) {
+ // We can't test the actual systemDefaultPolicyPath, so override.
+ // TestDefaultPolicyPath below tests that we handle the overrides and defaults
+ // correctly.
+
+ // Success
+ policy, err := DefaultPolicy(&types.SystemContext{SignaturePolicyPath: "./fixtures/policy.json"})
+ require.NoError(t, err)
+ assert.Equal(t, policyFixtureContents, policy)
+
+ for _, path := range []string{
+ "/this/does/not/exist", // Error reading file
+ "/dev/null", // A failure case; most are tested in the individual method unit tests.
+ } {
+ policy, err := DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
+ assert.Error(t, err)
+ assert.Nil(t, policy)
+ }
+}
+
+func TestDefaultPolicyPath(t *testing.T) {
+ const nondefaultPath = "/this/is/not/the/default/path.json"
+ const variableReference = "$HOME"
+ const rootPrefix = "/root/prefix"
+ tempHome := t.TempDir()
+ userDefaultPolicyPath := filepath.Join(tempHome, userPolicyFile)
+
+ for _, c := range []struct {
+ sys *types.SystemContext
+ userfilePresent bool
+ expected string
+ }{
+ // The common case
+ {nil, false, systemDefaultPolicyPath},
+ // There is a context, but it does not override the path.
+ {&types.SystemContext{}, false, systemDefaultPolicyPath},
+ // Path overridden
+ {&types.SystemContext{SignaturePolicyPath: nondefaultPath}, false, nondefaultPath},
+ // Root overridden
+ {
+ &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix},
+ false,
+ filepath.Join(rootPrefix, systemDefaultPolicyPath),
+ },
+ // Empty context and user policy present
+ {&types.SystemContext{}, true, userDefaultPolicyPath},
+ // Only user policy present
+ {nil, true, userDefaultPolicyPath},
+ // Context signature path and user policy present
+ {
+ &types.SystemContext{
+ SignaturePolicyPath: nondefaultPath,
+ },
+ true,
+ nondefaultPath,
+ },
+ // Root and user policy present
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ },
+ true,
+ userDefaultPolicyPath,
+ },
+ // Context and user policy file present simultaneously
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ SignaturePolicyPath: nondefaultPath,
+ },
+ true,
+ nondefaultPath,
+ },
+ // Root and path overrides present simultaneously
+ {
+ &types.SystemContext{
+ RootForImplicitAbsolutePaths: rootPrefix,
+ SignaturePolicyPath: nondefaultPath,
+ },
+ false,
+ nondefaultPath,
+ },
+ // No environment expansion happens in the overridden paths
+ {&types.SystemContext{SignaturePolicyPath: variableReference}, false, variableReference},
+ } {
+ if c.userfilePresent {
+ err := os.MkdirAll(filepath.Dir(userDefaultPolicyPath), os.ModePerm)
+ require.NoError(t, err)
+ f, err := os.Create(userDefaultPolicyPath)
+ require.NoError(t, err)
+ f.Close()
+ } else {
+ os.Remove(userDefaultPolicyPath)
+ }
+ path := defaultPolicyPathWithHomeDir(c.sys, tempHome)
+ assert.Equal(t, c.expected, path)
+ }
+}
+
+func TestNewPolicyFromFile(t *testing.T) {
+ // Success
+ policy, err := NewPolicyFromFile("./fixtures/policy.json")
+ require.NoError(t, err)
+ assert.Equal(t, policyFixtureContents, policy)
+
+ // Error reading file
+ _, err = NewPolicyFromFile("/this/does/not/exist")
+ assert.Error(t, err)
+
+ // A failure case; most are tested in the individual method unit tests.
+ _, err = NewPolicyFromFile("/dev/null")
+ require.Error(t, err)
+ var formatError InvalidPolicyFormatError
+ assert.ErrorAs(t, err, &formatError)
+}
+
+func TestNewPolicyFromBytes(t *testing.T) {
+ // Success
+ bytes, err := os.ReadFile("./fixtures/policy.json")
+ require.NoError(t, err)
+ policy, err := NewPolicyFromBytes(bytes)
+ require.NoError(t, err)
+ assert.Equal(t, policyFixtureContents, policy)
+
+ // A failure case; most are tested in the individual method unit tests.
+ _, err = NewPolicyFromBytes([]byte(""))
+ require.Error(t, err)
+ assert.IsType(t, InvalidPolicyFormatError(""), err)
+}
+
+// FIXME? There is quite a bit of duplication below. Factor some of it out?
+
+// jsonUnmarshalFromObject is like json.Unmarshal(), but the input is an arbitrary object
+// that is JSON-marshalled first (as a convenient way to create an invalid/unusual JSON input)
+func jsonUnmarshalFromObject(t *testing.T, object any, dest any) error {
+ testJSON, err := json.Marshal(object)
+ require.NoError(t, err)
+ return json.Unmarshal(testJSON, dest)
+}
+
+// assertJSONUnmarshalFromObjectFails checks that unmarshaling the JSON-marshaled version
+// of an arbitrary object (as a convenient way to create an invalid/unusual JSON input) into
+// dest fails.
+func assertJSONUnmarshalFromObjectFails(t *testing.T, object any, dest any) {
+ err := jsonUnmarshalFromObject(t, object, dest)
+ assert.Error(t, err)
+}
+
+// testInvalidJSONInput verifies that obviously invalid input is rejected for dest.
+func testInvalidJSONInput(t *testing.T, dest json.Unmarshaler) {
+ // Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
+ // UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
+ err := json.Unmarshal([]byte("&"), dest)
+ assert.Error(t, err)
+ err = dest.UnmarshalJSON([]byte("&"))
+ assert.Error(t, err)
+
+ // Not an object/array/string
+ err = json.Unmarshal([]byte("1"), dest)
+ assert.Error(t, err)
+}
+
+// addExtraJSONMember adds an additional member "$name": $extra,
+// possibly with a duplicate name, to encoded.
+// Errors, if any, are reported through t.
+func addExtraJSONMember(t *testing.T, encoded []byte, name string, extra any) []byte {
+ extraJSON, err := json.Marshal(extra)
+ require.NoError(t, err)
+
+ require.True(t, bytes.HasSuffix(encoded, []byte("}")))
+ preservedLen := len(encoded) - 1
+
+ res := bytes.Join([][]byte{encoded[:preservedLen], []byte(`,"`), []byte(name), []byte(`":`), extraJSON, []byte("}")}, nil)
+ // Verify that the result is valid JSON, as a sanity check that we are actually triggering
+ // the “duplicate member” case in the caller.
+ var raw map[string]any
+ err = json.Unmarshal(res, &raw)
+ require.NoError(t, err)
+ return res
+}
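+
+// Shape of the output, on hypothetical values: for encoded = {"type":"reject"},
+// name = "type" and extra = "reject", the result is the syntactically valid but
+// semantically duplicated {"type":"reject","type":"reject"}.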
+
+// policyJSONUmarshallerTests formalizes the repeated structure of the JSON unmarshaller
+// tests in this file, and allows sharing the test implementation.
+type policyJSONUmarshallerTests[T any] struct {
+ newDest func() json.Unmarshaler // Create a new json.Unmarshaler to test against
+ newValidObject func() (T, error) // A function that generates a valid object, used as a base for other tests
+ otherJSONParser func([]byte) (T, error) // Another function that must accept the result of encoding validObject
+ invalidObjects []mSA // mSA values that are invalid for this unmarshaller; a simpler alternative to breakFns
+ breakFns []func(mSA) // Functions that edit a mSA from newValidObject() to make it invalid
+ duplicateFields []string // Names of fields in the return value of newValidObject() that should not be duplicated
+}
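+
+// Sketch of a minimal instantiation, mirroring the tests below (prReject,
+// NewPRReject and newPolicyRequirementFromJSON are names this file already uses):
+//
+//	policyJSONUmarshallerTests[PolicyRequirement]{
+//		newDest:         func() json.Unmarshaler { return &prReject{} },
+//		newValidObject:  func() (PolicyRequirement, error) { return NewPRReject(), nil },
+//		otherJSONParser: newPolicyRequirementFromJSON,
+//		duplicateFields: []string{"type"},
+//	}.run(t)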
+
+// validObjectAndJSON returns an object created by d.newValidObject() and its JSON representation.
+func (d policyJSONUmarshallerTests[T]) validObjectAndJSON(t *testing.T) (T, []byte) {
+ validObject, err := d.newValidObject()
+ require.NoError(t, err)
+ validJSON, err := json.Marshal(validObject)
+ require.NoError(t, err)
+ return validObject, validJSON
+}
+
+func (d policyJSONUmarshallerTests[T]) run(t *testing.T) {
+ dest := d.newDest()
+ testInvalidJSONInput(t, dest)
+
+ validObject, validJSON := d.validObjectAndJSON(t)
+
+ // Success
+ dest = d.newDest()
+ err := json.Unmarshal(validJSON, dest)
+ require.NoError(t, err)
+ assert.Equal(t, validObject, dest)
+
+ // otherJSONParser recognizes this data
+ if d.otherJSONParser != nil {
+ other, err := d.otherJSONParser(validJSON)
+ require.NoError(t, err)
+ assert.Equal(t, validObject, other)
+ }
+
+ // Invalid JSON objects
+ for _, invalid := range d.invalidObjects {
+ dest := d.newDest()
+ assertJSONUnmarshalFromObjectFails(t, invalid, dest)
+ }
+ // Various ways to corrupt the JSON
+ for index, fn := range d.breakFns {
+ t.Run(fmt.Sprintf("breakFns[%d]", index), func(t *testing.T) {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ fn(tmp)
+
+ dest := d.newDest()
+ assertJSONUnmarshalFromObjectFails(t, tmp, dest)
+ })
+ }
+
+ // Duplicated fields
+ for _, field := range d.duplicateFields {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ testJSON := addExtraJSONMember(t, validJSON, field, tmp[field])
+
+ dest := d.newDest()
+ err = json.Unmarshal(testJSON, dest)
+ assert.Error(t, err)
+ }
+}
+
+// xNewPRSignedByKeyPath is like NewPRSignedByKeyPath, except it must not fail.
+func xNewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) PolicyRequirement {
+ pr, err := NewPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+ if err != nil {
+ panic("xNewPRSignedByKeyPath failed")
+ }
+ return pr
+}
+
+// xNewPRSignedByKeyPaths is like NewPRSignedByKeyPaths, except it must not fail.
+func xNewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) PolicyRequirement {
+ pr, err := NewPRSignedByKeyPaths(keyType, keyPaths, signedIdentity)
+ if err != nil {
+ panic("xNewPRSignedByKeyPaths failed")
+ }
+ return pr
+}
+
+// xNewPRSignedByKeyData is like NewPRSignedByKeyData, except it must not fail.
+func xNewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) PolicyRequirement {
+ pr, err := NewPRSignedByKeyData(keyType, keyData, signedIdentity)
+ if err != nil {
+ panic("xNewPRSignedByKeyData failed")
+ }
+ return pr
+}
+
+func TestPolicyUnmarshalJSON(t *testing.T) {
+ tests := policyJSONUmarshallerTests[*Policy]{
+ newDest: func() json.Unmarshaler { return &Policy{} },
+ newValidObject: func() (*Policy, error) {
+ return &Policy{
+ Default: []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("abc"), NewPRMMatchRepoDigestOrExact()),
+ },
+ Transports: map[string]PolicyTransportScopes{
+ "docker": {
+ "docker.io/library/busybox": []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()),
+ },
+ "registry.access.redhat.com": []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()),
+ },
+ },
+ "atomic": {
+ "registry.access.redhat.com/rhel7": []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RHatomic"), NewPRMMatchRepository()),
+ },
+ },
+ "unknown": {
+ "registry.access.redhat.com/rhel7": []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RHatomic"), NewPRMMatchRepository()),
+ },
+ },
+ },
+ }, nil
+ },
+ otherJSONParser: nil,
+ breakFns: []func(mSA){
+ // The "default" field is missing
+ func(v mSA) { delete(v, "default") },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // "default" not an array
+ func(v mSA) { v["default"] = 1 },
+ func(v mSA) { v["default"] = mSA{} },
+ // "transports" not an object
+ func(v mSA) { v["transports"] = 1 },
+ func(v mSA) { v["transports"] = []string{} },
+ // "default" is an invalid PolicyRequirements
+ func(v mSA) { v["default"] = PolicyRequirements{} },
+ },
+ duplicateFields: []string{"default", "transports"},
+ }
+ tests.run(t)
+
+ // Various allowed modifications to the policy
+ _, validJSON := tests.validObjectAndJSON(t)
+ allowedModificationFns := []func(mSA){
+ // Delete the map of transport-specific scopes
+ func(v mSA) { delete(v, "transports") },
+ // Use an empty map of transport-specific scopes
+ func(v mSA) { v["transports"] = map[string]PolicyTransportScopes{} },
+ }
+ for _, fn := range allowedModificationFns {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ fn(tmp)
+
+ p := Policy{}
+ err = jsonUnmarshalFromObject(t, tmp, &p)
+ assert.NoError(t, err)
+ }
+}
+
+func TestPolicyTransportScopesUnmarshalJSON(t *testing.T) {
+ // Start with a valid JSON.
+ validPTS := PolicyTransportScopes{
+ "": []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("global"), NewPRMMatchRepoDigestOrExact()),
+ },
+ }
+
+ // Nothing can be unmarshaled directly into PolicyTransportScopes
+ pts := PolicyTransportScopes{}
+ assertJSONUnmarshalFromObjectFails(t, validPTS, &pts)
+}
+
+// Return the result of modifying validJSON with fn and unmarshaling it into *pts
+// using transport.
+func tryUnmarshalModifiedPTS(t *testing.T, pts *PolicyTransportScopes, transport types.ImageTransport,
+ validJSON []byte, modifyFn func(mSA)) error {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ modifyFn(tmp)
+
+ *pts = PolicyTransportScopes{}
+ dest := policyTransportScopesWithTransport{
+ transport: transport,
+ dest: pts,
+ }
+ return jsonUnmarshalFromObject(t, tmp, &dest)
+}
+
+func TestPolicyTransportScopesWithTransportUnmarshalJSON(t *testing.T) {
+ var pts PolicyTransportScopes
+
+ dest := policyTransportScopesWithTransport{
+ transport: docker.Transport,
+ dest: &pts,
+ }
+ testInvalidJSONInput(t, &dest)
+
+ // Start with a valid JSON.
+ validPTS := PolicyTransportScopes{
+ "docker.io/library/busybox": []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()),
+ },
+ "registry.access.redhat.com": []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()),
+ },
+ "": []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("global"), NewPRMMatchRepoDigestOrExact()),
+ },
+ }
+ validJSON, err := json.Marshal(validPTS)
+ require.NoError(t, err)
+
+ // Success
+ pts = PolicyTransportScopes{}
+ dest = policyTransportScopesWithTransport{
+ transport: docker.Transport,
+ dest: &pts,
+ }
+ err = json.Unmarshal(validJSON, &dest)
+ require.NoError(t, err)
+ assert.Equal(t, validPTS, pts)
+
+ // Various ways to corrupt the JSON
+ breakFns := []func(mSA){
+ // A scope is not an array
+ func(v mSA) { v["docker.io/library/busybox"] = 1 },
+ func(v mSA) { v["docker.io/library/busybox"] = mSA{} },
+ func(v mSA) { v[""] = 1 },
+ func(v mSA) { v[""] = mSA{} },
+ // A scope is an invalid PolicyRequirements
+ func(v mSA) { v["docker.io/library/busybox"] = PolicyRequirements{} },
+ func(v mSA) { v[""] = PolicyRequirements{} },
+ }
+ for _, fn := range breakFns {
+ err = tryUnmarshalModifiedPTS(t, &pts, docker.Transport, validJSON, fn)
+ assert.Error(t, err)
+ }
+
+ // Duplicated fields
+ for _, field := range []string{"docker.io/library/busybox", ""} {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ testJSON := addExtraJSONMember(t, validJSON, field, tmp[field])
+
+ pts = PolicyTransportScopes{}
+ dest := policyTransportScopesWithTransport{
+ transport: docker.Transport,
+ dest: &pts,
+ }
+ err = json.Unmarshal(testJSON, &dest)
+ assert.Error(t, err)
+ }
+
+ // Scope rejected by transport: the Docker scopes we use as valid are rejected by directory.Transport
+ // as relative paths.
+ err = tryUnmarshalModifiedPTS(t, &pts, directory.Transport, validJSON,
+ func(v mSA) {})
+ assert.Error(t, err)
+
+ // Various allowed modifications to the policy
+ allowedModificationFns := []func(mSA){
+ // The "" scope is missing
+ func(v mSA) { delete(v, "") },
+ // The policy is completely empty
+ func(v mSA) { maps.Clear(v) },
+ }
+ for _, fn := range allowedModificationFns {
+ err = tryUnmarshalModifiedPTS(t, &pts, docker.Transport, validJSON, fn)
+ require.NoError(t, err)
+ }
+}
+
+func TestPolicyRequirementsUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[*PolicyRequirements]{
+ newDest: func() json.Unmarshaler { return &PolicyRequirements{} },
+ newValidObject: func() (*PolicyRequirements, error) {
+ return &PolicyRequirements{
+ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()),
+ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()),
+ }, nil
+ },
+ otherJSONParser: nil,
+ }.run(t)
+
+ // This would be inconvenient to integrate into policyJSONUmarshallerTests.invalidObjects
+ // because all other users are easier to express as mSA.
+ for _, invalid := range [][]any{
+ // No requirements
+ {},
+ // A member is not an object
+ {1},
+ // A member has an invalid type
+ {prSignedBy{prCommon: prCommon{Type: "this is invalid"}}},
+ // A member has a valid type but invalid contents
+ {prSignedBy{
+ prCommon: prCommon{Type: prTypeSignedBy},
+ KeyType: "this is invalid",
+ }},
+ } {
+ reqs := PolicyRequirements{}
+ assertJSONUnmarshalFromObjectFails(t, invalid, &reqs)
+ }
+}
+
+func TestNewPolicyRequirementFromJSON(t *testing.T) {
+ // Sample success. Others tested in the individual PolicyRequirement.UnmarshalJSON implementations.
+ validReq := NewPRInsecureAcceptAnything()
+ validJSON, err := json.Marshal(validReq)
+ require.NoError(t, err)
+ req, err := newPolicyRequirementFromJSON(validJSON)
+ require.NoError(t, err)
+ assert.Equal(t, validReq, req)
+
+ // Invalid
+ for _, invalid := range []any{
+ // Not an object
+ 1,
+ // Missing type
+ prCommon{},
+ // Invalid type
+ prCommon{Type: "this is invalid"},
+ // Valid type but invalid contents
+ prSignedBy{
+ prCommon: prCommon{Type: prTypeSignedBy},
+ KeyType: "this is invalid",
+ },
+ } {
+ testJSON, err := json.Marshal(invalid)
+ require.NoError(t, err)
+
+ _, err = newPolicyRequirementFromJSON(testJSON)
+ assert.Error(t, err, string(testJSON))
+ }
+}
+
+func TestNewPRInsecureAcceptAnything(t *testing.T) {
+ _pr := NewPRInsecureAcceptAnything()
+ pr, ok := _pr.(*prInsecureAcceptAnything)
+ require.True(t, ok)
+ assert.Equal(t, &prInsecureAcceptAnything{prCommon{prTypeInsecureAcceptAnything}}, pr)
+}
+
+func TestPRInsecureAcceptAnythingUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prInsecureAcceptAnything{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRInsecureAcceptAnything(), nil
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ invalidObjects: []mSA{
+ // Missing "type" field
+ {},
+ // Wrong "type" field
+ {"type": 1},
+ {"type": "this is invalid"},
+ // Extra fields
+ {
+ "type": string(prTypeInsecureAcceptAnything),
+ "unknown": "foo",
+ },
+ },
+ duplicateFields: []string{"type"},
+ }.run(t)
+}
+
+func TestNewPRReject(t *testing.T) {
+ _pr := NewPRReject()
+ pr, ok := _pr.(*prReject)
+ require.True(t, ok)
+ assert.Equal(t, &prReject{prCommon{prTypeReject}}, pr)
+}
+
+func TestPRRejectUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prReject{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRReject(), nil
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ invalidObjects: []mSA{
+ // Missing "type" field
+ {},
+ // Wrong "type" field
+ {"type": 1},
+ {"type": "this is invalid"},
+ // Extra fields
+ {
+ "type": string(prTypeReject),
+ "unknown": "foo",
+ },
+ },
+ duplicateFields: []string{"type"},
+ }.run(t)
+}
+
+func TestNewPRSignedBy(t *testing.T) {
+ const testPath = "/foo/bar"
+ testPaths := []string{"/path/1", "/path/2"}
+ testData := []byte("abc")
+ testIdentity := NewPRMMatchRepoDigestOrExact()
+
+ // Success
+ pr, err := newPRSignedBy(SBKeyTypeGPGKeys, testPath, nil, nil, testIdentity)
+ require.NoError(t, err)
+ assert.Equal(t, &prSignedBy{
+ prCommon: prCommon{prTypeSignedBy},
+ KeyType: SBKeyTypeGPGKeys,
+ KeyPath: testPath,
+ KeyPaths: nil,
+ KeyData: nil,
+ SignedIdentity: testIdentity,
+ }, pr)
+ pr, err = newPRSignedBy(SBKeyTypeGPGKeys, "", testPaths, nil, testIdentity)
+ require.NoError(t, err)
+ assert.Equal(t, &prSignedBy{
+ prCommon: prCommon{prTypeSignedBy},
+ KeyType: SBKeyTypeGPGKeys,
+ KeyPath: "",
+ KeyPaths: testPaths,
+ KeyData: nil,
+ SignedIdentity: testIdentity,
+ }, pr)
+ pr, err = newPRSignedBy(SBKeyTypeGPGKeys, "", nil, testData, testIdentity)
+ require.NoError(t, err)
+ assert.Equal(t, &prSignedBy{
+ prCommon: prCommon{prTypeSignedBy},
+ KeyType: SBKeyTypeGPGKeys,
+ KeyPath: "",
+ KeyPaths: nil,
+ KeyData: testData,
+ SignedIdentity: testIdentity,
+ }, pr)
+
+ // Invalid keyType
+ _, err = newPRSignedBy(sbKeyType(""), testPath, nil, nil, testIdentity)
+ assert.Error(t, err)
+ _, err = newPRSignedBy(sbKeyType("this is invalid"), testPath, nil, nil, testIdentity)
+ assert.Error(t, err)
+
+ // Invalid keyPath/keyPaths/keyData combinations
+ _, err = newPRSignedBy(SBKeyTypeGPGKeys, testPath, testPaths, testData, testIdentity)
+ assert.Error(t, err)
+ _, err = newPRSignedBy(SBKeyTypeGPGKeys, testPath, testPaths, nil, testIdentity)
+ assert.Error(t, err)
+ _, err = newPRSignedBy(SBKeyTypeGPGKeys, testPath, nil, testData, testIdentity)
+ assert.Error(t, err)
+ _, err = newPRSignedBy(SBKeyTypeGPGKeys, "", testPaths, testData, testIdentity)
+ assert.Error(t, err)
+ _, err = newPRSignedBy(SBKeyTypeGPGKeys, "", nil, nil, testIdentity)
+ assert.Error(t, err)
+
+ // Invalid signedIdentity
+ _, err = newPRSignedBy(SBKeyTypeGPGKeys, testPath, nil, nil, nil)
+ assert.Error(t, err)
+}
+
+func TestNewPRSignedByKeyPath(t *testing.T) {
+ const testPath = "/foo/bar"
+ _pr, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, testPath, NewPRMMatchRepoDigestOrExact())
+ require.NoError(t, err)
+ pr, ok := _pr.(*prSignedBy)
+ require.True(t, ok)
+ assert.Equal(t, testPath, pr.KeyPath)
+ // Failure cases tested in TestNewPRSignedBy.
+}
+
+func TestNewPRSignedByKeyPaths(t *testing.T) {
+ testPaths := []string{"/path/1", "/path/2"}
+ _pr, err := NewPRSignedByKeyPaths(SBKeyTypeGPGKeys, testPaths, NewPRMMatchRepoDigestOrExact())
+ require.NoError(t, err)
+ pr, ok := _pr.(*prSignedBy)
+ require.True(t, ok)
+ assert.Equal(t, testPaths, pr.KeyPaths)
+ // Failure cases tested in TestNewPRSignedBy.
+}
+
+func TestNewPRSignedByKeyData(t *testing.T) {
+ testData := []byte("abc")
+ _pr, err := NewPRSignedByKeyData(SBKeyTypeGPGKeys, testData, NewPRMMatchRepoDigestOrExact())
+ require.NoError(t, err)
+ pr, ok := _pr.(*prSignedBy)
+ require.True(t, ok)
+ assert.Equal(t, testData, pr.KeyData)
+ // Failure cases tested in TestNewPRSignedBy.
+}
+
+// Return the result of modifying validJSON with fn and unmarshaling it into *pr
+func tryUnmarshalModifiedSignedBy(t *testing.T, pr *prSignedBy, validJSON []byte, modifyFn func(mSA)) error {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ modifyFn(tmp)
+
+ *pr = prSignedBy{}
+ return jsonUnmarshalFromObject(t, tmp, &pr)
+}
+
+func TestPRSignedByUnmarshalJSON(t *testing.T) {
+ keyDataTests := policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prSignedBy{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("abc"), NewPRMMatchRepoDigestOrExact())
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ breakFns: []func(mSA){
+ // The "type" field is missing
+ func(v mSA) { delete(v, "type") },
+ // Wrong "type" field
+ func(v mSA) { v["type"] = 1 },
+ func(v mSA) { v["type"] = "this is invalid" },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // The "keyType" field is missing
+ func(v mSA) { delete(v, "keyType") },
+ // Invalid "keyType" field
+ func(v mSA) { v["keyType"] = "this is invalid" },
+ // All three of "keyPath", "keyPaths" and "keyData" are missing
+ func(v mSA) { delete(v, "keyData") },
+ // All three of "keyPath", "keyPaths" and "keyData" are present
+ func(v mSA) { v["keyPath"] = "/foo/bar"; v["keyPaths"] = []string{"/1", "/2"} },
+ // Two of "keyPath", "keyPaths" and "keyData" are present
+ func(v mSA) { v["keyPath"] = "/foo/bar"; v["keyPaths"] = []string{"/1", "/2"}; delete(v, "keyData") },
+ func(v mSA) { v["keyPath"] = "/foo/bar" },
+ func(v mSA) { v["keyPaths"] = []string{"/1", "/2"} },
+ // Invalid "keyPath" field
+ func(v mSA) { delete(v, "keyData"); v["keyPath"] = 1 },
+ // Invalid "keyPaths" field
+ func(v mSA) { delete(v, "keyData"); v["keyPaths"] = 1 },
+ func(v mSA) { delete(v, "keyData"); v["keyPaths"] = []int{1} },
+ // Invalid "keyData" field
+ func(v mSA) { v["keyData"] = 1 },
+ func(v mSA) { v["keyData"] = "this is invalid base64" },
+ // Invalid "signedIdentity" field
+ func(v mSA) { v["signedIdentity"] = "this is invalid" },
+ // "signedIdentity" an explicit nil
+ func(v mSA) { v["signedIdentity"] = nil },
+ },
+ duplicateFields: []string{"type", "keyType", "keyData", "signedIdentity"},
+ }
+ keyDataTests.run(t)
+ // Test the keyPath-specific aspects
+ policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prSignedBy{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRSignedByKeyPath(SBKeyTypeGPGKeys, "/foo/bar", NewPRMMatchRepoDigestOrExact())
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ duplicateFields: []string{"type", "keyType", "keyPath", "signedIdentity"},
+ }.run(t)
+ // Test the keyPaths-specific aspects
+ policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prSignedBy{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ return NewPRSignedByKeyPaths(SBKeyTypeGPGKeys, []string{"/1", "/2"}, NewPRMMatchRepoDigestOrExact())
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ duplicateFields: []string{"type", "keyType", "keyPaths", "signedIdentity"},
+ }.run(t)
+
+ var pr prSignedBy
+
+ // Start with a valid JSON.
+ _, validJSON := keyDataTests.validObjectAndJSON(t)
+
+ // Various allowed modifications to the requirement
+ allowedModificationFns := []func(mSA){
+ // Delete the signedIdentity field
+ func(v mSA) { delete(v, "signedIdentity") },
+ }
+ for _, fn := range allowedModificationFns {
+ err := tryUnmarshalModifiedSignedBy(t, &pr, validJSON, fn)
+ require.NoError(t, err)
+ }
+
+ // Various ways to set signedIdentity to the default value
+ signedIdentityDefaultFns := []func(mSA){
+ // Set signedIdentity to the default explicitly
+ func(v mSA) { v["signedIdentity"] = NewPRMMatchRepoDigestOrExact() },
+ // Delete the signedIdentity field
+ func(v mSA) { delete(v, "signedIdentity") },
+ }
+ for _, fn := range signedIdentityDefaultFns {
+ err := tryUnmarshalModifiedSignedBy(t, &pr, validJSON, fn)
+ require.NoError(t, err)
+ assert.Equal(t, NewPRMMatchRepoDigestOrExact(), pr.SignedIdentity)
+ }
+}
+
+func TestSBKeyTypeIsValid(t *testing.T) {
+ // Valid values
+ for _, s := range []sbKeyType{
+ SBKeyTypeGPGKeys,
+ SBKeyTypeSignedByGPGKeys,
+ SBKeyTypeX509Certificates,
+ SBKeyTypeSignedByX509CAs,
+ } {
+ assert.True(t, s.IsValid())
+ }
+
+ // Invalid values
+ for _, s := range []string{"", "this is invalid"} {
+ assert.False(t, sbKeyType(s).IsValid())
+ }
+}
+
+func TestSBKeyTypeUnmarshalJSON(t *testing.T) {
+ var kt sbKeyType
+
+ testInvalidJSONInput(t, &kt)
+
+ // Valid values.
+ for _, v := range []sbKeyType{
+ SBKeyTypeGPGKeys,
+ SBKeyTypeSignedByGPGKeys,
+ SBKeyTypeX509Certificates,
+ SBKeyTypeSignedByX509CAs,
+ } {
+ kt = sbKeyType("")
+ err := json.Unmarshal([]byte(`"`+string(v)+`"`), &kt)
+ assert.NoError(t, err)
+ }
+
+ // Invalid values
+ kt = sbKeyType("")
+ err := json.Unmarshal([]byte(`""`), &kt)
+ assert.Error(t, err)
+
+ kt = sbKeyType("")
+ err = json.Unmarshal([]byte(`"this is invalid"`), &kt)
+ assert.Error(t, err)
+}
+
+// xNewPRSignedBaseLayer is like NewPRSignedBaseLayer, except it must not fail.
+func xNewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) PolicyRequirement {
+ pr, err := NewPRSignedBaseLayer(baseLayerIdentity)
+ if err != nil {
+ panic("xNewPRSignedBaseLayer failed")
+ }
+ return pr
+}
+
+func TestNewPRSignedBaseLayer(t *testing.T) {
+ testBLI := NewPRMMatchExact()
+
+ // Success
+ _pr, err := NewPRSignedBaseLayer(testBLI)
+ require.NoError(t, err)
+ pr, ok := _pr.(*prSignedBaseLayer)
+ require.True(t, ok)
+ assert.Equal(t, &prSignedBaseLayer{
+ prCommon: prCommon{prTypeSignedBaseLayer},
+ BaseLayerIdentity: testBLI,
+ }, pr)
+
+ // Invalid baseLayerIdentity
+ _, err = NewPRSignedBaseLayer(nil)
+ assert.Error(t, err)
+}
+
+func TestPRSignedBaseLayerUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyRequirement]{
+ newDest: func() json.Unmarshaler { return &prSignedBaseLayer{} },
+ newValidObject: func() (PolicyRequirement, error) {
+ baseIdentity, err := NewPRMExactReference("registry.access.redhat.com/rhel7/rhel:7.2.3")
+ require.NoError(t, err)
+ return NewPRSignedBaseLayer(baseIdentity)
+ },
+ otherJSONParser: newPolicyRequirementFromJSON,
+ breakFns: []func(mSA){
+ // The "type" field is missing
+ func(v mSA) { delete(v, "type") },
+ // Wrong "type" field
+ func(v mSA) { v["type"] = 1 },
+ func(v mSA) { v["type"] = "this is invalid" },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // The "baseLayerIdentity" field is missing
+ func(v mSA) { delete(v, "baseLayerIdentity") },
+ // Invalid "baseLayerIdentity" field
+ func(v mSA) { v["baseLayerIdentity"] = "this is invalid" },
+ // Invalid "baseLayerIdentity" an explicit nil
+ func(v mSA) { v["baseLayerIdentity"] = nil },
+ },
+ duplicateFields: []string{"type", "baseLayerIdentity"},
+ }.run(t)
+}
+
+func TestNewPolicyReferenceMatchFromJSON(t *testing.T) {
+ // Sample success. Others tested in the individual PolicyReferenceMatch.UnmarshalJSON implementations.
+ validPRM := NewPRMMatchRepoDigestOrExact()
+ validJSON, err := json.Marshal(validPRM)
+ require.NoError(t, err)
+ prm, err := newPolicyReferenceMatchFromJSON(validJSON)
+ require.NoError(t, err)
+ assert.Equal(t, validPRM, prm)
+
+ // Invalid
+ for _, invalid := range []any{
+ // Not an object
+ 1,
+ // Missing type
+ prmCommon{},
+ // Invalid type
+ prmCommon{Type: "this is invalid"},
+ // Valid type but invalid contents
+ prmExactReference{
+ prmCommon: prmCommon{Type: prmTypeExactReference},
+ DockerReference: "",
+ },
+ } {
+ testJSON, err := json.Marshal(invalid)
+ require.NoError(t, err)
+
+ _, err = newPolicyReferenceMatchFromJSON(testJSON)
+ assert.Error(t, err, string(testJSON))
+ }
+}
+
+func TestNewPRMMatchExact(t *testing.T) {
+ _prm := NewPRMMatchExact()
+ prm, ok := _prm.(*prmMatchExact)
+ require.True(t, ok)
+ assert.Equal(t, &prmMatchExact{prmCommon{prmTypeMatchExact}}, prm)
+}
+
+func TestPRMMatchExactUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyReferenceMatch]{
+ newDest: func() json.Unmarshaler { return &prmMatchExact{} },
+ newValidObject: func() (PolicyReferenceMatch, error) {
+ return NewPRMMatchExact(), nil
+ },
+ otherJSONParser: newPolicyReferenceMatchFromJSON,
+ invalidObjects: []mSA{
+ // Missing "type" field
+ {},
+ // Wrong "type" field
+ {"type": 1},
+ {"type": "this is invalid"},
+ // Extra fields
+ {
+ "type": string(prmTypeMatchExact),
+ "unknown": "foo",
+ },
+ },
+ duplicateFields: []string{"type"},
+ }.run(t)
+}
+
+func TestNewPRMMatchRepoDigestOrExact(t *testing.T) {
+ _prm := NewPRMMatchRepoDigestOrExact()
+ prm, ok := _prm.(*prmMatchRepoDigestOrExact)
+ require.True(t, ok)
+ assert.Equal(t, &prmMatchRepoDigestOrExact{prmCommon{prmTypeMatchRepoDigestOrExact}}, prm)
+}
+
+func TestPRMMatchRepoDigestOrExactUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyReferenceMatch]{
+ newDest: func() json.Unmarshaler { return &prmMatchRepoDigestOrExact{} },
+ newValidObject: func() (PolicyReferenceMatch, error) {
+ return NewPRMMatchRepoDigestOrExact(), nil
+ },
+ otherJSONParser: newPolicyReferenceMatchFromJSON,
+ invalidObjects: []mSA{
+ // Missing "type" field
+ {},
+ // Wrong "type" field
+ {"type": 1},
+ {"type": "this is invalid"},
+ // Extra fields
+ {
+ "type": string(prmTypeMatchRepoDigestOrExact),
+ "unknown": "foo",
+ },
+ },
+ duplicateFields: []string{"type"},
+ }.run(t)
+}
+
+func TestNewPRMMatchRepository(t *testing.T) {
+ _prm := NewPRMMatchRepository()
+ prm, ok := _prm.(*prmMatchRepository)
+ require.True(t, ok)
+ assert.Equal(t, &prmMatchRepository{prmCommon{prmTypeMatchRepository}}, prm)
+}
+
+func TestPRMMatchRepositoryUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyReferenceMatch]{
+ newDest: func() json.Unmarshaler { return &prmMatchRepository{} },
+ newValidObject: func() (PolicyReferenceMatch, error) {
+ return NewPRMMatchRepository(), nil
+ },
+ otherJSONParser: newPolicyReferenceMatchFromJSON,
+ invalidObjects: []mSA{
+ // Missing "type" field
+ {},
+ // Wrong "type" field
+ {"type": 1},
+ {"type": "this is invalid"},
+ // Extra fields
+ {
+ "type": string(prmTypeMatchRepository),
+ "unknown": "foo",
+ },
+ },
+ duplicateFields: []string{"type"},
+ }.run(t)
+}
+
+// xNewPRMExactReference is like NewPRMExactReference, except it must not fail.
+func xNewPRMExactReference(dockerReference string) PolicyReferenceMatch {
+ pr, err := NewPRMExactReference(dockerReference)
+ if err != nil {
+ panic("xNewPRMExactReference failed")
+ }
+ return pr
+}
+
+func TestNewPRMExactReference(t *testing.T) {
+ const testDR = "library/busybox:latest"
+
+ // Success
+ _prm, err := NewPRMExactReference(testDR)
+ require.NoError(t, err)
+ prm, ok := _prm.(*prmExactReference)
+ require.True(t, ok)
+ assert.Equal(t, &prmExactReference{
+ prmCommon: prmCommon{prmTypeExactReference},
+ DockerReference: testDR,
+ }, prm)
+
+ // Invalid dockerReference
+ _, err = NewPRMExactReference("")
+ assert.Error(t, err)
+ // Uppercase is invalid in Docker reference components.
+ _, err = NewPRMExactReference("INVALIDUPPERCASE:latest")
+ assert.Error(t, err)
+ // Missing tag
+ _, err = NewPRMExactReference("library/busybox")
+ assert.Error(t, err)
+}
+
+func TestPRMExactReferenceUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyReferenceMatch]{
+ newDest: func() json.Unmarshaler { return &prmExactReference{} },
+ newValidObject: func() (PolicyReferenceMatch, error) {
+ return NewPRMExactReference("library/busybox:latest")
+ },
+ otherJSONParser: newPolicyReferenceMatchFromJSON,
+ breakFns: []func(mSA){
+ // The "type" field is missing
+ func(v mSA) { delete(v, "type") },
+ // Wrong "type" field
+ func(v mSA) { v["type"] = 1 },
+ func(v mSA) { v["type"] = "this is invalid" },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // The "dockerReference" field is missing
+ func(v mSA) { delete(v, "dockerReference") },
+ // Invalid "dockerReference" field
+ func(v mSA) { v["dockerReference"] = 1 },
+ },
+ duplicateFields: []string{"type", "dockerReference"},
+ }.run(t)
+}
+
+// xNewPRMExactRepository is like NewPRMExactRepository, except it must not fail.
+func xNewPRMExactRepository(dockerRepository string) PolicyReferenceMatch {
+ pr, err := NewPRMExactRepository(dockerRepository)
+ if err != nil {
+ panic("xNewPRMExactRepository failed")
+ }
+ return pr
+}
+
+func TestNewPRMExactRepository(t *testing.T) {
+ const testDR = "library/busybox:latest"
+
+ // Success
+ _prm, err := NewPRMExactRepository(testDR)
+ require.NoError(t, err)
+ prm, ok := _prm.(*prmExactRepository)
+ require.True(t, ok)
+ assert.Equal(t, &prmExactRepository{
+ prmCommon: prmCommon{prmTypeExactRepository},
+ DockerRepository: testDR,
+ }, prm)
+
+ // Invalid dockerRepository
+ _, err = NewPRMExactRepository("")
+ assert.Error(t, err)
+ // Uppercase is invalid in Docker reference components.
+ _, err = NewPRMExactRepository("INVALIDUPPERCASE")
+ assert.Error(t, err)
+}
+
+func TestPRMExactRepositoryUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyReferenceMatch]{
+ newDest: func() json.Unmarshaler { return &prmExactRepository{} },
+ newValidObject: func() (PolicyReferenceMatch, error) {
+ return NewPRMExactRepository("library/busybox:latest")
+ },
+ otherJSONParser: newPolicyReferenceMatchFromJSON,
+ breakFns: []func(mSA){
+ // The "type" field is missing
+ func(v mSA) { delete(v, "type") },
+ // Wrong "type" field
+ func(v mSA) { v["type"] = 1 },
+ func(v mSA) { v["type"] = "this is invalid" },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // The "dockerRepository" field is missing
+ func(v mSA) { delete(v, "dockerRepository") },
+ // Invalid "dockerRepository" field
+ func(v mSA) { v["dockerRepository"] = 1 },
+ },
+ duplicateFields: []string{"type", "dockerRepository"},
+ }.run(t)
+}
+
+func TestValidateIdentityRemappingPrefix(t *testing.T) {
+ for _, s := range []string{
+ "localhost",
+ "example.com",
+ "example.com:80",
+ "example.com/repo",
+ "example.com/ns1/ns2/ns3/repo.with.dots-dashes_underscores",
+ "example.com:80/ns1/ns2/ns3/repo.with.dots-dashes_underscores",
+ // NOTE: These values are invalid, do not actually work, and may be rejected by this function
+ // and in NewPRMRemapIdentity in the future.
+ "shortname",
+ "ns/shortname",
+ } {
+ err := validateIdentityRemappingPrefix(s)
+ assert.NoError(t, err, s)
+ }
+
+ for _, s := range []string{
+ "",
+ "repo_with_underscores", // Not a valid DNS name, at least per docker/reference
+ "example.com/",
+ "example.com/UPPERCASEISINVALID",
+ "example.com/repo/",
+ "example.com/repo:tag",
+ "example.com/repo@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
+ "example.com/repo:tag@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
+ } {
+ err := validateIdentityRemappingPrefix(s)
+ assert.Error(t, err, s)
+ }
+}
+
+// xNewPRMRemapIdentity is like NewPRMRemapIdentity, except it must not fail.
+func xNewPRMRemapIdentity(prefix, signedPrefix string) PolicyReferenceMatch {
+ pr, err := NewPRMRemapIdentity(prefix, signedPrefix)
+ if err != nil {
+ panic("xNewPRMRemapIdentity failed")
+ }
+ return pr
+}
+
+func TestNewPRMRemapIdentity(t *testing.T) {
+ const testPrefix = "example.com/docker-library"
+ const testSignedPrefix = "docker.io/library"
+
+ // Success
+ _prm, err := NewPRMRemapIdentity(testPrefix, testSignedPrefix)
+ require.NoError(t, err)
+ prm, ok := _prm.(*prmRemapIdentity)
+ require.True(t, ok)
+ assert.Equal(t, &prmRemapIdentity{
+ prmCommon: prmCommon{prmTypeRemapIdentity},
+ Prefix: testPrefix,
+ SignedPrefix: testSignedPrefix,
+ }, prm)
+
+ // Invalid prefix
+ _, err = NewPRMRemapIdentity("", testSignedPrefix)
+ assert.Error(t, err)
+ _, err = NewPRMRemapIdentity("example.com/UPPERCASEISINVALID", testSignedPrefix)
+ assert.Error(t, err)
+ // Invalid signedPrefix
+ _, err = NewPRMRemapIdentity(testPrefix, "")
+ assert.Error(t, err)
+ _, err = NewPRMRemapIdentity(testPrefix, "example.com/UPPERCASEISINVALID")
+ assert.Error(t, err)
+}
+
+func TestPRMRemapIdentityUnmarshalJSON(t *testing.T) {
+ policyJSONUmarshallerTests[PolicyReferenceMatch]{
+ newDest: func() json.Unmarshaler { return &prmRemapIdentity{} },
+ newValidObject: func() (PolicyReferenceMatch, error) {
+ return NewPRMRemapIdentity("example.com/docker-library", "docker.io/library")
+ },
+ otherJSONParser: newPolicyReferenceMatchFromJSON,
+ breakFns: []func(mSA){
+ // The "type" field is missing
+ func(v mSA) { delete(v, "type") },
+ // Wrong "type" field
+ func(v mSA) { v["type"] = 1 },
+ func(v mSA) { v["type"] = "this is invalid" },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // The "prefix" field is missing
+ func(v mSA) { delete(v, "prefix") },
+ // Invalid "prefix" field
+ func(v mSA) { v["prefix"] = 1 },
+ func(v mSA) { v["prefix"] = "this is invalid" },
+ // The "signedPrefix" field is missing
+ func(v mSA) { delete(v, "signedPrefix") },
+ // Invalid "signedPrefix" field
+ func(v mSA) { v["signedPrefix"] = 1 },
+ func(v mSA) { v["signedPrefix"] = "this is invalid" },
+ },
+ duplicateFields: []string{"type", "prefix", "signedPrefix"},
+ }.run(t)
+}
diff --git a/signature/policy_eval.go b/signature/policy_eval.go
new file mode 100644
index 0000000..4f8d0da
--- /dev/null
+++ b/signature/policy_eval.go
@@ -0,0 +1,293 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/unparsedimage"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+ return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+ sarAccepted signatureAcceptanceResult = "sarAccepted"
+ sarRejected signatureAcceptanceResult = "sarRejected"
+ sarUnknown signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+ // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+ // costly initialization like creating temporary GPG home directories and reading files.
+ // Setup() (someState, error)
+ // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+ // isSignatureAuthorAccepted, given an image and a signature blob, returns:
+ // - sarAccepted if the signature has been verified against the appropriate public key
+ // (where "appropriate public key" may depend on the contents of the signature);
+ // in that case a parsed Signature should be returned.
+ // - sarRejected if the signature has not been verified;
+ // in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // - sarUnknown if this PolicyRequirement does not deal with signatures.
+ // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+ // Returning sarUnknown and a non-nil error value is invalid.
+ // WARNING: This makes the signature contents acceptable for further processing,
+ // but it does not necessarily mean that the contents of the signature are
+ // consistent with local policy.
+ // For example:
+ // - Do not use a true value to determine whether to run
+ // a container based on this image; use IsRunningImageAllowed instead.
+ // - Just because a signature is accepted does not automatically mean the contents of the
+ // signature are authorized to run code as root, or to affect system or cluster configuration.
+ isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+ // isRunningImageAllowed returns true if the requirement allows running an image.
+ // If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // WARNING: This validates signatures and the manifest, but does not download or validate the
+ // layers. Users must validate that the layers match their expected digests.
+ isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+ // matchesDockerReference decides whether a specific image identity is accepted for an image
+ // (or, usually, for the image's Reference().DockerReference()). Note that
+ // image.Reference().DockerReference() may be nil.
+ matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+ Policy *Policy
+ state policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify the users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+ pcInitializing policyContextState = "Initializing"
+ pcReady policyContextState = "Ready"
+ pcInUse policyContextState = "InUse"
+ pcDestroying policyContextState = "Destroying"
+ pcDestroyed policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+ if pc.state != expected {
+ return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+ }
+ pc.state = new
+ return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+ pc := &PolicyContext{Policy: policy, state: pcInitializing}
+ // FIXME: initialize
+ if err := pc.changeState(pcInitializing, pcReady); err != nil {
+ // Huh?! This should never fail, we didn't give the pointer to anybody.
+ // Just give up and leave unclean state around.
+ return nil, err
+ }
+ return pc, nil
+}
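+
+// A rough usage sketch (error handling elided; "policy", "ctx" and "unparsedImage"
+// are hypothetical caller-side values):
+//
+//	pc, err := NewPolicyContext(policy)
+//	if err != nil { /* handle error */ }
+//	defer pc.Destroy()
+//	allowed, err := pc.IsRunningImageAllowed(ctx, unparsedImage)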
+
+// Destroy should be called when the user of the context is done with it.
+func (pc *PolicyContext) Destroy() error {
+ if err := pc.changeState(pcReady, pcDestroying); err != nil {
+ return err
+ }
+ // FIXME: destroy
+ return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+ // Do we have a PolicyTransportScopes for this transport?
+ transportName := ref.Transport().Name()
+ if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if req, ok := transportScopes[identity]; ok {
+ logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+ return req
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if req, ok := transportScopes[name]; ok {
+ logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+ return req
+ }
+ }
+
+ // Look for a default match for the transport.
+ if req, ok := transportScopes[""]; ok {
+ logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+ return req
+ }
+ }
+
+ logrus.Debugf(" Using default policy section")
+ return pc.Policy.Default
+}
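+
+// Illustrative only: for a hypothetical identity "example.com/ns/repo", the lookup
+// above tries the transport's scope "example.com/ns/repo" first, then whatever
+// parent namespaces the transport reports (e.g. "example.com/ns", "example.com"),
+// then the transport's "" scope, and finally falls back to the policy-wide default.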
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+// a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+// signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
+ if err := pc.changeState(pcReady, pcInUse); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err := pc.changeState(pcInUse, pcReady); err != nil {
+ sigs = nil
+ finalErr = err
+ }
+ }()
+
+ image := unparsedimage.FromPublic(publicImage)
+
+ logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+ reqs := pc.requirementsForImageRef(image.Reference())
+
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!)
+ unverifiedSignatures, err := image.Signatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]*Signature, 0, len(unverifiedSignatures))
+ for sigNumber, sig := range unverifiedSignatures {
+ var acceptedSig *Signature // non-nil if accepted
+ rejected := false
+ // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+ logrus.Debugf("Evaluating signature %d:", sigNumber)
+ interpretingReqs:
+ for reqNumber, req := range reqs {
+ // FIXME: Log the requirement itself? For now, we use just the number.
+ // FIXME: supply state
+ switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
+ case sarAccepted:
+ if as == nil { // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+ rejected = true
+ break interpretingReqs
+ }
+ logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+ if acceptedSig == nil {
+ acceptedSig = as
+ } else if *as != *acceptedSig { // Coverage: this should never happen
+ // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
+ rejected = true
+ acceptedSig = nil
+ break interpretingReqs
+ }
+ case sarRejected:
+ logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
+ rejected = true
+ break interpretingReqs
+ case sarUnknown:
+ if err != nil { // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
+ rejected = true
+ break interpretingReqs
+ }
+ logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
+ default: // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
+ rejected = true
+ break interpretingReqs
+ }
+ }
+ // This also handles the (invalid) case of empty reqs, by rejecting the signature.
+ if acceptedSig != nil && !rejected {
+ logrus.Debugf(" Overall: OK, signature accepted")
+ res = append(res, acceptedSig)
+ } else {
+ logrus.Debugf(" Overall: Signature not accepted")
+ }
+ }
+ return res, nil
+}
+
+// IsRunningImageAllowed returns true iff the policy allows running the image.
+// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+// succeeded but the result was rejection.
+// WARNING: This validates signatures and the manifest, but does not download or validate the
+// layers. Users must validate that the layers match their expected digests.
+func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) {
+ if err := pc.changeState(pcReady, pcInUse); err != nil {
+ return false, err
+ }
+ defer func() {
+ if err := pc.changeState(pcInUse, pcReady); err != nil {
+ res = false
+ finalErr = err
+ }
+ }()
+
+ image := unparsedimage.FromPublic(publicImage)
+
+ logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
+ reqs := pc.requirementsForImageRef(image.Reference())
+
+ if len(reqs) == 0 {
+ return false, PolicyRequirementError("List of verification policy requirements must not be empty")
+ }
+
+ for reqNumber, req := range reqs {
+ // FIXME: supply state
+ allowed, err := req.isRunningImageAllowed(ctx, image)
+ if !allowed {
+ logrus.Debugf("Requirement %d: denied, done", reqNumber)
+ return false, err
+ }
+ logrus.Debugf(" Requirement %d: allowed", reqNumber)
+ }
+ // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
+ logrus.Debugf("Overall: allowed")
+ return true, nil
+}
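For orientation, here is a minimal sketch of how a caller outside this package might drive the API described above: build a Policy whose transport scopes mirror the lookup order in requirementsForImageRef (exact policy-configuration identity, then parent namespaces, then the per-transport "" default, then Policy.Default), wrap it in a PolicyContext, and query it. The scope name, key path and the particular requirements chosen are illustrative assumptions, not part of this change.

// Illustrative sketch only; the scope name, key path and requirement choices are placeholders.
package policyexample

import (
	"context"

	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// isAllowed builds a one-off policy and asks whether img may be run under it.
func isAllowed(ctx context.Context, img types.UnparsedImage) (bool, error) {
	signedBy, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys,
		"/etc/containers/keys/release.gpg", signature.NewPRMMatchRepoDigestOrExact())
	if err != nil {
		return false, err
	}
	policy := &signature.Policy{
		// Used when no transport scope below matches.
		Default: signature.PolicyRequirements{signature.NewPRReject()},
		Transports: map[string]signature.PolicyTransportScopes{
			"docker": {
				// The most specific entry wins: exact identity, then parent namespaces, then "".
				"registry.example.com/trusted": {signedBy},
				"":                             {signature.NewPRInsecureAcceptAnything()},
			},
		},
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return false, err
	}
	defer pc.Destroy() // a real caller should check the returned error
	return pc.IsRunningImageAllowed(ctx, img)
}

// acceptedSignatures lists signatures whose author the policy accepts; per the
// documentation above, this must not be used as a run/no-run decision.
func acceptedSignatures(ctx context.Context, pc *signature.PolicyContext, img types.UnparsedImage) ([]*signature.Signature, error) {
	return pc.GetSignaturesWithAcceptedAuthor(ctx, img)
}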
diff --git a/signature/policy_eval_baselayer.go b/signature/policy_eval_baselayer.go
new file mode 100644
index 0000000..a8bc013
--- /dev/null
+++ b/signature/policy_eval_baselayer.go
@@ -0,0 +1,20 @@
+// Policy evaluation for prSignedBaseLayer.
+
+package signature
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/sirupsen/logrus"
+)
+
+func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ return sarUnknown, nil, nil
+}
+
+func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ // FIXME? Reject this at policy parsing time already?
+ logrus.Errorf("signedBaseLayer not implemented yet!")
+ return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
+}
diff --git a/signature/policy_eval_baselayer_test.go b/signature/policy_eval_baselayer_test.go
new file mode 100644
index 0000000..8898cf8
--- /dev/null
+++ b/signature/policy_eval_baselayer_test.go
@@ -0,0 +1,25 @@
+package signature
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestPRSignedBaseLayerIsSignatureAuthorAccepted(t *testing.T) {
+ pr, err := NewPRSignedBaseLayer(NewPRMMatchRepository())
+ require.NoError(t, err)
+ // Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
+ sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nil, nil)
+ assertSARUnknown(t, sar, parsedSig, err)
+}
+
+func TestPRSignedBaseLayerIsRunningImageAllowed(t *testing.T) {
+ // This will obviously need to change after signedBaseLayer is implemented.
+ pr, err := NewPRSignedBaseLayer(NewPRMMatchRepository())
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ res, err := pr.isRunningImageAllowed(context.Background(), nil)
+ assertRunningRejectedPolicyRequirement(t, res, err)
+}
diff --git a/signature/policy_eval_signedby.go b/signature/policy_eval_signedby.go
new file mode 100644
index 0000000..a418773
--- /dev/null
+++ b/signature/policy_eval_signedby.go
@@ -0,0 +1,145 @@
+// Policy evaluation for prSignedBy.
+
+package signature
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/manifest"
+ digest "github.com/opencontainers/go-digest"
+ "golang.org/x/exp/slices"
+)
+
+func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ switch pr.KeyType {
+ case SBKeyTypeGPGKeys:
+ case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+ // FIXME? Reject this at policy parsing time already?
+ return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+ default:
+ // This should never happen, newPRSignedBy ensures KeyType.IsValid()
+ return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
+ }
+
+ // FIXME: move this to per-context initialization
+ var data [][]byte
+ keySources := 0
+ if pr.KeyPath != "" {
+ keySources++
+ d, err := os.ReadFile(pr.KeyPath)
+ if err != nil {
+ return sarRejected, nil, err
+ }
+ data = [][]byte{d}
+ }
+ if pr.KeyPaths != nil {
+ keySources++
+ data = [][]byte{}
+ for _, path := range pr.KeyPaths {
+ d, err := os.ReadFile(path)
+ if err != nil {
+ return sarRejected, nil, err
+ }
+ data = append(data, d)
+ }
+ }
+ if pr.KeyData != nil {
+ keySources++
+ data = [][]byte{pr.KeyData}
+ }
+ if keySources != 1 {
+ return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`)
+ }
+
+ // FIXME: move this to per-context initialization
+ mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data)
+ if err != nil {
+ return sarRejected, nil, err
+ }
+ defer mech.Close()
+ if len(trustedIdentities) == 0 {
+ return sarRejected, nil, PolicyRequirementError("No public keys imported")
+ }
+
+ signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
+ validateKeyIdentity: func(keyIdentity string) error {
+ if slices.Contains(trustedIdentities, keyIdentity) {
+ return nil
+ }
+ // Coverage: We use a private GPG home directory and only import trusted keys, so this should
+ // not be reachable.
+ return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
+ },
+ validateSignedDockerReference: func(ref string) error {
+ if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+ return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+ }
+ return nil
+ },
+ validateSignedDockerManifestDigest: func(digest digest.Digest) error {
+ m, _, err := image.Manifest(ctx)
+ if err != nil {
+ return err
+ }
+ digestMatches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return err
+ }
+ if !digestMatches {
+ return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return sarRejected, nil, err
+ }
+
+ return sarAccepted, signature, nil
+}
+
+func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages
+ // (needs tests!)
+ sigs, err := image.Signatures(ctx)
+ if err != nil {
+ return false, err
+ }
+ var rejections []error
+ for _, s := range sigs {
+ var reason error
+ switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res {
+ case sarAccepted:
+ // One accepted signature is enough.
+ return true, nil
+ case sarRejected:
+ reason = err
+ case sarUnknown:
+ // Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough
+ default:
+ reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+ }
+ rejections = append(rejections, reason)
+ }
+ var summary error
+ switch len(rejections) {
+ case 0:
+ summary = PolicyRequirementError("A signature was required, but no signature exists")
+ case 1:
+ summary = rejections[0]
+ default:
+ var msgs []string
+ for _, e := range rejections {
+ msgs = append(msgs, e.Error())
+ }
+ summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
+ strings.Join(msgs, "; ")))
+ }
+ return false, summary
+}
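As a quick reference for the key-source handling above (exactly one of keyPath, keyPaths and keyData may be set), here is a hedged sketch of the three exported constructors that produce this requirement, as exercised by the tests below; the paths and key bytes are placeholders.

// Illustrative sketch only; paths and key material are placeholders.
package policyexample

import "github.com/containers/image/v5/signature"

func signedByVariants(inlineKey []byte) ([]signature.PolicyRequirement, error) {
	prm := signature.NewPRMMatchRepository() // accept any tag within the signed repository

	byPath, err := signature.NewPRSignedByKeyPath(
		signature.SBKeyTypeGPGKeys, "/keys/release.gpg", prm)
	if err != nil {
		return nil, err
	}
	byPaths, err := signature.NewPRSignedByKeyPaths(
		signature.SBKeyTypeGPGKeys, []string{"/keys/one.gpg", "/keys/two.gpg"}, prm)
	if err != nil {
		return nil, err
	}
	byData, err := signature.NewPRSignedByKeyData(
		signature.SBKeyTypeGPGKeys, inlineKey, prm)
	if err != nil {
		return nil, err
	}
	return []signature.PolicyRequirement{byPath, byPaths, byData}, nil
}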
diff --git a/signature/policy_eval_signedby_test.go b/signature/policy_eval_signedby_test.go
new file mode 100644
index 0000000..0e9bb83
--- /dev/null
+++ b/signature/policy_eval_signedby_test.go
@@ -0,0 +1,280 @@
+package signature
+
+import (
+ "context"
+ "os"
+ "path"
+ "testing"
+
+ "github.com/containers/image/v5/directory"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagesource"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// dirImageMock returns a private.UnparsedImage for a directory, claiming a specified dockerReference.
+func dirImageMock(t *testing.T, dir, dockerReference string) private.UnparsedImage {
+ ref, err := reference.ParseNormalizedNamed(dockerReference)
+ require.NoError(t, err)
+ return dirImageMockWithRef(t, dir, refImageReferenceMock{ref: ref})
+}
+
+// dirImageMockWithRef returns a private.UnparsedImage for a directory, claiming a specified ref.
+func dirImageMockWithRef(t *testing.T, dir string, ref types.ImageReference) private.UnparsedImage {
+ srcRef, err := directory.NewReference(dir)
+ require.NoError(t, err)
+ src, err := srcRef.NewImageSource(context.Background(), nil)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ err := src.Close()
+ require.NoError(t, err)
+ })
+ return image.UnparsedInstance(&dirImageSourceMock{
+ ImageSource: imagesource.FromPublic(src),
+ ref: ref,
+ }, nil)
+}
+
+// dirImageSourceMock inherits dirImageSource, but overrides its Reference method.
+type dirImageSourceMock struct {
+ private.ImageSource
+ ref types.ImageReference
+}
+
+func (d *dirImageSourceMock) Reference() types.ImageReference {
+ return d.ref
+}
+
+func TestPRSignedByIsSignatureAuthorAccepted(t *testing.T) {
+ ktGPG := SBKeyTypeGPGKeys
+ prm := NewPRMMatchExact()
+ testImage := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
+ testImageSig, err := os.ReadFile("fixtures/dir-img-valid/signature-1")
+ require.NoError(t, err)
+ keyData, err := os.ReadFile("fixtures/public-key.gpg")
+ require.NoError(t, err)
+
+ // Successful validation, with KeyPath, KeyPaths and KeyData.
+ for _, fn := range []func() (PolicyRequirement, error){
+ func() (PolicyRequirement, error) {
+ return NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ },
+ // Test the files in both orders, to make sure the correct public keys are accepted in either position.
+ func() (PolicyRequirement, error) {
+ return NewPRSignedByKeyPaths(ktGPG, []string{"fixtures/public-key-1.gpg", "fixtures/public-key-2.gpg"}, prm)
+ },
+ func() (PolicyRequirement, error) {
+ return NewPRSignedByKeyPaths(ktGPG, []string{"fixtures/public-key-2.gpg", "fixtures/public-key-1.gpg"}, prm)
+ },
+ func() (PolicyRequirement, error) {
+ return NewPRSignedByKeyData(ktGPG, keyData, prm)
+ },
+ } {
+ pr, err := fn()
+ require.NoError(t, err)
+ sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), testImage, testImageSig)
+ assertSARAccepted(t, sar, parsedSig, err, Signature{
+ DockerManifestDigest: TestImageManifestDigest,
+ DockerReference: "testing/manifest:latest",
+ })
+ }
+
+ // Unimplemented and invalid KeyType values
+ for _, keyType := range []sbKeyType{SBKeyTypeSignedByGPGKeys,
+ SBKeyTypeX509Certificates,
+ SBKeyTypeSignedByX509CAs,
+ sbKeyType("This is invalid"),
+ } {
+ // Do not use NewPRSignedByKeyData, because it would reject invalid values.
+ pr := &prSignedBy{
+ KeyType: keyType,
+ KeyData: keyData,
+ SignedIdentity: prm,
+ }
+ // Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
+ sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nil, nil)
+ assertSARRejected(t, sar, parsedSig, err)
+ }
+
+ // Invalid KeyPath/KeyPaths/KeyData combinations.
+ for _, fn := range []func() (PolicyRequirement, error){
+ // Two or more of KeyPath, KeyPaths and KeyData set. Do not use NewPRSignedBy*, because it would reject this.
+ func() (PolicyRequirement, error) {
+ return &prSignedBy{KeyType: ktGPG, KeyPath: "fixtures/public-key.gpg", KeyPaths: []string{"fixtures/public-key-1.gpg", "fixtures/public-key-2.gpg"}, KeyData: keyData, SignedIdentity: prm}, nil
+ },
+ func() (PolicyRequirement, error) {
+ return &prSignedBy{KeyType: ktGPG, KeyPath: "fixtures/public-key.gpg", KeyPaths: []string{"fixtures/public-key-1.gpg", "fixtures/public-key-2.gpg"}, SignedIdentity: prm}, nil
+ },
+ func() (PolicyRequirement, error) {
+ return &prSignedBy{KeyType: ktGPG, KeyPath: "fixtures/public-key.gpg", KeyData: keyData, SignedIdentity: prm}, nil
+ },
+ func() (PolicyRequirement, error) {
+ return &prSignedBy{KeyType: ktGPG, KeyPaths: []string{"fixtures/public-key-1.gpg", "fixtures/public-key-2.gpg"}, KeyData: keyData, SignedIdentity: prm}, nil
+ },
+ // None of KeyPath, KeyPaths and KeyData set. Do not use NewPRSignedBy*, because it would reject this.
+ func() (PolicyRequirement, error) {
+ return &prSignedBy{KeyType: ktGPG, SignedIdentity: prm}, nil
+ },
+ func() (PolicyRequirement, error) { // Invalid KeyPath
+ return NewPRSignedByKeyPath(ktGPG, "/this/does/not/exist", prm)
+ },
+ func() (PolicyRequirement, error) { // Invalid KeyPaths
+ return NewPRSignedByKeyPaths(ktGPG, []string{"/this/does/not/exist"}, prm)
+ },
+ func() (PolicyRequirement, error) { // One of the KeyPaths is invalid
+ return NewPRSignedByKeyPaths(ktGPG, []string{"fixtures/public-key.gpg", "/this/does/not/exist"}, prm)
+ },
+ } {
+ pr, err := fn()
+ require.NoError(t, err)
+ // Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
+ sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nil, nil)
+ assertSARRejected(t, sar, parsedSig, err)
+ }
+
+ // Errors initializing the temporary GPG directory and mechanism are not obviously easy to reach.
+
+ // KeyData has no public keys.
+ pr, err := NewPRSignedByKeyData(ktGPG, []byte{}, prm)
+ require.NoError(t, err)
+ // Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
+ sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nil, nil)
+ assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
+
+ // A signature which does not GPG verify
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
+ sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), nil, []byte("invalid signature"))
+ assertSARRejected(t, sar, parsedSig, err)
+
+ // A valid signature using an unknown key.
+ // (This is (currently?) rejected through the "mech.Verify fails" path, not the "!identityFound" path,
+ // because we use a temporary directory and only import the trusted keys.)
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ sig, err := os.ReadFile("fixtures/unknown-key.signature")
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
+ sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), nil, sig)
+ assertSARRejected(t, sar, parsedSig, err)
+
+ // A valid signature of an invalid JSON.
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ sig, err = os.ReadFile("fixtures/invalid-blob.signature")
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
+ sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), nil, sig)
+ assertSARRejected(t, sar, parsedSig, err)
+ assert.IsType(t, InvalidSignatureError{}, err)
+
+ // A valid signature with a rejected identity.
+ nonmatchingPRM, err := NewPRMExactReference("this/doesnt:match")
+ require.NoError(t, err)
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", nonmatchingPRM)
+ require.NoError(t, err)
+ sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), testImage, testImageSig)
+ assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
+
+ // Error reading image manifest
+ image := dirImageMock(t, "fixtures/dir-img-no-manifest", "testing/manifest:latest")
+ sig, err = os.ReadFile("fixtures/dir-img-no-manifest/signature-1")
+ require.NoError(t, err)
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), image, sig)
+ assertSARRejected(t, sar, parsedSig, err)
+
+ // Error computing manifest digest
+ image = dirImageMock(t, "fixtures/dir-img-manifest-digest-error", "testing/manifest:latest")
+ sig, err = os.ReadFile("fixtures/dir-img-manifest-digest-error/signature-1")
+ require.NoError(t, err)
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), image, sig)
+ assertSARRejected(t, sar, parsedSig, err)
+
+ // A valid signature with a non-matching manifest
+ image = dirImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
+ sig, err = os.ReadFile("fixtures/dir-img-modified-manifest/signature-1")
+ require.NoError(t, err)
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), image, sig)
+ assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
+}
+
+// createInvalidSigDir creates a directory suitable for dirImageMock, in which image.Signatures()
+// fails.
+func createInvalidSigDir(t *testing.T) string {
+ dir := t.TempDir()
+ err := os.WriteFile(path.Join(dir, "manifest.json"), []byte("{}"), 0644)
+ require.NoError(t, err)
+ // Creating a 000-permissions file would work for unprivileged accounts, but root (in particular,
+ // in the Docker container we use for testing) would still have access. So, create a symlink
+ // pointing to itself, to cause an ELOOP. (Note that a symlink pointing to a nonexistent file would be treated
+ // just like a nonexistent signature file, and not an error.)
+ err = os.Symlink("signature-1", path.Join(dir, "signature-1"))
+ require.NoError(t, err)
+ return dir
+}
+
+func TestPRSignedByIsRunningImageAllowed(t *testing.T) {
+ ktGPG := SBKeyTypeGPGKeys
+ prm := NewPRMMatchExact()
+
+ // A simple success case: single valid signature.
+ image := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
+ pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ allowed, err := pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningAllowed(t, allowed, err)
+
+ // Error reading signatures
+ invalidSigDir := createInvalidSigDir(t)
+ image = dirImageMock(t, invalidSigDir, "testing/manifest:latest")
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejected(t, allowed, err)
+
+ // No signatures
+ image = dirImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejectedPolicyRequirement(t, allowed, err)
+
+ // 1 invalid signature: use dir-img-valid, but a non-matching Docker reference
+ image = dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:notlatest")
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejectedPolicyRequirement(t, allowed, err)
+
+ // 2 valid signatures
+ image = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningAllowed(t, allowed, err)
+
+ // One invalid, one valid signature (in this order)
+ image = dirImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningAllowed(t, allowed, err)
+
+ // 2 invalid signatures: use dir-img-valid-2, but a non-matching Docker reference
+ image = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:notlatest")
+ pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejectedPolicyRequirement(t, allowed, err)
+}
diff --git a/signature/policy_eval_sigstore.go b/signature/policy_eval_sigstore.go
new file mode 100644
index 0000000..dcf5592
--- /dev/null
+++ b/signature/policy_eval_sigstore.go
@@ -0,0 +1,281 @@
+// Policy evaluation for prSigstoreSigned.
+
+package signature
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set,
+// and returns the referenced data, or nil if neither is set.
+func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) {
+ switch {
+ case data != nil && path != "":
+ return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix)
+ case path != "":
+ d, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+ case data != nil:
+ return data, nil
+ default: // Nothing
+ return nil, nil
+ }
+}
+
+// prepareTrustRoot creates a fulcioTrustRoot from the input data.
+// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
+ caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath)
+ if err != nil {
+ return nil, err
+ }
+ if caCertBytes == nil {
+ return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`)
+ }
+ certs := x509.NewCertPool()
+ if ok := certs.AppendCertsFromPEM(caCertBytes); !ok {
+ return nil, errors.New("error loading Fulcio CA certificates")
+ }
+ fulcio := fulcioTrustRoot{
+ caCertificates: certs,
+ oidcIssuer: f.OIDCIssuer,
+ subjectEmail: f.SubjectEmail,
+ }
+ if err := fulcio.validate(); err != nil {
+ return nil, err
+ }
+ return &fulcio, nil
+}
+
+// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy
+type sigstoreSignedTrustRoot struct {
+ publicKey crypto.PublicKey
+ fulcio *fulcioTrustRoot
+ rekorPublicKey *ecdsa.PublicKey
+}
+
+func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) {
+ res := sigstoreSignedTrustRoot{}
+
+ publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath)
+ if err != nil {
+ return nil, err
+ }
+ if publicKeyPEM != nil {
+ pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
+ if err != nil {
+ return nil, fmt.Errorf("parsing public key: %w", err)
+ }
+ res.publicKey = pk
+ }
+
+ if pr.Fulcio != nil {
+ f, err := pr.Fulcio.prepareTrustRoot()
+ if err != nil {
+ return nil, err
+ }
+ res.fulcio = f
+ }
+
+ rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath)
+ if err != nil {
+ return nil, err
+ }
+ if rekorPublicKeyPEM != nil {
+ pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM)
+ if err != nil {
+ return nil, fmt.Errorf("parsing Rekor public key: %w", err)
+ }
+ pkECDSA, ok := pk.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, fmt.Errorf("Rekor public key is not using ECDSA")
+
+ }
+ res.rekorPublicKey = pkECDSA
+ }
+
+ return &res, nil
+}
+
+func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ // We don’t know of a single user of this API, and we might return unexpected values in Signature.
+ // For now, just punt.
+ return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore")
+}
+
+func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) {
+ // FIXME: move this to per-context initialization
+ trustRoot, err := pr.prepareTrustRoot()
+ if err != nil {
+ return sarRejected, err
+ }
+
+ untrustedAnnotations := sig.UntrustedAnnotations()
+ untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey]
+ if !ok {
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey)
+ }
+ untrustedPayload := sig.UntrustedPayload()
+
+ var publicKey crypto.PublicKey
+ switch {
+ case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
+ return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified")
+ case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
+ return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified")
+
+ case trustRoot.publicKey != nil:
+ if trustRoot.rekorPublicKey != nil {
+ untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
+ if !ok { // For user convenience; passing an empty []byte to VerifyRekorSET should work.
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
+ }
+ // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies.
+ // FIXME: We could just generate DER instead of the full PEM text
+ recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey)
+ if err != nil {
+ // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail.
+ // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
+ return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
+
+ }
+ // We don’t care about the Rekor timestamp, just about log presence.
+ if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil {
+ return sarRejected, err
+ }
+ }
+ publicKey = trustRoot.publicKey
+
+ case trustRoot.fulcio != nil:
+ if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations.
+ return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key")
+ }
+ untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
+ if !ok { // For user convenience; passing an empty []byte to VerifyRekorSET should correctly reject it anyway.
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
+ }
+ untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey]
+ if !ok { // For user convenience; passing an empty []byte to VerifyRekorSET should correctly reject it anyway.
+ return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey)
+ }
+ var untrustedIntermediateChainBytes []byte
+ if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok {
+ untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain)
+ }
+ pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio,
+ []byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload)
+ if err != nil {
+ return sarRejected, err
+ }
+ publicKey = pk
+ }
+
+ if publicKey == nil {
+ // Coverage: This should never happen, we have already excluded the possibility in the switch above.
+ return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload")
+ }
+ signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
+ ValidateSignedDockerReference: func(ref string) error {
+ if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+ return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+ }
+ return nil
+ },
+ ValidateSignedDockerManifestDigest: func(digest digest.Digest) error {
+ m, _, err := image.Manifest(ctx)
+ if err != nil {
+ return err
+ }
+ digestMatches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return err
+ }
+ if !digestMatches {
+ return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return sarRejected, err
+ }
+ if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values
+ return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen.
+ }
+
+ return sarAccepted, nil
+}
+
+func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ sigs, err := image.UntrustedSignatures(ctx)
+ if err != nil {
+ return false, err
+ }
+ var rejections []error
+ foundNonSigstoreSignatures := 0
+ foundSigstoreNonAttachments := 0
+ for _, s := range sigs {
+ sigstoreSig, ok := s.(signature.Sigstore)
+ if !ok {
+ foundNonSigstoreSignatures++
+ continue
+ }
+ if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType {
+ foundSigstoreNonAttachments++
+ continue
+ }
+
+ var reason error
+ switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res {
+ case sarAccepted:
+ // One accepted signature is enough.
+ return true, nil
+ case sarRejected:
+ reason = err
+ case sarUnknown:
+ // Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough
+ default:
+ reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+ }
+ rejections = append(rejections, reason)
+ }
+ var summary error
+ switch len(rejections) {
+ case 0:
+ if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 {
+ // A nice message for the most common case.
+ summary = PolicyRequirementError("A signature was required, but no signature exists")
+ } else {
+ summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)",
+ foundNonSigstoreSignatures, foundSigstoreNonAttachments))
+ }
+ case 1:
+ summary = rejections[0]
+ default:
+ var msgs []string
+ for _, e := range rejections {
+ msgs = append(msgs, e.Error())
+ }
+ summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
+ strings.Join(msgs, "; ")))
+ }
+ return false, summary
+}
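To make the two trust-root configurations distinguished by isSignatureAccepted above concrete, here is a hedged sketch building both variants with the exported constructors exercised by the tests below: a fixed public key (optionally pinned to a Rekor log), and a Fulcio CA, which additionally requires a Rekor public key. All paths, the OIDC issuer and the e-mail address are placeholders.

// Illustrative sketch only; all paths, the issuer and the e-mail are placeholders.
package policyexample

import "github.com/containers/image/v5/signature"

func sigstoreRequirements() ([]signature.PolicyRequirement, error) {
	prm := signature.NewPRMMatchRepository()

	// Variant 1: a fixed public key; the Rekor key additionally requires log inclusion.
	keyReq, err := signature.NewPRSigstoreSigned(
		signature.PRSigstoreSignedWithKeyPath("/etc/containers/cosign.pub"),
		signature.PRSigstoreSignedWithRekorPublicKeyPath("/etc/containers/rekor.pub"),
		signature.PRSigstoreSignedWithSignedIdentity(prm),
	)
	if err != nil {
		return nil, err
	}

	// Variant 2: Fulcio-issued certificates; a Rekor public key is required in this mode.
	fulcio, err := signature.NewPRSigstoreSignedFulcio(
		signature.PRSigstoreSignedFulcioWithCAPath("/etc/containers/fulcio.crt.pem"),
		signature.PRSigstoreSignedFulcioWithOIDCIssuer("https://oidc.example.com"),
		signature.PRSigstoreSignedFulcioWithSubjectEmail("releases@example.com"),
	)
	if err != nil {
		return nil, err
	}
	fulcioReq, err := signature.NewPRSigstoreSigned(
		signature.PRSigstoreSignedWithFulcio(fulcio),
		signature.PRSigstoreSignedWithRekorPublicKeyPath("/etc/containers/rekor.pub"),
		signature.PRSigstoreSignedWithSignedIdentity(prm),
	)
	if err != nil {
		return nil, err
	}
	return []signature.PolicyRequirement{keyReq, fulcioReq}, nil
}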
diff --git a/signature/policy_eval_sigstore_test.go b/signature/policy_eval_sigstore_test.go
new file mode 100644
index 0000000..f4dd113
--- /dev/null
+++ b/signature/policy_eval_sigstore_test.go
@@ -0,0 +1,681 @@
+// Policy evaluation for prSigstoreSigned.
+
+package signature
+
+import (
+ "context"
+ "encoding/base64"
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPRSigstoreSignedFulcioPrepareTrustRoot(t *testing.T) {
+ const testCAPath = "fixtures/fulcio_v1.crt.pem"
+ testCAData, err := os.ReadFile(testCAPath)
+ require.NoError(t, err)
+ const testOIDCIssuer = "https://example.com"
+ testSubjectEmail := "test@example.com"
+
+ // Success
+ for _, c := range [][]PRSigstoreSignedFulcioOption{
+ {
+ PRSigstoreSignedFulcioWithCAPath(testCAPath),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ {
+ PRSigstoreSignedFulcioWithCAData(testCAData),
+ PRSigstoreSignedFulcioWithOIDCIssuer(testOIDCIssuer),
+ PRSigstoreSignedFulcioWithSubjectEmail(testSubjectEmail),
+ },
+ } {
+ f, err := newPRSigstoreSignedFulcio(c...)
+ require.NoError(t, err)
+ res, err := f.prepareTrustRoot()
+ require.NoError(t, err)
+ assert.NotNil(t, res.caCertificates) // Doing a better test seems hard; we would need to compare .Subjects with a DER encoding.
+ assert.Equal(t, testOIDCIssuer, res.oidcIssuer)
+ assert.Equal(t, testSubjectEmail, res.subjectEmail)
+ }
+
+ // Failure
+ for _, f := range []prSigstoreSignedFulcio{ // Use a prSigstoreSignedFulcio because these configurations should be rejected by NewPRSigstoreSignedFulcio.
+ { // Neither CAPath nor CAData specified
+ OIDCIssuer: testOIDCIssuer,
+ SubjectEmail: testSubjectEmail,
+ },
+ { // Both CAPath and CAData specified
+ CAPath: testCAPath,
+ CAData: testCAData,
+ OIDCIssuer: testOIDCIssuer,
+ SubjectEmail: testSubjectEmail,
+ },
+ { // Invalid CAPath
+ CAPath: "fixtures/image.signature",
+ OIDCIssuer: testOIDCIssuer,
+ SubjectEmail: testSubjectEmail,
+ },
+ { // Unusable CAPath
+ CAPath: "fixtures/this/does/not/exist",
+ OIDCIssuer: testOIDCIssuer,
+ SubjectEmail: testSubjectEmail,
+ },
+ { // Invalid CAData
+ CAData: []byte("invalid"),
+ OIDCIssuer: testOIDCIssuer,
+ SubjectEmail: testSubjectEmail,
+ },
+ { // Missing OIDCIssuer
+ CAPath: testCAPath,
+ SubjectEmail: testSubjectEmail,
+ },
+ { // Missing SubjectEmail
+ CAPath: testCAPath,
+ OIDCIssuer: testOIDCIssuer,
+ },
+ } {
+ _, err := f.prepareTrustRoot()
+ assert.Error(t, err)
+ }
+}
+
+func TestPRSigstoreSignedPrepareTrustRoot(t *testing.T) {
+ const testKeyPath = "fixtures/cosign.pub"
+ testKeyData, err := os.ReadFile(testKeyPath)
+ require.NoError(t, err)
+ testFulcio, err := NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAPath("fixtures/fulcio_v1.crt.pem"),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("mitr@redhat.com"),
+ )
+ require.NoError(t, err)
+ const testRekorPublicKeyPath = "fixtures/rekor.pub"
+ testRekorPublicKeyData, err := os.ReadFile(testRekorPublicKeyPath)
+ require.NoError(t, err)
+ testIdentity := newPRMMatchRepoDigestOrExact()
+ testIdentityOption := PRSigstoreSignedWithSignedIdentity(testIdentity)
+
+ // Success with public key
+ for _, c := range [][]PRSigstoreSignedOption{
+ {
+ PRSigstoreSignedWithKeyPath(testKeyPath),
+ testIdentityOption,
+ },
+ {
+ PRSigstoreSignedWithKeyData(testKeyData),
+ testIdentityOption,
+ },
+ } {
+ pr, err := newPRSigstoreSigned(c...)
+ require.NoError(t, err)
+ res, err := pr.prepareTrustRoot()
+ require.NoError(t, err)
+ assert.NotNil(t, res.publicKey)
+ assert.Nil(t, res.fulcio)
+ assert.Nil(t, res.rekorPublicKey)
+ }
+ // Success with Fulcio
+ pr, err := newPRSigstoreSigned(
+ PRSigstoreSignedWithFulcio(testFulcio),
+ PRSigstoreSignedWithRekorPublicKeyData(testRekorPublicKeyData),
+ testIdentityOption,
+ )
+ require.NoError(t, err)
+ res, err := pr.prepareTrustRoot()
+ require.NoError(t, err)
+ assert.Nil(t, res.publicKey)
+ assert.NotNil(t, res.fulcio)
+ assert.NotNil(t, res.rekorPublicKey)
+ // Success with Rekor public key
+ for _, c := range [][]PRSigstoreSignedOption{
+ {
+ PRSigstoreSignedWithKeyData(testKeyData),
+ PRSigstoreSignedWithRekorPublicKeyPath(testRekorPublicKeyPath),
+ testIdentityOption,
+ },
+ {
+ PRSigstoreSignedWithKeyData(testKeyData),
+ PRSigstoreSignedWithRekorPublicKeyData(testRekorPublicKeyData),
+ testIdentityOption,
+ },
+ } {
+ pr, err := newPRSigstoreSigned(c...)
+ require.NoError(t, err)
+ res, err := pr.prepareTrustRoot()
+ require.NoError(t, err)
+ assert.NotNil(t, res.publicKey)
+ assert.Nil(t, res.fulcio)
+ assert.NotNil(t, res.rekorPublicKey)
+ }
+
+ // Failure
+ for _, pr := range []prSigstoreSigned{ // Use a prSigstoreSigned because these configurations should be rejected by NewPRSigstoreSigned.
+ { // Both KeyPath and KeyData specified
+ KeyPath: testKeyPath,
+ KeyData: testKeyData,
+ SignedIdentity: testIdentity,
+ },
+ { // Invalid public key path
+ KeyPath: "fixtures/image.signature",
+ SignedIdentity: testIdentity,
+ },
+ { // Unusable public key path
+ KeyPath: "fixtures/this/does/not/exist",
+ SignedIdentity: testIdentity,
+ },
+ { // Invalid public key data
+ KeyData: []byte("this is invalid"),
+ SignedIdentity: testIdentity,
+ },
+ { // Invalid Fulcio configuration
+ Fulcio: &prSigstoreSignedFulcio{},
+ RekorPublicKeyData: testKeyData,
+ SignedIdentity: testIdentity,
+ },
+ { // Both RekorPublicKeyPath and RekorPublicKeyData specified
+ KeyData: testKeyData,
+ RekorPublicKeyPath: testRekorPublicKeyPath,
+ RekorPublicKeyData: testRekorPublicKeyData,
+ SignedIdentity: testIdentity,
+ },
+ { // Invalid Rekor public key path
+ KeyData: testKeyData,
+ RekorPublicKeyPath: "fixtures/image.signature",
+ SignedIdentity: testIdentity,
+ },
+ { // Invalid Rekor public key data
+ KeyData: testKeyData,
+ RekorPublicKeyData: []byte("this is invalid"),
+ SignedIdentity: testIdentity,
+ },
+ { // Rekor public key is not ECDSA
+ KeyData: testKeyData,
+ RekorPublicKeyPath: "fixtures/some-rsa-key.pub",
+ SignedIdentity: testIdentity,
+ },
+ } {
+ _, err = pr.prepareTrustRoot()
+ assert.Error(t, err)
+ }
+}
+
+func TestPRSigstoreSignedIsSignatureAuthorAccepted(t *testing.T) {
+ // Currently, this fails even with a correctly signed image.
+ prm := NewPRMMatchRepository() // We prefer to test with a Cosign-created signature for interoperability, and that doesn’t work with matchExact.
+ testImage := dirImageMock(t, "fixtures/dir-img-cosign-valid", "192.168.64.2:5000/cosign-signed-single-sample")
+ testImageSigBlob, err := os.ReadFile("fixtures/dir-img-cosign-valid/signature-1")
+ require.NoError(t, err)
+
+ // A correctly configured requirement and a valid signature; still rejected, as noted above.
+ pr, err := newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), testImage, testImageSigBlob)
+ assertSARRejected(t, sar, parsedSig, err)
+}
+
+// sigstoreSignatureFromFile returns a signature.Sigstore loaded from path.
+func sigstoreSignatureFromFile(t *testing.T, path string) signature.Sigstore {
+ blob, err := os.ReadFile(path)
+ require.NoError(t, err)
+ genericSig, err := signature.FromBlob(blob)
+ require.NoError(t, err)
+ sig, ok := genericSig.(signature.Sigstore)
+ require.True(t, ok)
+ return sig
+}
+
+// sigstoreSignatureWithoutAnnotation returns a signature.Sigstore based on template
+// that is missing the specified annotation.
+func sigstoreSignatureWithoutAnnotation(t *testing.T, template signature.Sigstore, annotation string) signature.Sigstore {
+ annotations := template.UntrustedAnnotations() // This returns a copy that is safe to modify.
+ require.Contains(t, annotations, annotation)
+ delete(annotations, annotation)
+ return signature.SigstoreFromComponents(template.UntrustedMIMEType(), template.UntrustedPayload(), annotations)
+}
+
+// sigstoreSignatureWithModifiedAnnotation returns a signature.Sigstore based on template
+// where the specified annotation is replaced
+func sigstoreSignatureWithModifiedAnnotation(template signature.Sigstore, annotation, value string) signature.Sigstore {
+ annotations := template.UntrustedAnnotations() // This returns a copy that is safe to modify.
+ annotations[annotation] = value
+ return signature.SigstoreFromComponents(template.UntrustedMIMEType(), template.UntrustedPayload(), annotations)
+}
+
+func TestPRSigstoreSignedIsSignatureAccepted(t *testing.T) {
+ assertAccepted := func(sar signatureAcceptanceResult, err error) {
+ assert.Equal(t, sarAccepted, sar)
+ assert.NoError(t, err)
+ }
+ assertRejected := func(sar signatureAcceptanceResult, err error) {
+ logrus.Errorf("%v", err)
+ assert.Equal(t, sarRejected, sar)
+ assert.Error(t, err)
+ }
+
+ prm := NewPRMMatchRepository() // We prefer to test with a Cosign-created signature to ensure interoperability, and that doesn’t work with matchExact. matchExact is tested later.
+ testKeyImage := dirImageMock(t, "fixtures/dir-img-cosign-valid", "192.168.64.2:5000/cosign-signed-single-sample")
+ testKeyImageSig := sigstoreSignatureFromFile(t, "fixtures/dir-img-cosign-valid/signature-1")
+ testKeyRekorImage := dirImageMock(t, "fixtures/dir-img-cosign-key-rekor-valid", "192.168.64.2:5000/cosign-signed/key-1")
+ testKeyRekorImageSig := sigstoreSignatureFromFile(t, "fixtures/dir-img-cosign-key-rekor-valid/signature-1")
+ testFulcioRekorImage := dirImageMock(t, "fixtures/dir-img-cosign-fulcio-rekor-valid", "192.168.64.2:5000/cosign-signed/fulcio-rekor-1")
+ testFulcioRekorImageSig := sigstoreSignatureFromFile(t, "fixtures/dir-img-cosign-fulcio-rekor-valid/signature-1")
+ keyData, err := os.ReadFile("fixtures/cosign.pub")
+ require.NoError(t, err)
+
+ // prepareTrustRoot fails
+ pr := &prSigstoreSigned{
+ KeyPath: "fixtures/cosign.pub",
+ KeyData: keyData,
+ SignedIdentity: prm,
+ }
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err := pr.isSignatureAccepted(context.Background(), nil, testKeyImageSig)
+ assertRejected(sar, err)
+
+ // Signature has no cryptographic signature
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ signature.SigstoreFromComponents(testKeyImageSig.UntrustedMIMEType(), testKeyImageSig.UntrustedPayload(), nil))
+ assertRejected(sar, err)
+
+ // Neither a public key nor Fulcio is specified
+ pr = &prSigstoreSigned{
+ SignedIdentity: prm,
+ }
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil, testKeyImageSig)
+ assertRejected(sar, err)
+
+ // Both a public key and Fulcio are specified
+ fulcio, err := NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAPath("fixtures/fulcio_v1.crt.pem"),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("mitr@redhat.com"),
+ )
+ require.NoError(t, err)
+ pr = &prSigstoreSigned{
+ KeyPath: "fixtures/cosign.pub",
+ Fulcio: fulcio,
+ SignedIdentity: prm,
+ }
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil, testKeyImageSig)
+ assertRejected(sar, err)
+
+ // Successful key+Rekor use
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign2.pub"),
+ PRSigstoreSignedWithRekorPublicKeyPath("fixtures/rekor.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), testKeyRekorImage, testKeyRekorImageSig)
+ assertAccepted(sar, err)
+
+ // key+Rekor, missing Rekor SET annotation
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ sigstoreSignatureWithoutAnnotation(t, testKeyRekorImageSig, signature.SigstoreSETAnnotationKey))
+ assertRejected(sar, err)
+ // Actual Rekor logic is unit-tested elsewhere, but smoke-test the basics:
+ // key+Rekor: Invalid Rekor SET
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ sigstoreSignatureWithModifiedAnnotation(testKeyRekorImageSig, signature.SigstoreSETAnnotationKey,
+ "this is not a valid SET"))
+ assertRejected(sar, err)
+ // key+Rekor: A Rekor SET which we don’t accept (one of many reasons)
+ pr2, err := newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign2.pub"),
+ PRSigstoreSignedWithRekorPublicKeyPath("fixtures/cosign.pub"), // not rekor.pub = a key mismatch
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr2.isSignatureAccepted(context.Background(), nil, testKeyRekorImageSig)
+ assertRejected(sar, err)
+
+ // Successful Fulcio certificate use
+ fulcio, err = NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAPath("fixtures/fulcio_v1.crt.pem"),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("mitr@redhat.com"),
+ )
+ require.NoError(t, err)
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithFulcio(fulcio),
+ PRSigstoreSignedWithRekorPublicKeyPath("fixtures/rekor.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), testFulcioRekorImage,
+ testFulcioRekorImageSig)
+ assertAccepted(sar, err)
+
+ // Fulcio, no Rekor requirement
+ pr2 = &prSigstoreSigned{
+ Fulcio: fulcio,
+ SignedIdentity: prm,
+ }
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr2.isSignatureAccepted(context.Background(), nil,
+ sigstoreSignatureWithoutAnnotation(t, testFulcioRekorImageSig, signature.SigstoreSETAnnotationKey))
+ assertRejected(sar, err)
+ // Fulcio, missing Rekor SET annotation
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ sigstoreSignatureWithoutAnnotation(t, testFulcioRekorImageSig, signature.SigstoreSETAnnotationKey))
+ assertRejected(sar, err)
+ // Fulcio, missing certificate annotation
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ sigstoreSignatureWithoutAnnotation(t, testFulcioRekorImageSig, signature.SigstoreCertificateAnnotationKey))
+ assertRejected(sar, err)
+ // Fulcio: missing certificate chain annotation causes the Cosign-issued signature to be rejected
+ // because there is no path to the trusted CA
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ sigstoreSignatureWithoutAnnotation(t, testFulcioRekorImageSig, signature.SigstoreIntermediateCertificateChainAnnotationKey))
+ assertRejected(sar, err)
+ // … but a signature without the intermediate annotation is fine if the issuer is directly trusted
+ // (which we handle by trusting the intermediates)
+ fulcio2, err := NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAData([]byte(testFulcioRekorImageSig.UntrustedAnnotations()[signature.SigstoreIntermediateCertificateChainAnnotationKey])),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("mitr@redhat.com"),
+ )
+ require.NoError(t, err)
+ pr2, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithFulcio(fulcio2),
+ PRSigstoreSignedWithRekorPublicKeyPath("fixtures/rekor.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, err = pr2.isSignatureAccepted(context.Background(), testFulcioRekorImage,
+ sigstoreSignatureWithoutAnnotation(t, testFulcioRekorImageSig, signature.SigstoreIntermediateCertificateChainAnnotationKey))
+ assertAccepted(sar, err)
+ // Actual Fulcio and Rekor logic is unit-tested elsewhere, but smoke-test the basics:
+ // Fulcio: Invalid Fulcio certificate
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ sigstoreSignatureWithModifiedAnnotation(testFulcioRekorImageSig, signature.SigstoreCertificateAnnotationKey,
+ "this is not a valid certificate"))
+ assertRejected(sar, err)
+ // Fulcio: A Fulcio certificate which we don’t accept (one of many reasons)
+ fulcio2, err = NewPRSigstoreSignedFulcio(
+ PRSigstoreSignedFulcioWithCAPath("fixtures/fulcio_v1.crt.pem"),
+ PRSigstoreSignedFulcioWithOIDCIssuer("https://github.com/login/oauth"),
+ PRSigstoreSignedFulcioWithSubjectEmail("this-does-not-match@example.com"),
+ )
+ require.NoError(t, err)
+ pr2, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithFulcio(fulcio2),
+ PRSigstoreSignedWithRekorPublicKeyPath("fixtures/rekor.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr2.isSignatureAccepted(context.Background(), nil, testFulcioRekorImageSig)
+ assertRejected(sar, err)
+ // Fulcio: Invalid Rekor SET
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ sigstoreSignatureWithModifiedAnnotation(testFulcioRekorImageSig, signature.SigstoreSETAnnotationKey,
+ "this is not a valid SET"))
+ assertRejected(sar, err)
+ // Fulcio: A Rekor SET which we don’t accept (one of many reasons)
+ pr2, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithFulcio(fulcio),
+ PRSigstoreSignedWithRekorPublicKeyPath("fixtures/cosign.pub"), // not rekor.pub = a key mismatch
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr2.isSignatureAccepted(context.Background(), nil, testFulcioRekorImageSig)
+ assertRejected(sar, err)
+
+ // Successful validation, with KeyData and KeyPath
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), testKeyImage, testKeyImageSig)
+ assertAccepted(sar, err)
+
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyData(keyData),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), testKeyImage, testKeyImageSig)
+ assertAccepted(sar, err)
+
+ // A signature which does not verify
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil,
+ signature.SigstoreFromComponents(testKeyImageSig.UntrustedMIMEType(), testKeyImageSig.UntrustedPayload(), map[string]string{
+ signature.SigstoreSignatureAnnotationKey: base64.StdEncoding.EncodeToString([]byte("invalid signature")),
+ }))
+ assertRejected(sar, err)
+
+ // A valid signature using an unknown key.
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ // Pass a nil pointer to, kind of, test that the return value does not depend on the image.
+ sar, err = pr.isSignatureAccepted(context.Background(), nil, sigstoreSignatureFromFile(t, "fixtures/unknown-cosign-key.signature"))
+ assertRejected(sar, err)
+
+ // A valid signature with a rejected identity.
+ nonmatchingPRM, err := NewPRMExactReference("this/doesnt:match")
+ require.NoError(t, err)
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(nonmatchingPRM),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), testKeyImage, testKeyImageSig)
+ assertRejected(sar, err)
+
+ // Error reading image manifest
+ image := dirImageMock(t, "fixtures/dir-img-cosign-no-manifest", "192.168.64.2:5000/cosign-signed-single-sample")
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), image, sigstoreSignatureFromFile(t, "fixtures/dir-img-cosign-no-manifest/signature-1"))
+ assertRejected(sar, err)
+
+ // Error computing manifest digest
+ image = dirImageMock(t, "fixtures/dir-img-cosign-manifest-digest-error", "192.168.64.2:5000/cosign-signed-single-sample")
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), image, sigstoreSignatureFromFile(t, "fixtures/dir-img-cosign-manifest-digest-error/signature-1"))
+ assertRejected(sar, err)
+
+ // A valid signature with a non-matching manifest
+ image = dirImageMock(t, "fixtures/dir-img-cosign-modified-manifest", "192.168.64.2:5000/cosign-signed-single-sample")
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), image, sigstoreSignatureFromFile(t, "fixtures/dir-img-cosign-modified-manifest/signature-1"))
+ assertRejected(sar, err)
+
+ // Minimally check that the prmMatchExact also works as expected:
+ // - Signatures with a matching tag work
+ image = dirImageMock(t, "fixtures/dir-img-cosign-valid-with-tag", "192.168.64.2:5000/skopeo-signed:tag")
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchExact()),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), image, sigstoreSignatureFromFile(t, "fixtures/dir-img-cosign-valid-with-tag/signature-1"))
+ assertAccepted(sar, err)
+ // - Signatures with a non-matching tag are rejected
+ image = dirImageMock(t, "fixtures/dir-img-cosign-valid-with-tag", "192.168.64.2:5000/skopeo-signed:othertag")
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchExact()),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), image, sigstoreSignatureFromFile(t, "fixtures/dir-img-cosign-valid-with-tag/signature-1"))
+ assertRejected(sar, err)
+ // - Cosign-created signatures are rejected
+ pr, err = newPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchExact()),
+ )
+ require.NoError(t, err)
+ sar, err = pr.isSignatureAccepted(context.Background(), testKeyImage, testKeyImageSig)
+ assertRejected(sar, err)
+}
+
+func TestPRSigstoreSignedIsRunningImageAllowed(t *testing.T) {
+ prm := NewPRMMatchRepository() // We prefer to test with a Cosign-created signature to ensure interoperability, and that doesn’t work with matchExact. matchExact is tested later.
+
+ // A simple success case: single valid signature.
+ image := dirImageMock(t, "fixtures/dir-img-cosign-valid", "192.168.64.2:5000/cosign-signed-single-sample")
+ pr, err := NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err := pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningAllowed(t, allowed, err)
+
+ // Error reading signatures
+ invalidSigDir := createInvalidSigDir(t)
+ image = dirImageMock(t, invalidSigDir, "192.168.64.2:5000/cosign-signed-single-sample")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejected(t, allowed, err)
+
+ // No signatures
+ image = dirImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejected(t, allowed, err)
+
+ // Only non-sigstore signatures
+ image = dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejected(t, allowed, err)
+
+ // Only non-signature sigstore attachments
+ image = dirImageMock(t, "fixtures/dir-img-cosign-other-attachment", "testing/manifest:latest")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejected(t, allowed, err)
+
+	// 1 invalid signature: use dir-img-cosign-valid, but a non-matching Docker reference
+ image = dirImageMock(t, "fixtures/dir-img-cosign-valid", "testing/manifest:notlatest")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejectedPolicyRequirement(t, allowed, err)
+
+ // 2 valid signatures
+ image = dirImageMock(t, "fixtures/dir-img-cosign-valid-2", "192.168.64.2:5000/cosign-signed-single-sample")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningAllowed(t, allowed, err)
+
+ // One invalid, one valid signature (in this order)
+ image = dirImageMock(t, "fixtures/dir-img-cosign-mixed", "192.168.64.2:5000/cosign-signed-single-sample")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningAllowed(t, allowed, err)
+
+	// 2 invalid signatures: use dir-img-cosign-valid-2, but a non-matching Docker reference
+ image = dirImageMock(t, "fixtures/dir-img-cosign-valid-2", "this/doesnt:match")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(prm),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejectedPolicyRequirement(t, allowed, err)
+
+ // Minimally check that the prmMatchExact also works as expected:
+ // - Signatures with a matching tag work
+ image = dirImageMock(t, "fixtures/dir-img-cosign-valid-with-tag", "192.168.64.2:5000/skopeo-signed:tag")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchExact()),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningAllowed(t, allowed, err)
+ // - Signatures with a non-matching tag are rejected
+ image = dirImageMock(t, "fixtures/dir-img-cosign-valid-with-tag", "192.168.64.2:5000/skopeo-signed:othertag")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchExact()),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejectedPolicyRequirement(t, allowed, err)
+ // - Cosign-created signatures are rejected
+ image = dirImageMock(t, "fixtures/dir-img-cosign-valid", "192.168.64.2:5000/cosign-signed-single-sample")
+ pr, err = NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath("fixtures/cosign.pub"),
+ PRSigstoreSignedWithSignedIdentity(NewPRMMatchExact()),
+ )
+ require.NoError(t, err)
+ allowed, err = pr.isRunningImageAllowed(context.Background(), image)
+ assertRunningRejectedPolicyRequirement(t, allowed, err)
+}
diff --git a/signature/policy_eval_simple.go b/signature/policy_eval_simple.go
new file mode 100644
index 0000000..031866f
--- /dev/null
+++ b/signature/policy_eval_simple.go
@@ -0,0 +1,29 @@
+// Policy evaluation for the various simple PolicyRequirement types.
+
+package signature
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/transports"
+)
+
+func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ // prInsecureAcceptAnything semantics: Every image is allowed to run,
+ // but this does not consider the signature as verified.
+ return sarUnknown, nil, nil
+}
+
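+// isRunningImageAllowed for prInsecureAcceptAnything: every image is allowed to run, with no error.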
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ return true, nil
+}
+
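+// prReject semantics: every signature and every image is rejected with a PolicyRequirementError.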
+func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
+}
+
+func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) {
+ return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
+}
diff --git a/signature/policy_eval_simple_test.go b/signature/policy_eval_simple_test.go
new file mode 100644
index 0000000..3249090
--- /dev/null
+++ b/signature/policy_eval_simple_test.go
@@ -0,0 +1,57 @@
+package signature
+
+import (
+ "context"
+ "testing"
+
+ "github.com/containers/image/v5/internal/testing/mocks"
+ "github.com/containers/image/v5/types"
+)
+
+// nameOnlyImageMock is a mock of private.UnparsedImage which only allows transports.ImageName to work
+type nameOnlyImageMock struct {
+ mocks.ForbiddenUnparsedImage
+}
+
+func (nameOnlyImageMock) Reference() types.ImageReference {
+ return nameOnlyImageReferenceMock{s: "== StringWithinTransport mock"}
+}
+
+// nameOnlyImageReferenceMock is a mock of types.ImageReference which only allows transports.ImageName to work, returning self.
+type nameOnlyImageReferenceMock struct {
+ mocks.ForbiddenImageReference
+ s string
+}
+
+func (ref nameOnlyImageReferenceMock) Transport() types.ImageTransport {
+ return mocks.NameImageTransport("== Transport mock")
+}
+func (ref nameOnlyImageReferenceMock) StringWithinTransport() string {
+ return ref.s
+}
+
+func TestPRInsecureAcceptAnythingIsSignatureAuthorAccepted(t *testing.T) {
+ pr := NewPRInsecureAcceptAnything()
+ // Pass nil signature to, kind of, test that the return value does not depend on it.
+ sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nameOnlyImageMock{}, nil)
+ assertSARUnknown(t, sar, parsedSig, err)
+}
+
+func TestPRInsecureAcceptAnythingIsRunningImageAllowed(t *testing.T) {
+ pr := NewPRInsecureAcceptAnything()
+ res, err := pr.isRunningImageAllowed(context.Background(), nameOnlyImageMock{})
+ assertRunningAllowed(t, res, err)
+}
+
+func TestPRRejectIsSignatureAuthorAccepted(t *testing.T) {
+ pr := NewPRReject()
+ // Pass nil signature to, kind of, test that the return value does not depend on it.
+ sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nameOnlyImageMock{}, nil)
+ assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
+}
+
+func TestPRRejectIsRunningImageAllowed(t *testing.T) {
+ pr := NewPRReject()
+ res, err := pr.isRunningImageAllowed(context.Background(), nameOnlyImageMock{})
+ assertRunningRejectedPolicyRequirement(t, res, err)
+}
diff --git a/signature/policy_eval_test.go b/signature/policy_eval_test.go
new file mode 100644
index 0000000..af3c3e4
--- /dev/null
+++ b/signature/policy_eval_test.go
@@ -0,0 +1,499 @@
+package signature
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/testing/mocks"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPolicyRequirementError(t *testing.T) {
+ // A stupid test just to keep code coverage
+ s := "test"
+ err := PolicyRequirementError(s)
+ assert.Equal(t, s, err.Error())
+}
+
+func TestPolicyContextChangeState(t *testing.T) {
+ pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
+ require.NoError(t, err)
+ defer func() {
+ err := pc.Destroy()
+ require.NoError(t, err)
+ }()
+
+ require.Equal(t, pcReady, pc.state)
+ err = pc.changeState(pcReady, pcInUse)
+ require.NoError(t, err)
+
+ err = pc.changeState(pcReady, pcInUse)
+ require.Error(t, err)
+
+ // Return state to pcReady to allow pc.Destroy to clean up.
+ err = pc.changeState(pcInUse, pcReady)
+ require.NoError(t, err)
+}
+
+func TestPolicyContextNewDestroy(t *testing.T) {
+ pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
+ require.NoError(t, err)
+ assert.Equal(t, pcReady, pc.state)
+
+ err = pc.Destroy()
+ require.NoError(t, err)
+ assert.Equal(t, pcDestroyed, pc.state)
+
+ // Trying to destroy when not pcReady
+ pc, err = NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
+ require.NoError(t, err)
+ err = pc.changeState(pcReady, pcInUse)
+ require.NoError(t, err)
+ err = pc.Destroy()
+ require.Error(t, err)
+ assert.Equal(t, pcInUse, pc.state) // The state, and hopefully nothing else, has changed.
+
+ err = pc.changeState(pcInUse, pcReady)
+ require.NoError(t, err)
+ err = pc.Destroy()
+ assert.NoError(t, err)
+}
+
+// pcImageReferenceMock is a mock of types.ImageReference which returns the stored reference in DockerReference
+// and handles PolicyConfigurationIdentity and PolicyConfigurationNamespaces consistently.
+type pcImageReferenceMock struct {
+ mocks.ForbiddenImageReference
+ transportName string
+ ref reference.Named
+}
+
+func (ref pcImageReferenceMock) Transport() types.ImageTransport {
+ return mocks.NameImageTransport(ref.transportName)
+}
+func (ref pcImageReferenceMock) StringWithinTransport() string {
+ // We use this in error messages, so sadly we must return something.
+ return "== StringWithinTransport mock"
+}
+func (ref pcImageReferenceMock) DockerReference() reference.Named {
+ return ref.ref
+}
+func (ref pcImageReferenceMock) PolicyConfigurationIdentity() string {
+ res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
+ if res == "" || err != nil {
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+ }
+ return res
+}
+func (ref pcImageReferenceMock) PolicyConfigurationNamespaces() []string {
+ if ref.ref == nil {
+ panic("unexpected call to a mock function")
+ }
+ return policyconfiguration.DockerReferenceNamespaces(ref.ref)
+}
+
+func TestPolicyContextRequirementsForImageRefNotRegisteredTransport(t *testing.T) {
+ transports.Delete("docker")
+ assert.Nil(t, transports.Get("docker"))
+
+ defer func() {
+ assert.Nil(t, transports.Get("docker"))
+ transports.Register(docker.Transport)
+ assert.NotNil(t, transports.Get("docker"))
+ }()
+
+ pr := []PolicyRequirement{
+ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()),
+ }
+ policy := &Policy{
+ Default: PolicyRequirements{NewPRReject()},
+ Transports: map[string]PolicyTransportScopes{
+ "docker": {
+ "registry.access.redhat.com": pr,
+ },
+ },
+ }
+ pc, err := NewPolicyContext(policy)
+ require.NoError(t, err)
+ ref, err := reference.ParseNormalizedNamed("registry.access.redhat.com/rhel7:latest")
+ require.NoError(t, err)
+ reqs := pc.requirementsForImageRef(pcImageReferenceMock{transportName: "docker", ref: ref})
+ assert.True(t, &(reqs[0]) == &(pr[0]))
+ assert.True(t, len(reqs) == len(pr))
+
+}
+
+func TestPolicyContextRequirementsForImageRef(t *testing.T) {
+ ktGPG := SBKeyTypeGPGKeys
+ prm := NewPRMMatchRepoDigestOrExact()
+
+ policy := &Policy{
+ Default: PolicyRequirements{NewPRReject()},
+ Transports: map[string]PolicyTransportScopes{},
+ }
+ // Just put _something_ into the PolicyTransportScopes map for the keys we care about, and make it pairwise
+ // distinct so that we can compare the values and show them when debugging the tests.
+ for _, t := range []struct{ transport, scope string }{
+ {"docker", ""},
+ {"docker", "unmatched"},
+ {"docker", "deep.com"},
+ {"docker", "*.very.deep.com"},
+ {"docker", "*.deep.com"},
+ {"docker", "deep.com/n1"},
+ {"docker", "deep.com/n1/n2"},
+ {"docker", "deep.com/n1/n2/n3"},
+ {"docker", "deep.com/n1/n2/n3/repo"},
+ {"docker", "deep.com/n1/n2/n3/repo:tag2"},
+ {"atomic", "unmatched"},
+ } {
+ if _, ok := policy.Transports[t.transport]; !ok {
+ policy.Transports[t.transport] = PolicyTransportScopes{}
+ }
+ policy.Transports[t.transport][t.scope] = PolicyRequirements{xNewPRSignedByKeyData(ktGPG, []byte(t.transport+t.scope), prm)}
+ }
+
+ pc, err := NewPolicyContext(policy)
+ require.NoError(t, err)
+
+ for _, c := range []struct{ inputTransport, input, matchedTransport, matched string }{
+ // Full match
+ {"docker", "deep.com/n1/n2/n3/repo:tag2", "docker", "deep.com/n1/n2/n3/repo:tag2"},
+ // Namespace matches
+ {"docker", "deep.com/n1/n2/n3/repo:nottag2", "docker", "deep.com/n1/n2/n3/repo"},
+ {"docker", "deep.com/n1/n2/n3/notrepo:tag2", "docker", "deep.com/n1/n2/n3"},
+ {"docker", "deep.com/n1/n2/notn3/repo:tag2", "docker", "deep.com/n1/n2"},
+ {"docker", "deep.com/n1/notn2/n3/repo:tag2", "docker", "deep.com/n1"},
+ // Host name match
+ {"docker", "deep.com/notn1/n2/n3/repo:tag2", "docker", "deep.com"},
+ // Sub domain match
+ {"docker", "very.deep.com/n1/n2/n3/repo:tag2", "docker", "*.deep.com"},
+ {"docker", "not.very.deep.com/n1/n2/n3/repo:tag2", "docker", "*.very.deep.com"},
+ // Default
+ {"docker", "this.does-not/match:anything", "docker", ""},
+ // No match within a matched transport which doesn't have a "" scope
+ {"atomic", "this.does-not/match:anything", "", ""},
+ // No configuration available for this transport at all
+ {"dir", "what/ever", "", ""}, // "what/ever" is not a valid scope for the real "dir" transport, but we only need it to be a valid reference.Named.
+ } {
+ var expected PolicyRequirements
+ if c.matchedTransport != "" {
+ e, ok := policy.Transports[c.matchedTransport][c.matched]
+ require.True(t, ok, fmt.Sprintf("case %s:%s: expected reqs not found", c.inputTransport, c.input))
+ expected = e
+ } else {
+ expected = policy.Default
+ }
+
+ ref, err := reference.ParseNormalizedNamed(c.input)
+ require.NoError(t, err)
+ reqs := pc.requirementsForImageRef(pcImageReferenceMock{transportName: c.inputTransport, ref: ref})
+ comment := fmt.Sprintf("case %s:%s: %#v", c.inputTransport, c.input, reqs[0])
+ // Do not use assert.Equal, which would do a deep contents comparison; we want to compare
+ // the pointers. Also, == does not work on slices; so test that the slices start at the
+ // same element and have the same length.
+ assert.True(t, &(reqs[0]) == &(expected[0]), comment)
+ assert.True(t, len(reqs) == len(expected), comment)
+ }
+}
+
+// pcImageMock returns a private.UnparsedImage for a directory, claiming a specified dockerReference and implementing PolicyConfigurationIdentity/PolicyConfigurationNamespaces.
+func pcImageMock(t *testing.T, dir, dockerReference string) private.UnparsedImage {
+ ref, err := reference.ParseNormalizedNamed(dockerReference)
+ require.NoError(t, err)
+ return dirImageMockWithRef(t, dir, pcImageReferenceMock{transportName: "docker", ref: ref})
+}
+
+func TestPolicyContextGetSignaturesWithAcceptedAuthor(t *testing.T) {
+ expectedSig := &Signature{
+ DockerManifestDigest: TestImageManifestDigest,
+ DockerReference: "testing/manifest:latest",
+ }
+
+ pc, err := NewPolicyContext(&Policy{
+ Default: PolicyRequirements{NewPRReject()},
+ Transports: map[string]PolicyTransportScopes{
+ "docker": {
+ "docker.io/testing/manifest:latest": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()),
+ },
+ "docker.io/testing/manifest:twoAccepts": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
+ },
+ "docker.io/testing/manifest:acceptReject": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
+ NewPRReject(),
+ },
+ "docker.io/testing/manifest:acceptUnknown": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
+ xNewPRSignedBaseLayer(NewPRMMatchRepository()),
+ },
+ "docker.io/testing/manifest:rejectUnknown": {
+ NewPRReject(),
+ xNewPRSignedBaseLayer(NewPRMMatchRepository()),
+ },
+ "docker.io/testing/manifest:unknown": {
+ xNewPRSignedBaseLayer(NewPRMMatchRepository()),
+ },
+ "docker.io/testing/manifest:unknown2": {
+ NewPRInsecureAcceptAnything(),
+ },
+ "docker.io/testing/manifest:invalidEmptyRequirements": {},
+ },
+ },
+ })
+ require.NoError(t, err)
+ defer func() {
+ err := pc.Destroy()
+ require.NoError(t, err)
+ }()
+
+ // Success
+ img := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
+ sigs, err := pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Equal(t, []*Signature{expectedSig}, sigs)
+
+ // Two signatures
+ // FIXME? Use really different signatures for this?
+ img = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Equal(t, []*Signature{expectedSig, expectedSig}, sigs)
+
+ // No signatures
+ img = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Empty(t, sigs)
+
+ // Only invalid signatures
+ img = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Empty(t, sigs)
+
+ // 1 invalid, 1 valid signature (in this order)
+ img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Equal(t, []*Signature{expectedSig}, sigs)
+
+ // Two sarAccepted results for one signature
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:twoAccepts")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Equal(t, []*Signature{expectedSig}, sigs)
+
+ // sarAccepted+sarRejected for a signature
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptReject")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Empty(t, sigs)
+
+ // sarAccepted+sarUnknown for a signature
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptUnknown")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Equal(t, []*Signature{expectedSig}, sigs)
+
+ // sarRejected+sarUnknown for a signature
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:rejectUnknown")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Empty(t, sigs)
+
+ // sarUnknown only
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Empty(t, sigs)
+
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown2")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Empty(t, sigs)
+
+ // Empty list of requirements (invalid)
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ require.NoError(t, err)
+ assert.Empty(t, sigs)
+
+ // Failures: Make sure we return nil sigs.
+
+ // Unexpected state (context already destroyed)
+ destroyedPC, err := NewPolicyContext(pc.Policy)
+ require.NoError(t, err)
+ err = destroyedPC.Destroy()
+ require.NoError(t, err)
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
+ sigs, err = destroyedPC.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ assert.Error(t, err)
+ assert.Nil(t, sigs)
+ // Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement
+ // implementations meddling with the state, or threads. This is for catching trivial programmer
+ // mistakes only, anyway.
+
+ // Error reading signatures.
+ invalidSigDir := createInvalidSigDir(t)
+ img = pcImageMock(t, invalidSigDir, "testing/manifest:latest")
+ sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img)
+ assert.Error(t, err)
+ assert.Nil(t, sigs)
+}
+
+func TestPolicyContextIsRunningImageAllowed(t *testing.T) {
+ pc, err := NewPolicyContext(&Policy{
+ Default: PolicyRequirements{NewPRReject()},
+ Transports: map[string]PolicyTransportScopes{
+ "docker": {
+ "docker.io/testing/manifest:latest": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()),
+ },
+ "docker.io/testing/manifest:twoAllows": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
+ },
+ "docker.io/testing/manifest:allowDeny": {
+ xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
+ NewPRReject(),
+ },
+ "docker.io/testing/manifest:reject": {
+ NewPRReject(),
+ },
+ "docker.io/testing/manifest:acceptAnything": {
+ NewPRInsecureAcceptAnything(),
+ },
+ "docker.io/testing/manifest:invalidEmptyRequirements": {},
+ },
+ },
+ })
+ require.NoError(t, err)
+ defer func() {
+ err := pc.Destroy()
+ require.NoError(t, err)
+ }()
+
+ // Success
+ img := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
+ res, err := pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningAllowed(t, res, err)
+
+ // Two signatures
+ // FIXME? Use really different signatures for this?
+ img = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningAllowed(t, res, err)
+
+ // No signatures
+ img = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningRejectedPolicyRequirement(t, res, err)
+
+ // Only invalid signatures
+ img = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningRejectedPolicyRequirement(t, res, err)
+
+ // 1 invalid, 1 valid signature (in this order)
+ img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningAllowed(t, res, err)
+
+ // Two allowed results
+ img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:twoAllows")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningAllowed(t, res, err)
+
+ // Allow + deny results
+ img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:allowDeny")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningRejectedPolicyRequirement(t, res, err)
+
+ // prReject works
+ img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:reject")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningRejectedPolicyRequirement(t, res, err)
+
+ // prInsecureAcceptAnything works
+ img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:acceptAnything")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningAllowed(t, res, err)
+
+ // Empty list of requirements (invalid)
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements")
+ res, err = pc.IsRunningImageAllowed(context.Background(), img)
+ assertRunningRejectedPolicyRequirement(t, res, err)
+
+ // Unexpected state (context already destroyed)
+ destroyedPC, err := NewPolicyContext(pc.Policy)
+ require.NoError(t, err)
+ err = destroyedPC.Destroy()
+ require.NoError(t, err)
+ img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
+ res, err = destroyedPC.IsRunningImageAllowed(context.Background(), img)
+ assertRunningRejected(t, res, err)
+ // Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement
+ // implementations meddling with the state, or threads. This is for catching trivial programmer
+ // mistakes only, anyway.
+}
+
+// Helpers for validating PolicyRequirement.isSignatureAuthorAccepted results:
+
+// assertSARAccepted verifies that isSignatureAuthorAccepted returns a consistent sarAccepted result
+// with the expected signature.
+func assertSARAccepted(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error, expectedSig Signature) {
+ assert.Equal(t, sarAccepted, sar)
+ assert.Equal(t, &expectedSig, parsedSig)
+ assert.NoError(t, err)
+}
+
+// assertSARRejected verifies that isSignatureAuthorAccepted returns a consistent sarRejected result.
+func assertSARRejected(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
+ assert.Equal(t, sarRejected, sar)
+ assert.Nil(t, parsedSig)
+ assert.Error(t, err)
+}
+
+// assertSARRejectedPolicyRequirement verifies that isSignatureAuthorAccepted returns a consistent sarRejected result,
+// and that the returned error is a PolicyRequirementError.
+func assertSARRejectedPolicyRequirement(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
+ assertSARRejected(t, sar, parsedSig, err)
+ assert.IsType(t, PolicyRequirementError(""), err)
+}
+
+// assertSARUnknown verifies that isSignatureAuthorAccepted returns a consistent sarUnknown result.
+func assertSARUnknown(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
+ assert.Equal(t, sarUnknown, sar)
+ assert.Nil(t, parsedSig)
+ assert.NoError(t, err)
+}
+
+// Helpers for validating PolicyRequirement.isRunningImageAllowed results:
+
+// assertRunningAllowed verifies that isRunningImageAllowed returns a consistent true result
+func assertRunningAllowed(t *testing.T, allowed bool, err error) {
+ assert.Equal(t, true, allowed)
+ assert.NoError(t, err)
+}
+
+// assertRunningRejected verifies that isRunningImageAllowed returns a consistent false result
+func assertRunningRejected(t *testing.T, allowed bool, err error) {
+ assert.Equal(t, false, allowed)
+ assert.Error(t, err)
+}
+
+// assertRunningRejectedPolicyRequirement verifies that isRunningImageAllowed returns a consistent false result
+// and that the returned error is a PolicyRequirementError.
+func assertRunningRejectedPolicyRequirement(t *testing.T, allowed bool, err error) {
+ assertRunningRejected(t, allowed, err)
+ assert.IsType(t, PolicyRequirementError(""), err)
+}
diff --git a/signature/policy_paths_common.go b/signature/policy_paths_common.go
new file mode 100644
index 0000000..290fc24
--- /dev/null
+++ b/signature/policy_paths_common.go
@@ -0,0 +1,8 @@
+//go:build !freebsd
+// +build !freebsd
+
+package signature
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
diff --git a/signature/policy_paths_freebsd.go b/signature/policy_paths_freebsd.go
new file mode 100644
index 0000000..702b717
--- /dev/null
+++ b/signature/policy_paths_freebsd.go
@@ -0,0 +1,8 @@
+//go:build freebsd
+// +build freebsd
+
+package signature
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json"
diff --git a/signature/policy_reference_match.go b/signature/policy_reference_match.go
new file mode 100644
index 0000000..4e70c0f
--- /dev/null
+++ b/signature/policy_reference_match.go
@@ -0,0 +1,154 @@
+// PolicyReferenceMatch implementations.
+
+package signature
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/transports"
+)
+
+// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
+func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+ r1 := image.Reference().DockerReference()
+ if r1 == nil {
+ return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+ transports.ImageName(image.Reference())))
+ }
+ r2, err := reference.ParseNormalizedNamed(s2)
+ if err != nil {
+ return nil, nil, err
+ }
+ return r1, r2, nil
+}
+
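+// prmMatchExact semantics: the image identity and the signature reference must match exactly; name-only references (without a tag or digest) never match.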
+func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+ return false
+ }
+ return signature.String() == intended.String()
+}
+
+// matchRepoDigestOrExactReferenceValues implements prmMatchRepoDigestOrExact.matchesDockerReference
+// using reference.Named values.
+func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named) bool {
+ // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(signature) {
+ return false
+ }
+ switch intended.(type) {
+ case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+ return signature.String() == intended.String()
+ case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, using UnparsedImage.Manifest.
+ // Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+ // we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
+ return signature.Name() == intended.Name()
+ default: // !reference.IsNameOnly(intended)
+ return false
+ }
+}
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ return matchRepoDigestOrExactReferenceValues(intended, signature)
+}
+
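+// prmMatchRepository semantics: only the repository names must match; any tags or digests are ignored.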
+func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ return signature.Name() == intended.Name()
+}
+
+// parseDockerReferences converts two reference strings into parsed entities, failing on any error
+func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
+ r1, err := reference.ParseNormalizedNamed(s1)
+ if err != nil {
+ return nil, nil, err
+ }
+ r2, err := reference.ParseNormalizedNamed(s2)
+ if err != nil {
+ return nil, nil, err
+ }
+ return r1, r2, nil
+}
+
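+// prmExactReference semantics: the signature reference must exactly match the configured prm.DockerReference, including a tag or digest; name-only values never match.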
+func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ // prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+ return false
+ }
+ return signature.String() == intended.String()
+}
+
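+// prmExactRepository semantics: the signature reference must name the same repository as the configured prm.DockerRepository; tags and digests are ignored.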
+func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ return signature.Name() == intended.Name()
+}
+
+// refMatchesPrefix returns true if ref matches prm.Prefix.
+func (prm *prmRemapIdentity) refMatchesPrefix(ref reference.Named) bool {
+ name := ref.Name()
+ switch {
+ case len(name) < len(prm.Prefix):
+ return false
+ case len(name) == len(prm.Prefix):
+ return name == prm.Prefix
+ case len(name) > len(prm.Prefix):
+ // We are matching only ref.Name(), not ref.String(), so the only separator we are
+ // expecting is '/':
+ // - '@' is only valid to separate a digest, i.e. not a part of ref.Name()
+ // - similarly ':' to mark a tag would not be a part of ref.Name(); it can be a part of a
+ // host:port domain syntax, but we don't treat that specially and require an exact match
+ // of the domain.
+ return strings.HasPrefix(name, prm.Prefix) && name[len(prm.Prefix)] == '/'
+ default:
+ panic("Internal error: impossible comparison outcome")
+ }
+}
+
+// remapReferencePrefix returns the result of remapping ref, if it matches prm.Prefix
+// or the original ref if it does not.
+func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (reference.Named, error) {
+ if !prm.refMatchesPrefix(ref) {
+ return ref, nil
+ }
+ refString := ref.String()
+ newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
+ newParsedRef, err := reference.ParseNamed(newNamedRef)
+ if err != nil {
+ return nil, fmt.Errorf(`error rewriting reference from "%s" to "%s": %v`, refString, newNamedRef, err)
+ }
+ return newParsedRef, nil
+}
+
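+// matchesDockerReference for prmRemapIdentity: remap the image identity from prm.Prefix to prm.SignedPrefix, then apply the prmMatchRepoDigestOrExact rules.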
+func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ intended, err = prm.remapReferencePrefix(intended)
+ if err != nil {
+ return false
+ }
+ return matchRepoDigestOrExactReferenceValues(intended, signature)
+}
diff --git a/signature/policy_reference_match_test.go b/signature/policy_reference_match_test.go
new file mode 100644
index 0000000..8707732
--- /dev/null
+++ b/signature/policy_reference_match_test.go
@@ -0,0 +1,556 @@
+package signature
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/testing/mocks"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ fullRHELRef = "registry.access.redhat.com/rhel7/rhel:7.2.3"
+ untaggedRHELRef = "registry.access.redhat.com/rhel7/rhel"
+ digestSuffix = "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ digestSuffixOther = "@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+)
+
+func TestParseImageAndDockerReference(t *testing.T) {
+ const (
+ ok1 = "busybox"
+ ok2 = fullRHELRef
+ bad1 = "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES"
+ bad2 = ""
+ )
+ // Success
+ ref, err := reference.ParseNormalizedNamed(ok1)
+ require.NoError(t, err)
+ r1, r2, err := parseImageAndDockerReference(refImageMock{ref: ref}, ok2)
+ require.NoError(t, err)
+ assert.Equal(t, ok1, reference.FamiliarString(r1))
+ assert.Equal(t, ok2, reference.FamiliarString(r2))
+
+ // Unidentified images are rejected.
+ _, _, err = parseImageAndDockerReference(refImageMock{ref: nil}, ok2)
+ require.Error(t, err)
+ assert.IsType(t, PolicyRequirementError(""), err)
+
+ // Failures
+ for _, refs := range [][]string{
+ {bad1, ok2},
+ {ok1, bad2},
+ {bad1, bad2},
+ } {
+ ref, err := reference.ParseNormalizedNamed(refs[0])
+ if err == nil {
+ _, _, err := parseImageAndDockerReference(refImageMock{ref: ref}, refs[1])
+ assert.Error(t, err)
+ }
+ }
+}
+
+// refImageMock is a mock of private.UnparsedImage which returns the stored reference in Reference().DockerReference.
+type refImageMock struct {
+ mocks.ForbiddenUnparsedImage
+ ref reference.Named
+}
+
+func (ref refImageMock) Reference() types.ImageReference {
+ return refImageReferenceMock{ref: ref.ref}
+}
+
+// refImageReferenceMock is a mock of types.ImageReference which returns the stored reference in DockerReference.
+type refImageReferenceMock struct {
+ mocks.ForbiddenImageReference
+ ref reference.Named
+}
+
+func (ref refImageReferenceMock) Transport() types.ImageTransport {
+ return mocks.NameImageTransport("== Transport mock")
+}
+func (ref refImageReferenceMock) StringWithinTransport() string {
+ // We use this in error messages, so sadly we must return something. But right now we do so only when DockerReference is nil, so restrict to that.
+ if ref.ref == nil {
+ return "== StringWithinTransport for an image with no Docker support"
+ }
+ panic("unexpected call to a mock function")
+}
+func (ref refImageReferenceMock) DockerReference() reference.Named {
+ return ref.ref
+}
+
+type prmSymmetricTableTest struct {
+ refA, refB string
+ result bool
+}
+
+// Test cases for exact reference match. The behavior is supposed to be symmetric.
+var prmExactMatchTestTable = []prmSymmetricTableTest{
+ // Success, simple matches
+ {"busybox:latest", "busybox:latest", true},
+ {fullRHELRef, fullRHELRef, true},
+ {"busybox" + digestSuffix, "busybox" + digestSuffix, true}, // NOTE: This is not documented; signing digests is not recommended at this time.
+ // Non-canonical reference format is canonicalized
+ {"library/busybox:latest", "busybox:latest", true},
+ {"docker.io/library/busybox:latest", "busybox:latest", true},
+ {"library/busybox" + digestSuffix, "busybox" + digestSuffix, true},
+ // Mismatch
+ {"busybox:latest", "busybox:notlatest", false},
+ {"busybox:latest", "notbusybox:latest", false},
+ {"busybox:latest", "hostname/library/busybox:notlatest", false},
+ {"hostname/library/busybox:latest", "busybox:notlatest", false},
+ {"busybox:latest", fullRHELRef, false},
+ {"busybox" + digestSuffix, "notbusybox" + digestSuffix, false},
+ {"busybox:latest", "busybox" + digestSuffix, false},
+ {"busybox" + digestSuffix, "busybox" + digestSuffixOther, false},
+ // NameOnly references
+ {"busybox", "busybox:latest", false},
+ {"busybox", "busybox" + digestSuffix, false},
+ {"busybox", "busybox", false},
+ // References with both tags and digests: We match them exactly (requiring BOTH to match)
+ // NOTE: Again, this is not documented behavior; the recommendation is to sign tags, not digests, and then tag-and-digest references won’t match the signed identity.
+ {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffix, true},
+ {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, false},
+ {"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffix, false},
+ {"busybox:latest" + digestSuffix, "busybox" + digestSuffix, false},
+ {"busybox:latest" + digestSuffix, "busybox:latest", false},
+ // Invalid format
+ {"UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", "busybox:latest", false},
+ {"", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false},
+ // Even if they are exactly equal, invalid values are rejected.
+ {"INVALID", "INVALID", false},
+}
+
+// Test cases for repository-only reference match. The behavior is supposed to be symmetric.
+var prmRepositoryMatchTestTable = []prmSymmetricTableTest{
+ // Success, simple matches
+ {"busybox:latest", "busybox:latest", true},
+ {fullRHELRef, fullRHELRef, true},
+ {"busybox" + digestSuffix, "busybox" + digestSuffix, true}, // NOTE: This is not documented; signing digests is not recommended at this time.
+ // Non-canonical reference format is canonicalized
+ {"library/busybox:latest", "busybox:latest", true},
+ {"docker.io/library/busybox:latest", "busybox:latest", true},
+ {"library/busybox" + digestSuffix, "busybox" + digestSuffix, true},
+ // The same as above, but with mismatching tags
+ {"busybox:latest", "busybox:notlatest", true},
+ {fullRHELRef + "tagsuffix", fullRHELRef, true},
+ {"library/busybox:latest", "busybox:notlatest", true},
+ {"busybox:latest", "library/busybox:notlatest", true},
+ {"docker.io/library/busybox:notlatest", "busybox:latest", true},
+ {"busybox:notlatest", "docker.io/library/busybox:latest", true},
+ {"busybox:latest", "busybox" + digestSuffix, true},
+ {"busybox" + digestSuffix, "busybox" + digestSuffixOther, true}, // Even this is accepted here. (This could more reasonably happen with two different digest algorithms.)
+ // The same as above, but with defaulted tags (which can happen with /usr/bin/cosign)
+ {"busybox", "busybox:notlatest", true},
+ {fullRHELRef, untaggedRHELRef, true},
+ {"busybox", "busybox" + digestSuffix, true},
+ {"library/busybox", "busybox", true},
+ {"docker.io/library/busybox", "busybox", true},
+ // Mismatch
+ {"busybox:latest", "notbusybox:latest", false},
+ {"hostname/library/busybox:latest", "busybox:notlatest", false},
+ {"busybox:latest", fullRHELRef, false},
+ {"busybox" + digestSuffix, "notbusybox" + digestSuffix, false},
+ // References with both tags and digests: We ignore both anyway.
+ {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffix, true},
+ {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, true},
+ {"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffix, true},
+ {"busybox:latest" + digestSuffix, "busybox" + digestSuffix, true},
+ {"busybox:latest" + digestSuffix, "busybox:latest", true},
+ // Invalid format
+ {"UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", "busybox:latest", false},
+ {"", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false},
+ // Even if they are exactly equal, invalid values are rejected.
+ {"INVALID", "INVALID", false},
+}
+
+// Test cases for matchRepoDigestOrExact
+var matchRepoDigestOrExactTestTable = []struct {
+ imageRef, sigRef string
+ result bool
+}{
+ // Tag mismatch
+ {"busybox:latest", "busybox:notlatest", false},
+ {fullRHELRef + "tagsuffix", fullRHELRef, false},
+ {"library/busybox:latest", "busybox:notlatest", false},
+ {"busybox:latest", "library/busybox:notlatest", false},
+ {"docker.io/library/busybox:notlatest", "busybox:latest", false},
+ {"busybox:notlatest", "docker.io/library/busybox:latest", false},
+ // NameOnly references
+ {"busybox", "busybox:latest", false},
+ {"busybox:latest", "busybox", false},
+ {"busybox", "busybox" + digestSuffix, false},
+ {"busybox" + digestSuffix, "busybox", false},
+ {fullRHELRef, untaggedRHELRef, false},
+ {"busybox", "busybox", false},
+ // Tag references only accept signatures with matching tags.
+ {"busybox:latest", "busybox" + digestSuffix, false},
+ // Digest references accept any signature with matching repository.
+ {"busybox" + digestSuffix, "busybox:latest", true},
+ {"busybox" + digestSuffix, "busybox" + digestSuffixOther, true}, // Even this is accepted here. (This could more reasonably happen with two different digest algorithms.)
+ // References with both tags and digests: We match them exactly (requiring BOTH to match).
+ {"busybox:latest" + digestSuffix, "busybox:latest", false},
+ {"busybox:latest" + digestSuffix, "busybox:notlatest", false},
+ {"busybox:latest", "busybox:latest" + digestSuffix, false},
+ {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, false},
+ {"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffixOther, false},
+}
+
+func testImageAndSig(t *testing.T, prm PolicyReferenceMatch, imageRef, sigRef string, result bool) {
+ // This assumes that all ways to obtain a reference.Named perform equivalent validation,
+ // and therefore values refused by reference.ParseNormalizedNamed can not happen in practice.
+ parsedImageRef, err := reference.ParseNormalizedNamed(imageRef)
+ require.NoError(t, err)
+ res := prm.matchesDockerReference(refImageMock{ref: parsedImageRef}, sigRef)
+ assert.Equal(t, result, res, fmt.Sprintf("%s vs. %s", imageRef, sigRef))
+}
+
+// testPossiblyInvalidImageAndSig is a variant of testImageAndSig
+// that does not fail if the imageRef is invalid (which should never happen in practice,
+// but makes testing of symmetrical properties using shared tables easier)
+func testPossiblyInvalidImageAndSig(t *testing.T, prm PolicyReferenceMatch, imageRef, sigRef string, result bool) {
+ // This assumes that all ways to obtain a reference.Named perform equivalent validation,
+ // and therefore values refused by reference.ParseNormalizedNamed can not happen in practice.
+ _, err := reference.ParseNormalizedNamed(imageRef)
+ if err != nil {
+ return
+ }
+ testImageAndSig(t, prm, imageRef, sigRef, result)
+}
+
+func TestMatchRepoDigestOrExactReferenceValues(t *testing.T) {
+ // prmMatchRepoDigestOrExact is a middle ground between prmMatchExact and prmMatchRepository:
+ // It accepts anything prmMatchExact accepts,…
+ for _, test := range prmExactMatchTestTable {
+ if test.result == true {
+ refA, errA := reference.ParseNormalizedNamed(test.refA)
+ refB, errB := reference.ParseNormalizedNamed(test.refB)
+ if errA == nil && errB == nil {
+ res1 := matchRepoDigestOrExactReferenceValues(refA, refB)
+ assert.Equal(t, test.result, res1)
+ res2 := matchRepoDigestOrExactReferenceValues(refB, refA)
+ assert.Equal(t, test.result, res2)
+ }
+ }
+ }
+ // … and it rejects everything prmMatchRepository rejects.
+ for _, test := range prmRepositoryMatchTestTable {
+ if test.result == false {
+ refA, errA := reference.ParseNormalizedNamed(test.refA)
+ refB, errB := reference.ParseNormalizedNamed(test.refB)
+ if errA == nil && errB == nil {
+ res1 := matchRepoDigestOrExactReferenceValues(refA, refB)
+ assert.Equal(t, test.result, res1)
+ res2 := matchRepoDigestOrExactReferenceValues(refB, refA)
+ assert.Equal(t, test.result, res2)
+ }
+ }
+ }
+
+ // The other cases, possibly asymmetrical:
+ for _, test := range matchRepoDigestOrExactTestTable {
+ imageRef, err := reference.ParseNormalizedNamed(test.imageRef)
+ require.NoError(t, err)
+ sigRef, err := reference.ParseNormalizedNamed(test.sigRef)
+ require.NoError(t, err)
+ res := matchRepoDigestOrExactReferenceValues(imageRef, sigRef)
+ assert.Equal(t, test.result, res)
+ }
+}
+
+func TestPRMMatchExactMatchesDockerReference(t *testing.T) {
+ prm := NewPRMMatchExact()
+ for _, test := range prmExactMatchTestTable {
+ testPossiblyInvalidImageAndSig(t, prm, test.refA, test.refB, test.result)
+ testPossiblyInvalidImageAndSig(t, prm, test.refB, test.refA, test.result)
+ }
+ // Even if they are signed with an empty string as a reference, unidentified images are rejected.
+ res := prm.matchesDockerReference(refImageMock{ref: nil}, "")
+ assert.False(t, res, `unidentified vs. ""`)
+}
+
+func TestPRMMatchRepoDigestOrExactMatchesDockerReference(t *testing.T) {
+ prm := NewPRMMatchRepoDigestOrExact()
+
+ // prmMatchRepoDigestOrExact is a middle ground between prmMatchExact and prmMatchRepository:
+ // It accepts anything prmMatchExact accepts,…
+ for _, test := range prmExactMatchTestTable {
+ if test.result == true {
+ testPossiblyInvalidImageAndSig(t, prm, test.refA, test.refB, test.result)
+ testPossiblyInvalidImageAndSig(t, prm, test.refB, test.refA, test.result)
+ }
+ }
+ // … and it rejects everything prmMatchRepository rejects.
+ for _, test := range prmRepositoryMatchTestTable {
+ if test.result == false {
+ testPossiblyInvalidImageAndSig(t, prm, test.refA, test.refB, test.result)
+ testPossiblyInvalidImageAndSig(t, prm, test.refB, test.refA, test.result)
+ }
+ }
+
+ // The other cases, possibly asymmetrical:
+ for _, test := range matchRepoDigestOrExactTestTable {
+ testImageAndSig(t, prm, test.imageRef, test.sigRef, test.result)
+ }
+}
+
+func TestPRMMatchRepositoryMatchesDockerReference(t *testing.T) {
+ prm := NewPRMMatchRepository()
+ for _, test := range prmRepositoryMatchTestTable {
+ testPossiblyInvalidImageAndSig(t, prm, test.refA, test.refB, test.result)
+ testPossiblyInvalidImageAndSig(t, prm, test.refB, test.refA, test.result)
+ }
+ // Even if they are signed with an empty string as a reference, unidentified images are rejected.
+ res := prm.matchesDockerReference(refImageMock{ref: nil}, "")
+ assert.False(t, res, `unidentified vs. ""`)
+}
+
+func TestParseDockerReferences(t *testing.T) {
+ const (
+ ok1 = "busybox"
+ ok2 = fullRHELRef
+ bad1 = "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES"
+ bad2 = ""
+ )
+
+ // Success
+ r1, r2, err := parseDockerReferences(ok1, ok2)
+ require.NoError(t, err)
+ assert.Equal(t, ok1, reference.FamiliarString(r1))
+ assert.Equal(t, ok2, reference.FamiliarString(r2))
+
+ // Failures
+ for _, refs := range [][]string{
+ {bad1, ok2},
+ {ok1, bad2},
+ {bad1, bad2},
+ } {
+ _, _, err := parseDockerReferences(refs[0], refs[1])
+ assert.Error(t, err)
+ }
+}
+
+func testExactPRMAndSig(t *testing.T, prmFactory func(string) PolicyReferenceMatch, imageRef, sigRef string, result bool) {
+ prm := prmFactory(imageRef)
+ res := prm.matchesDockerReference(mocks.ForbiddenUnparsedImage{}, sigRef)
+ assert.Equal(t, result, res, fmt.Sprintf("%s vs. %s", imageRef, sigRef))
+}
+
+func prmExactReferenceFactory(ref string) PolicyReferenceMatch {
+ // Do not use NewPRMExactReference, we want to also test the case with an invalid DockerReference,
+ // even though NewPRMExactReference should never let it happen.
+ return &prmExactReference{DockerReference: ref}
+}
+
+func TestPRMExactReferenceMatchesDockerReference(t *testing.T) {
+ for _, test := range prmExactMatchTestTable {
+ testExactPRMAndSig(t, prmExactReferenceFactory, test.refA, test.refB, test.result)
+ testExactPRMAndSig(t, prmExactReferenceFactory, test.refB, test.refA, test.result)
+ }
+}
+
+func prmExactRepositoryFactory(ref string) PolicyReferenceMatch {
+ // Do not use NewPRMExactRepository, we want to also test the case with an invalid DockerReference,
+ // even though NewPRMExactRepository should never let it happen.
+ return &prmExactRepository{DockerRepository: ref}
+}
+
+func TestPRMExactRepositoryMatchesDockerReference(t *testing.T) {
+ for _, test := range prmRepositoryMatchTestTable {
+ testExactPRMAndSig(t, prmExactRepositoryFactory, test.refA, test.refB, test.result)
+ testExactPRMAndSig(t, prmExactRepositoryFactory, test.refB, test.refA, test.result)
+ }
+}
+
+func TestPRMRemapIdentityRefMatchesPrefix(t *testing.T) {
+ for _, c := range []struct {
+ ref, prefix string
+ expected bool
+ }{
+ // Prefix is a reference.Domain() value
+ {"docker.io/image", "docker.io", true},
+ {"docker.io/image", "example.com", false},
+ {"example.com:5000/image", "example.com:5000", true},
+ {"example.com:50000/image", "example.com:5000", false},
+ {"example.com:5000/image", "example.com", false},
+ {"example.com/foo", "example.com", true},
+ {"example.com/foo/bar", "example.com", true},
+ {"example.com/foo/bar:baz", "example.com", true},
+ {"example.com/foo/bar" + digestSuffix, "example.com", true},
+ // Prefix is a reference.Named.Name() value or a repo namespace
+ {"docker.io/ns/image", "docker.io/library", false},
+ {"example.com/library", "docker.io/library", false},
+ {"docker.io/libraryy/image", "docker.io/library", false},
+ {"docker.io/library/busybox", "docker.io/library", true},
+ {"example.com/ns/image", "example.com/ns", true},
+ {"example.com/ns2/image", "example.com/ns", false},
+ {"example.com/n2/image", "example.com/ns", false},
+ {"example.com", "example.com/library/busybox", false},
+ {"example.com:5000/ns/image", "example.com/ns", false},
+ {"example.com/ns/image", "example.com:5000/ns", false},
+ {"docker.io/library/busybox", "docker.io/library/busybox", true},
+ {"example.com/library/busybox", "docker.io/library/busybox", false},
+ {"docker.io/library/busybox2", "docker.io/library/busybox", false},
+ {"example.com/ns/image", "example.com/ns/image", true},
+ {"example.com/ns/imag2", "example.com/ns/image", false},
+ {"example.com/ns/imagee", "example.com/ns/image", false},
+ {"example.com:5000/ns/image", "example.com/ns/image", false},
+ {"example.com/ns/image", "example.com:5000/ns/image", false},
+ {"example.com/ns/image:tag", "example.com/ns/image", true},
+ {"example.com/ns/image" + digestSuffix, "example.com/ns/image", true},
+ {"example.com/ns/image:tag" + digestSuffix, "example.com/ns/image", true},
+ } {
+ prm, err := newPRMRemapIdentity(c.prefix, "docker.io/library/signed-prefix")
+ require.NoError(t, err, c.prefix)
+ ref, err := reference.ParseNormalizedNamed(c.ref)
+ require.NoError(t, err, c.ref)
+ res := prm.refMatchesPrefix(ref)
+ assert.Equal(t, c.expected, res, fmt.Sprintf("%s vs. %s", c.ref, c.prefix))
+ }
+}
+
+func TestPRMRemapIdentityRemapReferencePrefix(t *testing.T) {
+ for _, c := range []struct{ prefix, signedPrefix, ref, expected string }{
+ // Match sanity checking, primarily tested in TestPRMRefMatchesPrefix
+ {"mirror.example", "vendor.example", "mirror.example/ns/image:tag", "vendor.example/ns/image:tag"},
+ {"mirror.example", "vendor.example", "different.com/ns/image:tag", "different.com/ns/image:tag"},
+ {"mirror.example/ns", "vendor.example/vendor-ns", "mirror.example/different-ns/image:tag", "mirror.example/different-ns/image:tag"},
+ {"docker.io", "not-docker-signed.example/ns", "busybox", "not-docker-signed.example/ns/library/busybox"},
+ // Rewrites work as expected
+ {"mirror.example", "vendor.example", "mirror.example/ns/image:tag", "vendor.example/ns/image:tag"},
+ {"example.com/mirror", "example.com/vendor", "example.com/mirror/image:tag", "example.com/vendor/image:tag"},
+ {"example.com/ns/mirror", "example.com/ns/vendor", "example.com/ns/mirror:tag", "example.com/ns/vendor:tag"},
+ {"mirror.example", "vendor.example", "prefixmirror.example/ns/image:tag", "prefixmirror.example/ns/image:tag"},
+ {"docker.io", "not-docker-signed.example", "busybox", "not-docker-signed.example/library/busybox"},
+ {"docker.io/library", "not-docker-signed.example/ns", "busybox", "not-docker-signed.example/ns/busybox"},
+ {"docker.io/library/busybox", "not-docker-signed.example/ns/notbusybox", "busybox", "not-docker-signed.example/ns/notbusybox"},
+ // On match, tag/digest is preserved
+ {"mirror.example", "vendor.example", "mirror.example/image", "vendor.example/image"}, // This one should not actually happen, testing for completeness
+ {"mirror.example", "vendor.example", "mirror.example/image:tag", "vendor.example/image:tag"},
+ {"mirror.example", "vendor.example", "mirror.example/image" + digestSuffix, "vendor.example/image" + digestSuffix},
+ {"mirror.example", "vendor.example", "mirror.example/image:tag" + digestSuffix, "vendor.example/image:tag" + digestSuffix},
+ // Rewrite creating an invalid reference
+ {"mirror.example/ns/image", "vendor.example:5000", "mirror.example/ns/image:tag", ""},
+ // Rewrite creating a valid reference string in short format, which would imply a docker.io prefix and is rejected
+ {"mirror.example/ns/image", "vendor.example:5000", "mirror.example/ns/image" + digestSuffix, ""}, // vendor.example:5000@digest
+ {"mirror.example/ns/image", "notlocalhost", "mirror.example/ns/image:tag", ""}, // notlocalhost:tag
+ } {
+ testName := fmt.Sprintf("%#v", c)
+ prm, err := newPRMRemapIdentity(c.prefix, c.signedPrefix)
+ require.NoError(t, err, testName)
+ ref, err := reference.ParseNormalizedNamed(c.ref)
+ require.NoError(t, err, testName)
+ res, err := prm.remapReferencePrefix(ref)
+ if c.expected == "" {
+ assert.Error(t, err, testName)
+ } else {
+ require.NoError(t, err, testName)
+ assert.Equal(t, c.expected, res.String(), testName)
+ }
+ }
+}
+
+// modifiedString returns some string that is different from the input,
+// consistent across calls with the same input;
+// in particular it just replaces the first letter.
+func modifiedString(t *testing.T, input string) string {
+ c := input[0]
+ switch {
+ case c >= 'a' && c <= 'y':
+ c++
+ case c == 'z':
+ c = 'a'
+ default:
+		require.Fail(t, fmt.Sprintf("unimplemented leading character '%c'", c))
+ }
+ return string(c) + input[1:]
+}
+
+// prmRemapIdentityMRDOETestCase is a helper for TestPRMRemapIdentityMatchesDockerReference,
+// verifying that the behavior is consistent with prmMatchRepoDigestOrExact,
+// while still smoke-testing the rewriting behavior.
+// The test is a no-op (trivially succeeds) if imageRef is invalid and ignoreInvalidImageRef is true.
+func prmRemapIdentityMRDOETestCase(t *testing.T, ignoreInvalidImageRef bool, imageRef, sigRef string, result bool) {
+ parsedImageRef, err := reference.ParseNormalizedNamed(imageRef)
+ if ignoreInvalidImageRef && err != nil {
+ return
+ }
+ require.NoError(t, err)
+
+ // No rewriting happens.
+ prm, err := NewPRMRemapIdentity("never-causes-a-rewrite.example", "never-causes-a-rewrite.example")
+ require.NoError(t, err)
+ testImageAndSig(t, prm, imageRef, sigRef, result)
+
+ // Rewrite imageRef
+ domain := reference.Domain(parsedImageRef)
+ prm, err = NewPRMRemapIdentity(modifiedString(t, domain), domain)
+ require.NoError(t, err)
+ modifiedImageRef, err := reference.ParseNormalizedNamed(modifiedString(t, parsedImageRef.String()))
+ require.NoError(t, err)
+ testImageAndSig(t, prm, modifiedImageRef.String(), sigRef, result)
+}
+
+func TestPRMRemapIdentityMatchesDockerReference(t *testing.T) {
+ // Basic sanity checks. More detailed testing is done in TestPRMRemapIdentityRemapReferencePrefix
+ // and TestMatchRepoDigestOrExactReferenceValues.
+ for _, c := range []struct {
+ prefix, signedPrefix, imageRef, sigRef string
+ result bool
+ }{
+ // No match rewriting
+ {"does-not-match.com", "does-not-match.rewritten", "busybox:latest", "busybox:latest", true},
+ {"does-not-match.com", "does-not-match.rewritten", "busybox:latest", "notbusybox:latest", false},
+ // Match rewriting non-docker
+ {"mirror.example", "public.com", "mirror.example/busybox:1", "public.com/busybox:1", true},
+ {"mirror.example", "public.com", "mirror.example/busybox:1", "public.com/busybox:not1", false},
+ // Rewriting to docker.io
+ {"mirror.example", "docker.io/library", "mirror.example/busybox:latest", "busybox:latest", true},
+ {"mirror.example", "docker.io/library", "mirror.example/alpine:latest", "busybox:latest", false},
+ // Rewriting from docker.io
+ {"docker.io/library", "original.com", "copied:latest", "original.com/copied:latest", true},
+ {"docker.io/library", "original.com", "copied:latest", "original.com/ns/copied:latest", false},
+ // Invalid object: prefix is not a host name
+ {"busybox", "example.com/busybox", "busybox:latest", "example.com/busybox:latest", false},
+ // Invalid object: signedPrefix is not a host name
+ {"docker.io/library/busybox", "busybox", "docker.io/library/busybox:latest", "busybox:latest", false},
+ // Invalid object: invalid prefix
+ {"UPPERCASE", "example.com", "example.com/foo:latest", "example.com/foo:latest", true}, // Happens to work, not an API promise
+ {"example.com", "UPPERCASE", "example.com/foo:latest", "UPPERCASE/foo:latest", false},
+ } {
+ // Do not use NewPRMRemapIdentity, we want to also test the cases with invalid values,
+		// even though NewPRMRemapIdentity should never let it happen.
+ prm := &prmRemapIdentity{Prefix: c.prefix, SignedPrefix: c.signedPrefix}
+ testImageAndSig(t, prm, c.imageRef, c.sigRef, c.result)
+ }
+ // Even if they are signed with an empty string as a reference, unidentified images are rejected.
+ prm, err := NewPRMRemapIdentity("docker.io", "docker.io")
+ require.NoError(t, err)
+ res := prm.matchesDockerReference(refImageMock{ref: nil}, "")
+ assert.False(t, res, `unidentified vs. ""`)
+
+ // Verify that the behavior is otherwise the same as for prmMatchRepoDigestOrExact:
+ // prmMatchRepoDigestOrExact is a middle ground between prmMatchExact and prmMatchRepository:
+ // It accepts anything prmMatchExact accepts,…
+ for _, test := range prmExactMatchTestTable {
+ if test.result == true {
+ prmRemapIdentityMRDOETestCase(t, true, test.refA, test.refB, test.result)
+ prmRemapIdentityMRDOETestCase(t, true, test.refB, test.refA, test.result)
+ }
+ }
+ // … and it rejects everything prmMatchRepository rejects.
+ for _, test := range prmRepositoryMatchTestTable {
+ if test.result == false {
+ prmRemapIdentityMRDOETestCase(t, true, test.refA, test.refB, test.result)
+ prmRemapIdentityMRDOETestCase(t, true, test.refB, test.refA, test.result)
+ }
+ }
+
+ // The other cases, possibly asymmetrical:
+ for _, test := range matchRepoDigestOrExactTestTable {
+ prmRemapIdentityMRDOETestCase(t, false, test.imageRef, test.sigRef, test.result)
+ }
+}
diff --git a/signature/policy_types.go b/signature/policy_types.go
new file mode 100644
index 0000000..96e91a0
--- /dev/null
+++ b/signature/policy_types.go
@@ -0,0 +1,215 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+ // Default applies to any image which does not have a matching policy in Transports.
+ // Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+ // if the image matches none of the scopes.
+ Default PolicyRequirements `json:"default"`
+ Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching to a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// The most specific scope wins; duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
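+
+// For orientation only (not an API guarantee, and not how policies should be built in code —
+// use the constructors in policy_config.go for that): a minimal policy JSON using these types,
+// with the docker transport and a placeholder scope, could look like
+//
+//	{
+//	  "default": [{"type": "reject"}],
+//	  "transports": {
+//	    "docker": {
+//	      "docker.io/library": [{"type": "insecureAcceptAnything"}]
+//	    }
+//	  }
+//	}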
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+ Type prTypeIdentifier `json:"type"`
+}
+
+// prTypeIdentifier is a string designating a kind of PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+ prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+ prTypeReject prTypeIdentifier = "reject"
+ prTypeSignedBy prTypeIdentifier = "signedBy"
+ prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
+ prTypeSigstoreSigned prTypeIdentifier = "sigstoreSigned"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+ prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+ prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+ prCommon
+
+ // KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X509Certificates” | “signedByX509CAs”
+ // FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+ KeyType sbKeyType `json:"keyType"`
+
+ // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+ KeyPath string `json:"keyPath,omitempty"`
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+ KeyPaths []string `json:"keyPaths,omitempty"`
+ // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+ KeyData []byte `json:"keyData,omitempty"`
+
+ // SignedIdentity specifies what image identity the signature must be claiming about the image.
+ // Defaults to "matchRepoDigestOrExact" if not specified.
+ SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
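+
+// Illustrative sketch only (see docs/containers-policy.json.5.md for the authoritative format):
+// with the JSON field names above, and a placeholder key path, a signedBy requirement could be
+// written as
+//
+//	{
+//	  "type": "signedBy",
+//	  "keyType": "GPGKeys",
+//	  "keyPath": "/path/to/pubring.gpg",
+//	  "signedIdentity": {"type": "matchRepoDigestOrExact"}
+//	}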
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+ // SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+ SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+ // SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+ SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+ // SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+ // FIXME: PEM, DER?
+ SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+ // SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+ // FIXME: PEM, DER?
+ SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+ prCommon
+	// BaseLayerIdentity specifies the base image to look for. "matchExact" is rejected, "matchRepository" is unlikely to be useful.
+ BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity
+type prSigstoreSigned struct {
+ prCommon
+
+ // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+ KeyPath string `json:"keyPath,omitempty"`
+ // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+ KeyData []byte `json:"keyData,omitempty"`
+ // FIXME: Multiple public keys?
+
+ // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+ // If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
+ Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
+
+ // RekorPublicKeyPath is a pathname to local file containing a public key of a Rekor server which must record acceptable signatures.
+ // If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
+ // (and Rekor inclusion is not required if a Rekor public key is not specified).
+ RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
+	// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
+ // If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
+ // (and Rekor inclusion is not required if a Rekor public key is not specified).
+ RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
+
+ // SignedIdentity specifies what image identity the signature must be claiming about the image.
+ // Defaults to "matchRepoDigestOrExact" if not specified.
+ // Note that /usr/bin/cosign interoperability might require using repo-only matching.
+ SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// PRSigstoreSignedFulcio contains Fulcio configuration options for a "sigstoreSigned" PolicyRequirement.
+// This is a public type with a single private implementation.
+type PRSigstoreSignedFulcio interface {
+ // toFulcioTrustRoot creates a fulcioTrustRoot from the input data.
+ // (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+ prepareTrustRoot() (*fulcioTrustRoot, error)
+}
+
+// prSigstoreSignedFulcio collects Fulcio configuration options for prSigstoreSigned
+type prSigstoreSignedFulcio struct {
+	// CAPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CAPath and CAData must be specified.
+ CAPath string `json:"caPath,omitempty"`
+ // CAData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CAPath and CAData must be specified.
+ CAData []byte `json:"caData,omitempty"`
+ // OIDCIssuer specifies the expected OIDC issuer, recorded by Fulcio into the generated certificates.
+ OIDCIssuer string `json:"oidcIssuer,omitempty"`
+ // SubjectEmail specifies the expected email address of the authenticated OIDC identity, recorded by Fulcio into the generated certificates.
+ SubjectEmail string `json:"subjectEmail,omitempty"`
+}
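+
+// Illustrative sketch only (see docs/containers-policy.json.5.md for the authoritative format):
+// with the JSON field names above, and placeholder paths/issuer/email, a sigstoreSigned
+// requirement using Fulcio could be written as
+//
+//	{
+//	  "type": "sigstoreSigned",
+//	  "fulcio": {
+//	    "caPath": "/path/to/fulcio_ca.pem",
+//	    "oidcIssuer": "https://oidc.issuer.example.com",
+//	    "subjectEmail": "signer@example.com"
+//	  },
+//	  "rekorPublicKeyPath": "/path/to/rekor.pub",
+//	  "signedIdentity": {"type": "matchRepoDigestOrExact"}
+//	}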
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+ Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+const (
+ prmTypeMatchExact prmTypeIdentifier = "matchExact"
+ prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+ prmTypeMatchRepository prmTypeIdentifier = "matchRepository"
+ prmTypeExactReference prmTypeIdentifier = "exactReference"
+ prmTypeExactRepository prmTypeIdentifier = "exactRepository"
+ prmTypeRemapIdentity prmTypeIdentifier = "remapIdentity"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
+type prmMatchExact struct {
+ prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest.
+type prmMatchRepoDigestOrExact struct {
+ prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository; they may differ in the tag.
+type prmMatchRepository struct {
+ prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+ prmCommon
+ DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+ prmCommon
+ DockerRepository string `json:"dockerRepository"`
+}
+
+// prmRemapIdentity is a PolicyReferenceMatch with type = prmRemapIdentity: like prmMatchRepoDigestOrExact,
+// except that a namespace (at least a host:port, at most a single repository) is substituted before matching the two references.
+type prmRemapIdentity struct {
+ prmCommon
+ Prefix string `json:"prefix"`
+ SignedPrefix string `json:"signedPrefix"`
+ // Possibly let the users make a choice for tag/digest matching behavior
+ // similar to prmMatchExact/prmMatchRepository?
+}
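+
+// Illustrative sketch only (placeholder registry names): in policy JSON, a remapIdentity
+// reference match that treats content pulled from a mirror as if it came from the original
+// registry could be written as
+//
+//	{
+//	  "type": "remapIdentity",
+//	  "prefix": "mirror.example.com",
+//	  "signedPrefix": "registry.example.com"
+//	}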
diff --git a/signature/signer/signer.go b/signature/signer/signer.go
new file mode 100644
index 0000000..73ae550
--- /dev/null
+++ b/signature/signer/signer.go
@@ -0,0 +1,9 @@
+package signer
+
+import "github.com/containers/image/v5/internal/signer"
+
+// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// It can only be created from within the containers/image package; it can’t be implemented externally.
+//
+// The owner of a Signer must call Close() when done.
+type Signer = signer.Signer
diff --git a/signature/sigstore/copied.go b/signature/sigstore/copied.go
new file mode 100644
index 0000000..2e510f6
--- /dev/null
+++ b/signature/sigstore/copied.go
@@ -0,0 +1,103 @@
+package sigstore
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+
+ "github.com/secure-systems-lab/go-securesystemslib/encrypted"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// The following code was copied from github.com/sigstore.
+// FIXME: Eliminate that duplication.
+
+// Copyright 2021 The Sigstore Authors.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const (
+ // from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType.
+ cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
+ // from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType.
+ sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY"
+)
+
+// from sigstore/cosign/pkg/cosign.loadPrivateKey
+// FIXME: Do we need all of these key formats?
+func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
+ // Decrypt first
+ p, _ := pem.Decode(key)
+ if p == nil {
+ return nil, errors.New("invalid pem block")
+ }
+ if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType {
+ return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
+ }
+
+ x509Encoded, err := encrypted.Decrypt(p.Bytes, pass)
+ if err != nil {
+ return nil, fmt.Errorf("decrypt: %w", err)
+ }
+
+ pk, err := x509.ParsePKCS8PrivateKey(x509Encoded)
+ if err != nil {
+ return nil, fmt.Errorf("parsing private key: %w", err)
+ }
+ switch pk := pk.(type) {
+ case *rsa.PrivateKey:
+ return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256)
+ case *ecdsa.PrivateKey:
+ return signature.LoadECDSASignerVerifier(pk, crypto.SHA256)
+ case ed25519.PrivateKey:
+ return signature.LoadED25519SignerVerifier(pk)
+ default:
+ return nil, errors.New("unsupported key type")
+ }
+}
+
+// simplified from sigstore/cosign/pkg/cosign.marshalKeyPair
+// loadPrivateKey always requires encryption, so this always requires a passphrase.
+func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, password []byte) (_privateKey []byte, _publicKey []byte, err error) {
+ x509Encoded, err := x509.MarshalPKCS8PrivateKey(privateKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("x509 encoding private key: %w", err)
+ }
+
+ encBytes, err := encrypted.Encrypt(x509Encoded, password)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // store in PEM format
+ privBytes := pem.EncodeToMemory(&pem.Block{
+ Bytes: encBytes,
+ // Use the older “COSIGN” type name; as of 2023-03-30 cosign’s main branch generates “SIGSTORE” types,
+ // but a version of cosign that can accept them has not yet been released.
+ Type: cosignPrivateKeyPemType,
+ })
+
+ // Now do the public key
+ pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return privBytes, pubBytes, nil
+}
diff --git a/signature/sigstore/fulcio/fulcio.go b/signature/sigstore/fulcio/fulcio.go
new file mode 100644
index 0000000..0e6746a
--- /dev/null
+++ b/signature/sigstore/fulcio/fulcio.go
@@ -0,0 +1,155 @@
+package fulcio
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "net/url"
+
+ "github.com/containers/image/v5/internal/useragent"
+ "github.com/containers/image/v5/signature/sigstore/internal"
+ "github.com/sigstore/fulcio/pkg/api"
+ "github.com/sigstore/sigstore/pkg/oauth"
+ "github.com/sigstore/sigstore/pkg/oauthflow"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/oauth2"
+)
+
+// setupSignerWithFulcio updates s with a certificate generated by fulcioURL based on oidcIDToken
+func setupSignerWithFulcio(s *internal.SigstoreSigner, fulcioURL *url.URL, oidcIDToken *oauthflow.OIDCIDToken) error {
+ // ECDSA-P256 is the only interoperable algorithm per
+ // https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes .
+ privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ return fmt.Errorf("generating short-term private key: %w", err)
+ }
+ keyAlgorithm := "ecdsa"
+ // SHA-256 is opencontainers/go-digest.Canonical, thus the algorithm to use here as well per
+ // https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#hashing-algorithms
+ signer, err := sigstoreSignature.LoadECDSASigner(privateKey, crypto.SHA256)
+ if err != nil {
+ return fmt.Errorf("initializing short-term private key: %w", err)
+ }
+ s.PrivateKey = signer
+
+ logrus.Debugf("Requesting a certificate from Fulcio at %s", fulcioURL.Redacted())
+ fulcioClient := api.NewClient(fulcioURL, api.WithUserAgent(useragent.DefaultUserAgent))
+ // Sign the email address as part of the request
+ h := sha256.Sum256([]byte(oidcIDToken.Subject))
+ keyOwnershipProof, err := ecdsa.SignASN1(rand.Reader, privateKey, h[:])
+ if err != nil {
+ return fmt.Errorf("Error signing key ownership proof: %w", err)
+ }
+ publicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)
+ if err != nil {
+ return fmt.Errorf("converting public key to ASN.1: %w", err)
+ }
+ // Note that unlike most OAuth2 uses, this passes the ID token, not an access token.
+ // This is only secure if every Fulcio server has an individual client ID value
+ // = fulcioOIDCClientID, distinct from other Fulcio servers,
+ // that is embedded into the ID token’s "aud" field.
+ resp, err := fulcioClient.SigningCert(api.CertificateRequest{
+ PublicKey: api.Key{
+ Content: publicKeyBytes,
+ Algorithm: keyAlgorithm,
+ },
+ SignedEmailAddress: keyOwnershipProof,
+ }, oidcIDToken.RawString)
+ if err != nil {
+ return fmt.Errorf("obtaining certificate from Fulcio: %w", err)
+ }
+ s.FulcioGeneratedCertificate = resp.CertPEM
+ s.FulcioGeneratedCertificateChain = resp.ChainPEM
+ // Cosign goes through an unmarshal/marshal roundtrip for Fulcio-generated certificates, let’s not do that.
+ s.SigningKeyOrCert = resp.CertPEM
+ return nil
+}
+
+// WithFulcioAndPreexistingOIDCIDToken sets up signing to use a short-lived key and a Fulcio-issued certificate
+// based on a caller-provided OIDC ID token.
+func WithFulcioAndPreexistingOIDCIDToken(fulcioURL *url.URL, oidcIDToken string) internal.Option {
+ return func(s *internal.SigstoreSigner) error {
+ if s.PrivateKey != nil {
+ return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
+ }
+
+ // This adds dependencies even just to parse the token. We could possibly reimplement that, and split this variant
+ // into a subpackage without the OIDC dependencies… but really, is this going to be used in significantly different situations
+ // than the two interactive OIDC authentication workflows?
+ //
+ // Are there any widely used tools to manually obtain an ID token? Why would there be?
+ // For long-term usage, users provisioning a static OIDC credential might just as well provision an already-generated certificate
+ // or something like that.
+ logrus.Debugf("Using a statically-provided OIDC token")
+ staticTokenGetter := oauthflow.StaticTokenGetter{RawToken: oidcIDToken}
+ oidcIDToken, err := staticTokenGetter.GetIDToken(nil, oauth2.Config{})
+ if err != nil {
+ return fmt.Errorf("parsing OIDC token: %w", err)
+ }
+
+ return setupSignerWithFulcio(s, fulcioURL, oidcIDToken)
+ }
+}
+
+// WithFulcioAndDeviceAuthorizationGrantOIDC sets up signing to use a short-lived key and a Fulcio-issued certificate
+// based on an OIDC ID token obtained using a device authorization grant (RFC 8628).
+//
+// interactiveOutput must be directly accessible to a human user in real time (i.e. not be just a log file).
+func WithFulcioAndDeviceAuthorizationGrantOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oidcClientID, oidcClientSecret string,
+ interactiveOutput io.Writer) internal.Option {
+ return func(s *internal.SigstoreSigner) error {
+ if s.PrivateKey != nil {
+ return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
+ }
+
+ logrus.Debugf("Starting OIDC device flow for issuer %s", oidcIssuerURL.Redacted())
+ tokenGetter := oauthflow.NewDeviceFlowTokenGetterForIssuer(oidcIssuerURL.String())
+ tokenGetter.MessagePrinter = func(s string) {
+ fmt.Fprintln(interactiveOutput, s)
+ }
+ oidcIDToken, err := oauthflow.OIDConnect(oidcIssuerURL.String(), oidcClientID, oidcClientSecret, "", tokenGetter)
+ if err != nil {
+ return fmt.Errorf("Error authenticating with OIDC: %w", err)
+ }
+
+ return setupSignerWithFulcio(s, fulcioURL, oidcIDToken)
+ }
+}
+
+// WithFulcioAndInteractiveOIDC sets up signing to use a short-lived key and a Fulcio-issued certificate
+// based on an interactively-obtained OIDC ID token.
+// The token is obtained
+// - directly using a browser, listening on localhost, automatically opening a browser to the OIDC issuer,
+//     to be redirected on localhost. (I.e. the current environment must allow launching a browser that connects back to the current process;
+// either or both may be impossible in a container or a remote VM).
+// - or by instructing the user to manually open a browser, obtain the OIDC code, and interactively input it as text.
+//
+// interactiveInput and interactiveOutput must both be directly operable by a human user in real time (i.e. not be just a log file).
+func WithFulcioAndInteractiveOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oidcClientID, oidcClientSecret string,
+ interactiveInput io.Reader, interactiveOutput io.Writer) internal.Option {
+ return func(s *internal.SigstoreSigner) error {
+ if s.PrivateKey != nil {
+ return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
+ }
+
+ logrus.Debugf("Starting interactive OIDC authentication for issuer %s", oidcIssuerURL.Redacted())
+ // This is intended to match oauthflow.DefaultIDTokenGetter, overriding only input/output
+ tokenGetter := &oauthflow.InteractiveIDTokenGetter{
+ HTMLPage: oauth.InteractiveSuccessHTML,
+ Input: interactiveInput,
+ Output: interactiveOutput,
+ }
+ oidcIDToken, err := oauthflow.OIDConnect(oidcIssuerURL.String(), oidcClientID, oidcClientSecret, "", tokenGetter)
+ if err != nil {
+ return fmt.Errorf("Error authenticating with OIDC: %w", err)
+ }
+
+ return setupSignerWithFulcio(s, fulcioURL, oidcIDToken)
+ }
+}
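+
+// A minimal usage sketch, not part of this package’s API surface (the URL and the rawOIDCIDToken
+// value are placeholders); one of the options above is passed to sigstore.NewSigner from the
+// parent package:
+//
+//	fulcioURL, err := url.Parse("https://fulcio.example.com")
+//	// …handle err…
+//	s, err := sigstore.NewSigner(
+//		fulcio.WithFulcioAndPreexistingOIDCIDToken(fulcioURL, rawOIDCIDToken),
+//	)
+//	// …handle err, defer s.Close()…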
diff --git a/signature/sigstore/generate.go b/signature/sigstore/generate.go
new file mode 100644
index 0000000..77520c1
--- /dev/null
+++ b/signature/sigstore/generate.go
@@ -0,0 +1,35 @@
+package sigstore
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+)
+
+// GenerateKeyPairResult is a struct to ensure the private and public parts can not be confused by the caller.
+type GenerateKeyPairResult struct {
+ PublicKey []byte
+ PrivateKey []byte
+}
+
+// GenerateKeyPair generates a public/private key pair usable for signing images using the sigstore format,
+// and returns key representations suitable for storing in long-term files (with the private key encrypted using the provided passphrase).
+// The specific key kind (e.g. algorithm, size), as well as the file format, are unspecified by this API,
+// and can change with best practices over time.
+func GenerateKeyPair(passphrase []byte) (*GenerateKeyPairResult, error) {
+ // https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes
+ // only requires ECDSA-P256 to be supported, so that’s what we must use.
+ rawKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ // Coverage: This can fail only if the randomness source fails
+ return nil, err
+ }
+ private, public, err := marshalKeyPair(rawKey, rawKey.Public(), passphrase)
+ if err != nil {
+ return nil, err
+ }
+ return &GenerateKeyPairResult{
+ PublicKey: public,
+ PrivateKey: private,
+ }, nil
+}
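+
+// A minimal usage sketch (the passphrase and file names are placeholders; error handling elided):
+//
+//	res, err := GenerateKeyPair([]byte("example passphrase"))
+//	// …handle err…
+//	_ = os.WriteFile("cosign.key", res.PrivateKey, 0o600)
+//	_ = os.WriteFile("cosign.pub", res.PublicKey, 0o644)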
diff --git a/signature/sigstore/generate_test.go b/signature/sigstore/generate_test.go
new file mode 100644
index 0000000..7861a92
--- /dev/null
+++ b/signature/sigstore/generate_test.go
@@ -0,0 +1,64 @@
+package sigstore
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/signature"
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/opencontainers/go-digest"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGenerateKeyPair(t *testing.T) {
+ // Test that generation is possible, and the key can be used for signing.
+ testManifest := []byte("{}")
+ testDockerReference, err := reference.ParseNormalizedNamed("example.com/foo:notlatest")
+ require.NoError(t, err)
+
+ passphrase := []byte("some passphrase")
+ keyPair, err := GenerateKeyPair(passphrase)
+ require.NoError(t, err)
+
+ tmpDir := t.TempDir()
+ privateKeyFile := filepath.Join(tmpDir, "private.key")
+ err = os.WriteFile(privateKeyFile, keyPair.PrivateKey, 0600)
+ require.NoError(t, err)
+
+ signer, err := NewSigner(WithPrivateKeyFile(privateKeyFile, passphrase))
+ require.NoError(t, err)
+ sig0, err := internalSigner.SignImageManifest(context.Background(), signer, testManifest, testDockerReference)
+ require.NoError(t, err)
+ sig, ok := sig0.(signature.Sigstore)
+ require.True(t, ok)
+
+ // It would be even more elegant to invoke the higher-level prSigstoreSigned code,
+ // but that is private.
+ publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(keyPair.PublicKey)
+ require.NoError(t, err)
+
+ _, err = internal.VerifySigstorePayload(publicKey, sig.UntrustedPayload(),
+ sig.UntrustedAnnotations()[signature.SigstoreSignatureAnnotationKey],
+ internal.SigstorePayloadAcceptanceRules{
+ ValidateSignedDockerReference: func(ref string) error {
+ assert.Equal(t, "example.com/foo:notlatest", ref)
+ return nil
+ },
+ ValidateSignedDockerManifestDigest: func(digest digest.Digest) error {
+ matches, err := manifest.MatchesDigest(testManifest, digest)
+ require.NoError(t, err)
+ assert.True(t, matches)
+ return nil
+ },
+ })
+ assert.NoError(t, err)
+
+ // The failure paths are not obviously easy to reach.
+}
diff --git a/signature/sigstore/internal/signer.go b/signature/sigstore/internal/signer.go
new file mode 100644
index 0000000..c6258f4
--- /dev/null
+++ b/signature/sigstore/internal/signer.go
@@ -0,0 +1,95 @@
+package internal
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/signature/internal"
+ sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+type Option func(*SigstoreSigner) error
+
+// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures.
+// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the
+// dependency impact.
+type SigstoreSigner struct {
+ PrivateKey sigstoreSignature.Signer // May be nil during initialization
+ SigningKeyOrCert []byte // For possible Rekor upload; always initialized together with PrivateKey
+
+ // Fulcio results to include
+ FulcioGeneratedCertificate []byte // Or nil
+ FulcioGeneratedCertificateChain []byte // Or nil
+
+ // Rekor state
+ RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil
+}
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+func (s *SigstoreSigner) ProgressMessage() string {
+ return "Signing image using a sigstore signature"
+}
+
+// SignImageManifest creates a new signature for manifest m as dockerReference.
+func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) {
+ if s.PrivateKey == nil {
+ return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner")
+ }
+
+ if reference.IsNameOnly(dockerReference) {
+ return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
+ }
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return nil, err
+ }
+ // sigstore/cosign completely ignores dockerReference for actual policy decisions.
+ // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
+ // So, just do what simple signing does, and cosign won’t mind.
+ payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
+ payloadBytes, err := json.Marshal(payloadData)
+ if err != nil {
+ return nil, err
+ }
+
+ // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
+ // which seems to be not used by anything. So we don’t bother.
+ signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes))
+ if err != nil {
+ return nil, fmt.Errorf("creating signature: %w", err)
+ }
+ base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
+ var rekorSETBytes []byte // = nil
+ if s.RekorUploader != nil {
+ set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes)
+ if err != nil {
+ return nil, err
+ }
+ rekorSETBytes = set
+ }
+
+ annotations := map[string]string{
+ signature.SigstoreSignatureAnnotationKey: base64Signature,
+ }
+ if s.FulcioGeneratedCertificate != nil {
+ annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate)
+ }
+ if s.FulcioGeneratedCertificateChain != nil {
+ annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain)
+ }
+ if rekorSETBytes != nil {
+ annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes)
+ }
+ return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil
+}
+
+func (s *SigstoreSigner) Close() error {
+ return nil
+}
diff --git a/signature/sigstore/rekor/leveled_logger.go b/signature/sigstore/rekor/leveled_logger.go
new file mode 100644
index 0000000..f240d8c
--- /dev/null
+++ b/signature/sigstore/rekor/leveled_logger.go
@@ -0,0 +1,52 @@
+package rekor
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-retryablehttp"
+ "github.com/sirupsen/logrus"
+)
+
+// leveledLogger adapts our use of logrus to the expected go-retryablehttp.LeveledLogger interface.
+type leveledLogger struct {
+ logger *logrus.Logger
+}
+
+func leveledLoggerForLogrus(logger *logrus.Logger) retryablehttp.LeveledLogger {
+ return &leveledLogger{logger: logger}
+}
+
+// log is the actual conversion implementation
+func (l *leveledLogger) log(level logrus.Level, msg string, keysAndValues []any) {
+ fields := logrus.Fields{}
+ for i := 0; i < len(keysAndValues)-1; i += 2 {
+ key := keysAndValues[i]
+ keyString, isString := key.(string)
+ if !isString {
+ // It seems attractive to panic() here, but we might already be in a failure state, so let’s not make it worse
+ keyString = fmt.Sprintf("[Invalid LeveledLogger key %#v]", key)
+ }
+ fields[keyString] = keysAndValues[i+1]
+ }
+ l.logger.WithFields(fields).Log(level, msg)
+}
+
+// Debug implements retryablehttp.LeveledLogger
+func (l *leveledLogger) Debug(msg string, keysAndValues ...any) {
+ l.log(logrus.DebugLevel, msg, keysAndValues)
+}
+
+// Error implements retryablehttp.LeveledLogger
+func (l *leveledLogger) Error(msg string, keysAndValues ...any) {
+ l.log(logrus.ErrorLevel, msg, keysAndValues)
+}
+
+// Info implements retryablehttp.LeveledLogger
+func (l *leveledLogger) Info(msg string, keysAndValues ...any) {
+ l.log(logrus.InfoLevel, msg, keysAndValues)
+}
+
+// Warn implements retryablehttp.LeveledLogger
+func (l *leveledLogger) Warn(msg string, keysAndValues ...any) {
+ l.log(logrus.WarnLevel, msg, keysAndValues)
+}
diff --git a/signature/sigstore/rekor/rekor.go b/signature/sigstore/rekor/rekor.go
new file mode 100644
index 0000000..0236f0a
--- /dev/null
+++ b/signature/sigstore/rekor/rekor.go
@@ -0,0 +1,160 @@
+package rekor
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/containers/image/v5/signature/internal"
+ signerInternal "github.com/containers/image/v5/signature/sigstore/internal"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ rekor "github.com/sigstore/rekor/pkg/client"
+ "github.com/sigstore/rekor/pkg/generated/client"
+ "github.com/sigstore/rekor/pkg/generated/client/entries"
+ "github.com/sigstore/rekor/pkg/generated/models"
+ "github.com/sirupsen/logrus"
+)
+
+// WithRekor asks the generated signature to be uploaded to the specified Rekor server,
+// and to include a log inclusion proof in the signature.
+func WithRekor(rekorURL *url.URL) signerInternal.Option {
+ return func(s *signerInternal.SigstoreSigner) error {
+ logrus.Debugf("Using Rekor server at %s", rekorURL.Redacted())
+ client, err := rekor.GetRekorClient(rekorURL.String(),
+ rekor.WithLogger(leveledLoggerForLogrus(logrus.StandardLogger())))
+ if err != nil {
+ return fmt.Errorf("creating Rekor client: %w", err)
+ }
+ u := uploader{
+ client: client,
+ }
+ s.RekorUploader = u.uploadKeyOrCert
+ return nil
+ }
+}
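+
+// A minimal usage sketch (URL, key path and passphrase are placeholders), combining this option
+// with a private-key option from the parent sigstore package:
+//
+//	rekorURL, err := url.Parse("https://rekor.example.com")
+//	// …handle err…
+//	s, err := sigstore.NewSigner(
+//		sigstore.WithPrivateKeyFile("/path/to/cosign.key", passphrase),
+//		rekor.WithRekor(rekorURL),
+//	)
+//	// …handle err, defer s.Close()…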
+
+// uploader wraps a Rekor client, basically so that we can set RekorUploader to a method instead of a one-off closure.
+type uploader struct {
+ client *client.Rekor
+}
+
+// rekorEntryToSET converts a Rekor log entry into a sigstore “signed entry timestamp”.
+func rekorEntryToSET(entry *models.LogEntryAnon) (internal.UntrustedRekorSET, error) {
+ // We could plausibly call entry.Validate() here; that mostly just uses unnecessary reflection instead of direct == nil checks.
+ // Right now the only extra validation .Validate() does is *entry.LogIndex >= 0 and a regex check on *entry.LogID;
+ // we don’t particularly care about either of these (notably signature verification only uses the Body value).
+ if entry.Verification == nil || entry.IntegratedTime == nil || entry.LogIndex == nil || entry.LogID == nil {
+ return internal.UntrustedRekorSET{}, fmt.Errorf("invalid Rekor entry (missing data): %#v", *entry)
+ }
+ bodyBase64, ok := entry.Body.(string)
+ if !ok {
+ return internal.UntrustedRekorSET{}, fmt.Errorf("unexpected Rekor entry body type: %#v", entry.Body)
+ }
+ body, err := base64.StdEncoding.DecodeString(bodyBase64)
+ if err != nil {
+ return internal.UntrustedRekorSET{}, fmt.Errorf("error parsing Rekor entry body: %w", err)
+ }
+ payloadJSON, err := internal.UntrustedRekorPayload{
+ Body: body,
+ IntegratedTime: *entry.IntegratedTime,
+ LogIndex: *entry.LogIndex,
+ LogID: *entry.LogID,
+ }.MarshalJSON()
+ if err != nil {
+ return internal.UntrustedRekorSET{}, err
+ }
+
+ return internal.UntrustedRekorSET{
+ UntrustedSignedEntryTimestamp: entry.Verification.SignedEntryTimestamp,
+ UntrustedPayload: payloadJSON,
+ }, nil
+}
+
+// uploadEntry ensures proposedEntry exists in Rekor (usually uploading it), and returns the resulting log entry.
+func (u *uploader) uploadEntry(ctx context.Context, proposedEntry models.ProposedEntry) (models.LogEntry, error) {
+ params := entries.NewCreateLogEntryParamsWithContext(ctx)
+ params.SetProposedEntry(proposedEntry)
+ logrus.Debugf("Calling Rekor's CreateLogEntry")
+ resp, err := u.client.Entries.CreateLogEntry(params)
+ if err != nil {
+ // In ordinary operation, we should not get duplicate entries, because our payload contains a timestamp,
+ // so it is supposed to be unique; and the default key format, ECDSA p256, also contains a nonce.
+ // But conflicts can fairly easily happen during debugging and experimentation, so it pays to handle this.
+ var conflictErr *entries.CreateLogEntryConflict
+ if errors.As(err, &conflictErr) && conflictErr.Location != "" {
+ location := conflictErr.Location.String()
+ logrus.Debugf("CreateLogEntry reported a conflict, location = %s", location)
+ // We might be able to just GET the returned Location, but let’s use the generated API client.
+ // OTOH that requires us to hard-code the URI structure…
+ uuidDelimiter := strings.LastIndexByte(location, '/')
+ if uuidDelimiter != -1 { // Otherwise the URI is unexpected, and fall through to the bottom
+ uuid := location[uuidDelimiter+1:]
+ logrus.Debugf("Calling Rekor's NewGetLogEntryByUUIDParamsWithContext")
+ params2 := entries.NewGetLogEntryByUUIDParamsWithContext(ctx)
+ params2.SetEntryUUID(uuid)
+ resp2, err := u.client.Entries.GetLogEntryByUUID(params2)
+ if err != nil {
+ return nil, fmt.Errorf("Error re-loading previously-created log entry with UUID %s: %w", uuid, err)
+ }
+ return resp2.GetPayload(), nil
+ }
+ }
+ return nil, fmt.Errorf("Error uploading a log entry: %w", err)
+ }
+ return resp.GetPayload(), nil
+}
+
+// uploadKeyOrCert integrates this code into sigstore/internal.Signer.
+// Given components of the created signature, it returns a SET that should be added to the signature.
+func (u *uploader) uploadKeyOrCert(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) {
+	payloadHash := sha256.Sum256(payloadBytes) // HashedRekord only accepts SHA-256
+ proposedEntry := models.Hashedrekord{
+ APIVersion: swag.String(internal.HashedRekordV001APIVersion),
+ Spec: models.HashedrekordV001Schema{
+ Data: &models.HashedrekordV001SchemaData{
+ Hash: &models.HashedrekordV001SchemaDataHash{
+ Algorithm: swag.String(models.HashedrekordV001SchemaDataHashAlgorithmSha256),
+ Value: swag.String(hex.EncodeToString(payloadHash[:])),
+ },
+ },
+ Signature: &models.HashedrekordV001SchemaSignature{
+ Content: strfmt.Base64(signatureBytes),
+ PublicKey: &models.HashedrekordV001SchemaSignaturePublicKey{
+ Content: strfmt.Base64(keyOrCertBytes),
+ },
+ },
+ },
+ }
+
+ uploadedPayload, err := u.uploadEntry(ctx, &proposedEntry)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(uploadedPayload) != 1 {
+ return nil, fmt.Errorf("expected 1 Rekor entry, got %d", len(uploadedPayload))
+ }
+ var storedEntry *models.LogEntryAnon
+ // This “loop” extracts the single value from the uploadedPayload map.
+ for _, p := range uploadedPayload {
+ storedEntry = &p
+ break
+ }
+
+ rekorBundle, err := rekorEntryToSET(storedEntry)
+ if err != nil {
+ return nil, err
+ }
+ rekorSETBytes, err := json.Marshal(rekorBundle)
+ if err != nil {
+ return nil, err
+ }
+ return rekorSETBytes, nil
+}
diff --git a/signature/sigstore/signer.go b/signature/sigstore/signer.go
new file mode 100644
index 0000000..fb825ad
--- /dev/null
+++ b/signature/sigstore/signer.go
@@ -0,0 +1,60 @@
+package sigstore
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/signature/signer"
+ "github.com/containers/image/v5/signature/sigstore/internal"
+ "github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+type Option = internal.Option
+
+func WithPrivateKeyFile(file string, passphrase []byte) Option {
+ return func(s *internal.SigstoreSigner) error {
+ if s.PrivateKey != nil {
+ return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
+ }
+
+ if passphrase == nil {
+ return errors.New("private key passphrase not provided")
+ }
+
+ privateKeyPEM, err := os.ReadFile(file)
+ if err != nil {
+ return fmt.Errorf("reading private key from %s: %w", file, err)
+ }
+ signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase)
+ if err != nil {
+ return fmt.Errorf("initializing private key: %w", err)
+ }
+ publicKey, err := signerVerifier.PublicKey()
+ if err != nil {
+ return fmt.Errorf("getting public key from private key: %w", err)
+ }
+ publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
+ if err != nil {
+ return fmt.Errorf("converting public key to PEM: %w", err)
+ }
+ s.PrivateKey = signerVerifier
+ s.SigningKeyOrCert = publicKeyPEM
+ return nil
+ }
+}
+
+func NewSigner(opts ...Option) (*signer.Signer, error) {
+ s := internal.SigstoreSigner{}
+ for _, o := range opts {
+ if err := o(&s); err != nil {
+ return nil, err
+ }
+ }
+ if s.PrivateKey == nil {
+ return nil, errors.New("no private key source provided (neither a private key nor Fulcio) when preparing to create sigstore signatures")
+ }
+
+ return internalSigner.NewSigner(&s), nil
+}
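+
+// A minimal usage sketch (path and passphrase are placeholders): the resulting *signer.Signer is
+// what callers hand to image copying (e.g. via copy.Options.Signers), and must be closed by its owner:
+//
+//	s, err := NewSigner(WithPrivateKeyFile("/path/to/cosign.key", []byte("example passphrase")))
+//	// …handle err…
+//	defer s.Close()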
diff --git a/signature/simple.go b/signature/simple.go
new file mode 100644
index 0000000..56b222e
--- /dev/null
+++ b/signature/simple.go
@@ -0,0 +1,283 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/containers/image/v5/version"
+ digest "github.com/opencontainers/go-digest"
+)
+
+const (
+ signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError = internal.InvalidSignatureError
+
+// Signature is a parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+ DockerManifestDigest digest.Digest
+ DockerReference string // FIXME: more precise type?
+}
+
+// untrustedSignature is a parsed content of a signature.
+type untrustedSignature struct {
+ untrustedDockerManifestDigest digest.Digest
+ untrustedDockerReference string // FIXME: more precise type?
+ untrustedCreatorID *string
+	// This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
+ // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+ // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+ // we would add another field, UntrustedTimestampNS int64.
+ untrustedTimestamp *int64
+}
+
+// UntrustedSignatureInformation is information available in an untrusted signature.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+type UntrustedSignatureInformation struct {
+ UntrustedDockerManifestDigest digest.Digest
+ UntrustedDockerReference string // FIXME: more precise type?
+ UntrustedCreatorID *string
+ UntrustedTimestamp *time.Time
+ UntrustedShortKeyIdentifier string
+}
+
+// newUntrustedSignature returns an untrustedSignature object with
+// the specified primary contents and appropriate metadata.
+func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
+ // Use intermediate variables for these values so that we can take their addresses.
+ // Golang guarantees that they will have a new address on every execution.
+ creatorID := "atomic " + version.Version
+ timestamp := time.Now().Unix()
+ return untrustedSignature{
+ untrustedDockerManifestDigest: dockerManifestDigest,
+ untrustedDockerReference: dockerReference,
+ untrustedCreatorID: &creatorID,
+ untrustedTimestamp: &timestamp,
+ }
+}
+
+// A compile-time check that untrustedSignature and *untrustedSignature implement json.Marshaler
+var _ json.Marshaler = untrustedSignature{}
+var _ json.Marshaler = (*untrustedSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s untrustedSignature) MarshalJSON() ([]byte, error) {
+ if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+ return nil, errors.New("Unexpected empty signature content")
+ }
+ critical := map[string]any{
+ "type": signatureType,
+ "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+ "identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+ }
+ optional := map[string]any{}
+ if s.untrustedCreatorID != nil {
+ optional["creator"] = *s.untrustedCreatorID
+ }
+ if s.untrustedTimestamp != nil {
+ optional["timestamp"] = *s.untrustedTimestamp
+ }
+ signature := map[string]any{
+ "critical": critical,
+ "optional": optional,
+ }
+ return json.Marshal(signature)
+}
+
+// Compile-time check that untrustedSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*untrustedSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
+ err := s.strictUnmarshalJSON(data)
+ if err != nil {
+ if formatErr, ok := err.(internal.JSONFormatError); ok {
+ err = internal.NewInvalidSignatureError(formatErr.Error())
+ }
+ }
+ return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
+// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+ var critical, optional json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "critical": &critical,
+ "optional": &optional,
+ }); err != nil {
+ return err
+ }
+
+ var creatorID string
+ var timestamp float64
+ var gotCreatorID, gotTimestamp = false, false
+ if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
+ switch key {
+ case "creator":
+ gotCreatorID = true
+ return &creatorID
+ case "timestamp":
+ gotTimestamp = true
+ return &timestamp
+ default:
+ var ignore any
+ return &ignore
+ }
+ }); err != nil {
+ return err
+ }
+ if gotCreatorID {
+ s.untrustedCreatorID = &creatorID
+ }
+ if gotTimestamp {
+ intTimestamp := int64(timestamp)
+ if float64(intTimestamp) != timestamp {
+			return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
+ }
+ s.untrustedTimestamp = &intTimestamp
+ }
+
+ var t string
+ var image, identity json.RawMessage
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+ "type": &t,
+ "image": &image,
+ "identity": &identity,
+ }); err != nil {
+ return err
+ }
+ if t != signatureType {
+ return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+ }
+
+ var digestString string
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+ "docker-manifest-digest": &digestString,
+ }); err != nil {
+ return err
+ }
+ s.untrustedDockerManifestDigest = digest.Digest(digestString)
+
+ return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+ "docker-reference": &s.untrustedDockerReference,
+ })
+}
+
+// sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
+ json, err := json.Marshal(s)
+ if err != nil {
+ return nil, err
+ }
+
+ if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
+ return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
+ }
+
+ if passphrase != "" {
+ return nil, errors.New("signing mechanism does not support passphrases")
+ }
+
+ return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type signatureAcceptanceRules struct {
+ validateKeyIdentity func(string) error
+ validateSignedDockerReference func(string) error
+ validateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+ signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+ if err != nil {
+ return nil, err
+ }
+ if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+ return nil, err
+ }
+
+ var unmatchedSignature untrustedSignature
+ if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+ return nil, internal.NewInvalidSignatureError(err.Error())
+ }
+ if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil {
+ return nil, err
+ }
+ if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil {
+ return nil, err
+ }
+ // signatureAcceptanceRules have accepted this value.
+ return &Signature{
+ DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest,
+ DockerReference: unmatchedSignature.untrustedDockerReference,
+ }, nil
+}
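+
+// As an illustrative sketch, a caller that expects a specific key, reference and digest
+// could wire up the rules roughly like this (expectedFingerprint, expectedReference and
+// expectedDigest are hypothetical placeholders supplied by the caller):
+//
+//	sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+//		validateKeyIdentity: func(k string) error {
+//			if k != expectedFingerprint {
+//				return errors.New("unexpected key identity")
+//			}
+//			return nil
+//		},
+//		validateSignedDockerReference: func(r string) error {
+//			if r != expectedReference {
+//				return errors.New("unexpected signed docker reference")
+//			}
+//			return nil
+//		},
+//		validateSignedDockerManifestDigest: func(d digest.Digest) error {
+//			if d != expectedDigest {
+//				return errors.New("unexpected manifest digest")
+//			}
+//			return nil
+//		},
+//	})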
+
+// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
+// WITHOUT doing any cryptographic verification.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”).
+func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
+ // NOTE: This should eventually do format autodetection.
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ defer mech.Close()
+
+ untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
+ if err != nil {
+ return nil, err
+ }
+ var untrustedDecodedContents untrustedSignature
+ if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
+ return nil, internal.NewInvalidSignatureError(err.Error())
+ }
+
+ var timestamp *time.Time // = nil
+ if untrustedDecodedContents.untrustedTimestamp != nil {
+ ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0)
+ timestamp = &ts
+ }
+ return &UntrustedSignatureInformation{
+ UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest,
+ UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference,
+ UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID,
+ UntrustedTimestamp: timestamp,
+ UntrustedShortKeyIdentifier: shortKeyIdentifier,
+ }, nil
+}
diff --git a/signature/simple_test.go b/signature/simple_test.go
new file mode 100644
index 0000000..313343e
--- /dev/null
+++ b/signature/simple_test.go
@@ -0,0 +1,405 @@
+package signature
+
+import (
+ "encoding/json"
+ "errors"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/version"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/xeipuuv/gojsonschema"
+)
+
+func TestNewUntrustedSignature(t *testing.T) {
+ timeBefore := time.Now()
+ sig := newUntrustedSignature(TestImageManifestDigest, TestImageSignatureReference)
+ assert.Equal(t, TestImageManifestDigest, sig.untrustedDockerManifestDigest)
+ assert.Equal(t, TestImageSignatureReference, sig.untrustedDockerReference)
+ require.NotNil(t, sig.untrustedCreatorID)
+ assert.Equal(t, "atomic "+version.Version, *sig.untrustedCreatorID)
+ require.NotNil(t, sig.untrustedTimestamp)
+ timeAfter := time.Now()
+ assert.True(t, timeBefore.Unix() <= *sig.untrustedTimestamp)
+ assert.True(t, *sig.untrustedTimestamp <= timeAfter.Unix())
+}
+
+func TestMarshalJSON(t *testing.T) {
+ // Empty string values
+ s := newUntrustedSignature("", "_")
+ _, err := s.MarshalJSON()
+ assert.Error(t, err)
+ s = newUntrustedSignature("_", "")
+ _, err = s.MarshalJSON()
+ assert.Error(t, err)
+
+ // Success
+ // Use intermediate variables for these values so that we can take their addresses.
+ creatorID := "CREATOR"
+ timestamp := int64(1484683104)
+ for _, c := range []struct {
+ input untrustedSignature
+ expected string
+ }{
+ {
+ untrustedSignature{
+ untrustedDockerManifestDigest: "digest!@#",
+ untrustedDockerReference: "reference#@!",
+ untrustedCreatorID: &creatorID,
+ untrustedTimestamp: &timestamp,
+ },
+ "{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"atomic container signature\"},\"optional\":{\"creator\":\"CREATOR\",\"timestamp\":1484683104}}",
+ },
+ {
+ untrustedSignature{
+ untrustedDockerManifestDigest: "digest!@#",
+ untrustedDockerReference: "reference#@!",
+ },
+ "{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"atomic container signature\"},\"optional\":{}}",
+ },
+ } {
+ marshaled, err := c.input.MarshalJSON()
+ require.NoError(t, err)
+ assert.Equal(t, []byte(c.expected), marshaled)
+
+ // Also call MarshalJSON through the JSON package.
+ marshaled, err = json.Marshal(c.input)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte(c.expected), marshaled)
+ }
+}
+
+// modifiedJSON returns the result of modifying validJSON with modifyFn.
+func modifiedJSON(t *testing.T, validJSON []byte, modifyFn func(mSA)) []byte {
+ var tmp mSA
+ err := json.Unmarshal(validJSON, &tmp)
+ require.NoError(t, err)
+
+ modifyFn(tmp)
+
+ modifiedJSON, err := json.Marshal(tmp)
+ require.NoError(t, err)
+ return modifiedJSON
+}
+
+// Verify that input can be unmarshaled as an untrustedSignature, and that it passes JSON schema validation, and return the unmarshaled untrustedSignature.
+func successfullyUnmarshalUntrustedSignature(t *testing.T, schemaLoader gojsonschema.JSONLoader, input []byte) untrustedSignature {
+ inputString := string(input)
+
+ var s untrustedSignature
+ err := json.Unmarshal(input, &s)
+ require.NoError(t, err, inputString)
+
+ res, err := gojsonschema.Validate(schemaLoader, gojsonschema.NewStringLoader(inputString))
+ assert.True(t, err == nil, inputString)
+ assert.True(t, res.Valid(), inputString)
+
+ return s
+}
+
+// Verify that input can't be unmarshaled as an untrusted signature, and that it fails JSON schema validation.
+func assertUnmarshalUntrustedSignatureFails(t *testing.T, schemaLoader gojsonschema.JSONLoader, input []byte) {
+ inputString := string(input)
+
+ var s untrustedSignature
+ err := json.Unmarshal(input, &s)
+ assert.Error(t, err, inputString)
+
+ res, err := gojsonschema.Validate(schemaLoader, gojsonschema.NewStringLoader(inputString))
+ assert.True(t, err != nil || !res.Valid(), inputString)
+}
+
+func TestUnmarshalJSON(t *testing.T) {
+ // NOTE: The schema at schemaPath is NOT authoritative; docs/atomic-signature.json and the code are, rather!
+ // The schemaPath references are not testing that the code follows the behavior declared by the schema;
+ // they are testing that the schema follows the behavior of the code!
+ schemaPath, err := filepath.Abs("../docs/atomic-signature-embedded-json.json")
+ require.NoError(t, err)
+ schemaLoader := gojsonschema.NewReferenceLoader("file://" + schemaPath)
+
+ // Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
+ // UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
+ assertUnmarshalUntrustedSignatureFails(t, schemaLoader, []byte("&"))
+ var s untrustedSignature
+ err = s.UnmarshalJSON([]byte("&"))
+ assert.Error(t, err)
+
+ // Not an object
+ assertUnmarshalUntrustedSignatureFails(t, schemaLoader, []byte("1"))
+
+ // Start with a valid JSON.
+ validSig := newUntrustedSignature("digest!@#", "reference#@!")
+ validJSON, err := validSig.MarshalJSON()
+ require.NoError(t, err)
+
+ // Success
+ s = successfullyUnmarshalUntrustedSignature(t, schemaLoader, validJSON)
+ assert.Equal(t, validSig, s)
+
+ // Various ways to corrupt the JSON
+ breakFns := []func(mSA){
+ // A top-level field is missing
+ func(v mSA) { delete(v, "critical") },
+ func(v mSA) { delete(v, "optional") },
+ // Extra top-level sub-object
+ func(v mSA) { v["unexpected"] = 1 },
+ // "critical" not an object
+ func(v mSA) { v["critical"] = 1 },
+ // "optional" not an object
+ func(v mSA) { v["optional"] = 1 },
+ // A field of "critical" is missing
+ func(v mSA) { delete(x(v, "critical"), "type") },
+ func(v mSA) { delete(x(v, "critical"), "image") },
+ func(v mSA) { delete(x(v, "critical"), "identity") },
+ // Extra field of "critical"
+ func(v mSA) { x(v, "critical")["unexpected"] = 1 },
+ // Invalid "type"
+ func(v mSA) { x(v, "critical")["type"] = 1 },
+ func(v mSA) { x(v, "critical")["type"] = "unexpected" },
+ // Invalid "image" object
+ func(v mSA) { x(v, "critical")["image"] = 1 },
+ func(v mSA) { delete(x(v, "critical", "image"), "docker-manifest-digest") },
+ func(v mSA) { x(v, "critical", "image")["unexpected"] = 1 },
+ // Invalid "docker-manifest-digest"
+ func(v mSA) { x(v, "critical", "image")["docker-manifest-digest"] = 1 },
+ // Invalid "identity" object
+ func(v mSA) { x(v, "critical")["identity"] = 1 },
+ func(v mSA) { delete(x(v, "critical", "identity"), "docker-reference") },
+ func(v mSA) { x(v, "critical", "identity")["unexpected"] = 1 },
+ // Invalid "docker-reference"
+ func(v mSA) { x(v, "critical", "identity")["docker-reference"] = 1 },
+ // Invalid "creator"
+ func(v mSA) { x(v, "optional")["creator"] = 1 },
+ // Invalid "timestamp"
+ func(v mSA) { x(v, "optional")["timestamp"] = "unexpected" },
+ func(v mSA) { x(v, "optional")["timestamp"] = 0.5 }, // Fractional input
+ }
+ for _, fn := range breakFns {
+ testJSON := modifiedJSON(t, validJSON, fn)
+ assertUnmarshalUntrustedSignatureFails(t, schemaLoader, testJSON)
+ }
+
+ // Modifications to unrecognized fields in "optional" are allowed and ignored
+ allowedModificationFns := []func(mSA){
+ // Add an optional field
+ func(v mSA) { x(v, "optional")["unexpected"] = 1 },
+ }
+ for _, fn := range allowedModificationFns {
+ testJSON := modifiedJSON(t, validJSON, fn)
+ s := successfullyUnmarshalUntrustedSignature(t, schemaLoader, testJSON)
+ assert.Equal(t, validSig, s)
+ }
+
+ // Optional fields can be missing
+ validSig = untrustedSignature{
+ untrustedDockerManifestDigest: "digest!@#",
+ untrustedDockerReference: "reference#@!",
+ untrustedCreatorID: nil,
+ untrustedTimestamp: nil,
+ }
+ validJSON, err = validSig.MarshalJSON()
+ require.NoError(t, err)
+ s = successfullyUnmarshalUntrustedSignature(t, schemaLoader, validJSON)
+ assert.Equal(t, validSig, s)
+}
+
+func TestSign(t *testing.T) {
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+
+ if err := mech.SupportsSigning(); err != nil {
+ t.Skipf("Signing not supported: %v", err)
+ }
+
+ sig := newUntrustedSignature("digest!@#", "reference#@!")
+
+ // Successful signing
+ signature, err := sig.sign(mech, TestKeyFingerprint, "")
+ require.NoError(t, err)
+
+ verified, err := verifyAndExtractSignature(mech, signature, signatureAcceptanceRules{
+ validateKeyIdentity: func(keyIdentity string) error {
+ if keyIdentity != TestKeyFingerprint {
+ return errors.New("Unexpected keyIdentity")
+ }
+ return nil
+ },
+ validateSignedDockerReference: func(signedDockerReference string) error {
+ if signedDockerReference != sig.untrustedDockerReference {
+ return errors.New("Unexpected signedDockerReference")
+ }
+ return nil
+ },
+ validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+ if signedDockerManifestDigest != sig.untrustedDockerManifestDigest {
+ return errors.New("Unexpected signedDockerManifestDigest")
+ }
+ return nil
+ },
+ })
+ require.NoError(t, err)
+
+ assert.Equal(t, sig.untrustedDockerManifestDigest, verified.DockerManifestDigest)
+ assert.Equal(t, sig.untrustedDockerReference, verified.DockerReference)
+
+ // Error creating blob to sign
+ _, err = untrustedSignature{}.sign(mech, TestKeyFingerprint, "")
+ assert.Error(t, err)
+
+ // Error signing
+ _, err = sig.sign(mech, "this fingerprint doesn't exist", "")
+ assert.Error(t, err)
+}
+
+func TestVerifyAndExtractSignature(t *testing.T) {
+ mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
+ require.NoError(t, err)
+ defer mech.Close()
+
+ type triple struct {
+ keyIdentity string
+ signedDockerReference string
+ signedDockerManifestDigest digest.Digest
+ }
+ var wanted, recorded triple
+ // recordingRules is a plausible signatureAcceptanceRules implementation; equally
+ // importantly, it records that we are passing the correct values to the rule callbacks.
+ recordingRules := signatureAcceptanceRules{
+ validateKeyIdentity: func(keyIdentity string) error {
+ recorded.keyIdentity = keyIdentity
+ if keyIdentity != wanted.keyIdentity {
+ return errors.New("keyIdentity mismatch")
+ }
+ return nil
+ },
+ validateSignedDockerReference: func(signedDockerReference string) error {
+ recorded.signedDockerReference = signedDockerReference
+ if signedDockerReference != wanted.signedDockerReference {
+ return errors.New("signedDockerReference mismatch")
+ }
+ return nil
+ },
+ validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+ recorded.signedDockerManifestDigest = signedDockerManifestDigest
+ if signedDockerManifestDigest != wanted.signedDockerManifestDigest {
+ return errors.New("signedDockerManifestDigest mismatch")
+ }
+ return nil
+ },
+ }
+
+ signature, err := os.ReadFile("./fixtures/image.signature")
+ require.NoError(t, err)
+ signatureData := triple{
+ keyIdentity: TestKeyFingerprint,
+ signedDockerReference: TestImageSignatureReference,
+ signedDockerManifestDigest: TestImageManifestDigest,
+ }
+
+ // Successful verification
+ wanted = signatureData
+ recorded = triple{}
+ sig, err := verifyAndExtractSignature(mech, signature, recordingRules)
+ require.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
+ assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
+ assert.Equal(t, signatureData, recorded)
+
+ // For extra paranoia, test that we return a nil signature object on error.
+
+ // Completely invalid signature.
+ recorded = triple{}
+ sig, err = verifyAndExtractSignature(mech, []byte{}, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, triple{}, recorded)
+
+ recorded = triple{}
+ sig, err = verifyAndExtractSignature(mech, []byte("invalid signature"), recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, triple{}, recorded)
+
+ // Valid signature of non-JSON: only the keyIdentity rule is consulted
+ invalidBlobSignature, err := os.ReadFile("./fixtures/invalid-blob.signature")
+ require.NoError(t, err)
+ recorded = triple{}
+ sig, err = verifyAndExtractSignature(mech, invalidBlobSignature, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, triple{keyIdentity: signatureData.keyIdentity}, recorded)
+
+ // Valid signature with a wrong key: only the keyIdentity rule is consulted
+ wanted = signatureData
+ wanted.keyIdentity = "unexpected fingerprint"
+ recorded = triple{}
+ sig, err = verifyAndExtractSignature(mech, signature, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, triple{keyIdentity: signatureData.keyIdentity}, recorded)
+
+ // Valid signature with a wrong manifest digest: the keyIdentity and signedDockerManifestDigest rules are consulted
+ wanted = signatureData
+ wanted.signedDockerManifestDigest = "invalid digest"
+ recorded = triple{}
+ sig, err = verifyAndExtractSignature(mech, signature, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, triple{
+ keyIdentity: signatureData.keyIdentity,
+ signedDockerManifestDigest: signatureData.signedDockerManifestDigest,
+ }, recorded)
+
+ // Valid signature with a wrong image reference
+ wanted = signatureData
+ wanted.signedDockerReference = "unexpected docker reference"
+ recorded = triple{}
+ sig, err = verifyAndExtractSignature(mech, signature, recordingRules)
+ assert.Error(t, err)
+ assert.Nil(t, sig)
+ assert.Equal(t, signatureData, recorded)
+}
+
+func TestGetUntrustedSignatureInformationWithoutVerifying(t *testing.T) {
+ signature, err := os.ReadFile("./fixtures/image.signature")
+ require.NoError(t, err)
+ // Successful parsing, all optional fields present
+ info, err := GetUntrustedSignatureInformationWithoutVerifying(signature)
+ require.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, info.UntrustedDockerReference)
+ assert.Equal(t, TestImageManifestDigest, info.UntrustedDockerManifestDigest)
+ assert.NotNil(t, info.UntrustedCreatorID)
+ assert.Equal(t, "atomic ", *info.UntrustedCreatorID)
+ assert.NotNil(t, info.UntrustedTimestamp)
+ assert.Equal(t, time.Unix(1458239713, 0), *info.UntrustedTimestamp)
+ assert.Equal(t, TestKeyShortID, info.UntrustedShortKeyIdentifier)
+ // Successful parsing, no optional fields present
+ signature, err = os.ReadFile("./fixtures/no-optional-fields.signature")
+ require.NoError(t, err)
+ // Successful parsing
+ info, err = GetUntrustedSignatureInformationWithoutVerifying(signature)
+ require.NoError(t, err)
+ assert.Equal(t, TestImageSignatureReference, info.UntrustedDockerReference)
+ assert.Equal(t, TestImageManifestDigest, info.UntrustedDockerManifestDigest)
+ assert.Nil(t, info.UntrustedCreatorID)
+ assert.Nil(t, info.UntrustedTimestamp)
+ assert.Equal(t, TestKeyShortID, info.UntrustedShortKeyIdentifier)
+
+ // Completely invalid signature.
+ _, err = GetUntrustedSignatureInformationWithoutVerifying([]byte{})
+ assert.Error(t, err)
+
+ _, err = GetUntrustedSignatureInformationWithoutVerifying([]byte("invalid signature"))
+ assert.Error(t, err)
+
+ // Valid signature of non-JSON
+ invalidBlobSignature, err := os.ReadFile("./fixtures/invalid-blob.signature")
+ require.NoError(t, err)
+ _, err = GetUntrustedSignatureInformationWithoutVerifying(invalidBlobSignature)
+ assert.Error(t, err)
+}
diff --git a/signature/simplesigning/signer.go b/signature/simplesigning/signer.go
new file mode 100644
index 0000000..983bbb1
--- /dev/null
+++ b/signature/simplesigning/signer.go
@@ -0,0 +1,105 @@
+package simplesigning
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ internalSig "github.com/containers/image/v5/internal/signature"
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/signature/signer"
+)
+
+// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures.
+type simpleSigner struct {
+ mech signature.SigningMechanism
+ keyFingerprint string
+ passphrase string // "" if not provided.
+}
+
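+// Option is a modifier for the signer being created by NewSigner.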
+type Option func(*simpleSigner) error
+
+// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint.
+func WithKeyFingerprint(keyFingerprint string) Option {
+ return func(s *simpleSigner) error {
+ s.keyFingerprint = keyFingerprint
+ return nil
+ }
+}
+
+// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key.
+// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry.
+func WithPassphrase(passphrase string) Option {
+ return func(s *simpleSigner) error {
+ // The gpgme implementation can’t use a passphrase that contains \n; reject it here for consistent behavior.
+ if strings.Contains(passphrase, "\n") {
+ return errors.New("invalid passphrase: must not contain a line break")
+ }
+ s.passphrase = passphrase
+ return nil
+ }
+}
+
+// NewSigner returns a signature.Signer which creates “simple signing” signatures using the user’s default
+// GPG configuration ($GNUPGHOME / ~/.gnupg).
+//
+// The set of options must identify a key to sign with, typically using WithKeyFingerprint.
+//
+// The caller must call Close() on the returned Signer.
+func NewSigner(opts ...Option) (*signer.Signer, error) {
+ mech, err := signature.NewGPGSigningMechanism()
+ if err != nil {
+ return nil, fmt.Errorf("initializing GPG: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ mech.Close()
+ }
+ }()
+ if err := mech.SupportsSigning(); err != nil {
+ return nil, fmt.Errorf("Signing not supported: %w", err)
+ }
+
+ s := simpleSigner{
+ mech: mech,
+ }
+ for _, o := range opts {
+ if err := o(&s); err != nil {
+ return nil, err
+ }
+ }
+ if s.keyFingerprint == "" {
+ return nil, errors.New("no key identity provided for simple signing")
+ }
+ // Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that.
+
+ succeeded = true
+ return internalSigner.NewSigner(&s), nil
+}
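+
+// As a hedged usage sketch from a caller’s point of view (keyFingerprint and passphrase are
+// hypothetical values supplied by the caller):
+//
+//	s, err := simplesigning.NewSigner(
+//		simplesigning.WithKeyFingerprint(keyFingerprint),
+//		simplesigning.WithPassphrase(passphrase),
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	defer s.Close()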
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+func (s *simpleSigner) ProgressMessage() string {
+ return "Signing image using simple signing"
+}
+
+// SignImageManifest creates a new signature for manifest m as dockerReference.
+func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) {
+ if reference.IsNameOnly(dockerReference) {
+ return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
+ }
+ simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{
+ Passphrase: s.passphrase,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return internalSig.SimpleSigningFromBlob(simpleSig), nil
+}
+
+func (s *simpleSigner) Close() error {
+ return s.mech.Close()
+}
diff --git a/signature/simplesigning/signer_test.go b/signature/simplesigning/signer_test.go
new file mode 100644
index 0000000..0246c13
--- /dev/null
+++ b/signature/simplesigning/signer_test.go
@@ -0,0 +1,235 @@
+package simplesigning
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ internalSig "github.com/containers/image/v5/internal/signature"
+ internalSigner "github.com/containers/image/v5/internal/signer"
+ "github.com/containers/image/v5/internal/testing/gpgagent"
+ "github.com/containers/image/v5/signature"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ // testImageManifestDigest is the Docker manifest digest of "image.manifest.json"
+ testImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
+ testGPGHomeDirectory = "./testdata"
+ // testKeyFingerprint is the fingerprint of the private key in testGPGHomeDirectory.
+ testKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
+ // testKeyFingerprintWithPassphrase is the fingerprint of the private key with passphrase in testGPGHomeDirectory.
+ testKeyFingerprintWithPassphrase = "E3EB7611D815211F141946B5B0CDE60B42557346"
+ // testPassphrase is the passphrase for testKeyFingerprintWithPassphrase.
+ testPassphrase = "WithPassphrase123"
+)
+
+// Ensure we don’t leave around GPG agent processes.
+func TestMain(m *testing.M) {
+ code := m.Run()
+ if err := gpgagent.KillGPGAgent(testGPGHomeDirectory); err != nil {
+ logrus.Warnf("Error killing GPG agent: %v", err)
+ }
+ os.Exit(code)
+}
+
+func TestNewSigner(t *testing.T) {
+ t.Setenv("GNUPGHOME", testGPGHomeDirectory)
+
+ mech, err := signature.NewGPGSigningMechanism()
+ require.NoError(t, err)
+ defer mech.Close()
+ if err := mech.SupportsSigning(); err != nil {
+ t.Skipf("Signing not supported: %v", err)
+ }
+
+ // An option causes an error
+ _, err = NewSigner(WithKeyFingerprint(testKeyFingerprintWithPassphrase), WithPassphrase("\n"))
+ assert.Error(t, err)
+
+ // WithKeyFingerprint is missing
+ _, err = NewSigner(WithPassphrase("something"))
+ assert.Error(t, err)
+
+ // A smoke test
+ s, err := NewSigner(WithKeyFingerprint(testKeyFingerprint))
+ require.NoError(t, err)
+ err = s.Close()
+ assert.NoError(t, err)
+}
+
+func TestSimpleSignerProgressMessage(t *testing.T) {
+ t.Setenv("GNUPGHOME", testGPGHomeDirectory)
+
+ mech, err := signature.NewGPGSigningMechanism()
+ require.NoError(t, err)
+ defer mech.Close()
+ if err := mech.SupportsSigning(); err != nil {
+ t.Skipf("Signing not supported: %v", err)
+ }
+
+ // Just a smoke test
+ s, err := NewSigner(WithKeyFingerprint(testKeyFingerprint))
+ require.NoError(t, err)
+ defer func() {
+ err = s.Close()
+ assert.NoError(t, err)
+ }()
+
+ _ = internalSigner.ProgressMessage(s)
+}
+
+func TestSimpleSignerSignImageManifest(t *testing.T) {
+ t.Setenv("GNUPGHOME", testGPGHomeDirectory)
+
+ mech, err := signature.NewGPGSigningMechanism()
+ require.NoError(t, err)
+ defer mech.Close()
+ if err := mech.SupportsSigning(); err != nil {
+ t.Skipf("Signing not supported: %v", err)
+ }
+
+ err = gpgagent.KillGPGAgent(testGPGHomeDirectory)
+ require.NoError(t, err)
+
+ manifest, err := os.ReadFile("../fixtures/image.manifest.json")
+ require.NoError(t, err)
+ testImageSignatureReference, err := reference.ParseNormalizedNamed("example.com/testing/manifest:notlatest")
+ require.NoError(t, err)
+
+ // Failures to sign need to be tested in two parts: first the failures that involve the wrong passphrase, then failures that
+ // should manifest even with a valid passphrase or an unlocked key (because the GPG agent is caching unlocked keys).
+ // Alternatively, we could be calling gpgagent.KillGPGAgent() all the time...
+ type failingCase struct {
+ name string
+ opts []Option
+ // NOTE: We DO NOT promise that cases which currently don't fail during NewSigner won't start failing there in the future.
+ // Actually we’d prefer failures to be identified early. This field only records current expected behavior, not the _desired_ end state.
+ creationFails bool
+ creationErrorContains string
+ manifest []byte
+ ref reference.Named
+ }
+ testFailure := func(c failingCase) {
+ s, err := NewSigner(c.opts...)
+ if c.creationFails {
+ assert.Error(t, err, c.name)
+ if c.creationErrorContains != "" {
+ assert.ErrorContains(t, err, c.creationErrorContains, c.name)
+ }
+ } else {
+ require.NoError(t, err, c.name)
+ defer s.Close()
+
+ m := manifest
+ if c.manifest != nil {
+ m = c.manifest
+ }
+ _, err = internalSigner.SignImageManifest(context.Background(), s, m, c.ref)
+ assert.Error(t, err, c.name)
+ }
+ }
+ for _, c := range []failingCase{
+ {
+ name: "Invalid passphrase",
+ opts: []Option{
+ WithKeyFingerprint(testKeyFingerprintWithPassphrase),
+ WithPassphrase(testPassphrase + "\n"),
+ },
+ creationFails: true,
+ creationErrorContains: "invalid passphrase",
+ ref: testImageSignatureReference,
+ },
+ {
+ name: "Wrong passphrase",
+ opts: []Option{
+ WithKeyFingerprint(testKeyFingerprintWithPassphrase),
+ WithPassphrase("wrong"),
+ },
+ ref: testImageSignatureReference,
+ },
+ {
+ name: "No passphrase",
+ opts: []Option{WithKeyFingerprint(testKeyFingerprintWithPassphrase)},
+ ref: testImageSignatureReference,
+ },
+ } {
+ testFailure(c)
+ }
+
+ // Successful signing
+ for _, c := range []struct {
+ name string
+ fingerprint string
+ opts []Option
+ }{
+ {
+ name: "No passphrase",
+ fingerprint: testKeyFingerprint,
+ },
+ {
+ name: "With passphrase",
+ fingerprint: testKeyFingerprintWithPassphrase,
+ opts: []Option{WithPassphrase(testPassphrase)},
+ },
+ } {
+ s, err := NewSigner(append([]Option{WithKeyFingerprint(c.fingerprint)}, c.opts...)...)
+ require.NoError(t, err, c.name)
+ defer s.Close()
+
+ sig, err := internalSigner.SignImageManifest(context.Background(), s, manifest, testImageSignatureReference)
+ require.NoError(t, err, c.name)
+ simpleSig, ok := sig.(internalSig.SimpleSigning)
+ require.True(t, ok)
+
+ // FIXME FIXME: gpgme_op_sign with a passphrase succeeds, but somehow confuses the GPGME internal state
+ // so that gpgme_op_verify below never completes (it polls on an already closed FD).
+ // That’s probably a GPGME bug, and needs investigating and fixing, but it isn’t related to this “signer” implementation.
+ if len(c.opts) == 0 {
+ mech, err := signature.NewGPGSigningMechanism()
+ require.NoError(t, err)
+ defer mech.Close()
+
+ verified, err := signature.VerifyDockerManifestSignature(simpleSig.UntrustedSignature(), manifest, testImageSignatureReference.String(), mech, c.fingerprint)
+ require.NoError(t, err)
+ assert.Equal(t, testImageSignatureReference.String(), verified.DockerReference)
+ assert.Equal(t, testImageManifestDigest, verified.DockerManifestDigest)
+ }
+ }
+
+ invalidManifest, err := os.ReadFile("../fixtures/v2s1-invalid-signatures.manifest.json")
+ require.NoError(t, err)
+ invalidReference, err := reference.ParseNormalizedNamed("no-tag")
+ require.NoError(t, err)
+ for _, c := range []failingCase{
+ {
+ name: "No key to sign with",
+ opts: nil,
+ creationFails: true,
+ },
+ {
+ name: "Error computing Docker manifest",
+ opts: []Option{WithKeyFingerprint(testKeyFingerprint)},
+ manifest: invalidManifest,
+ ref: testImageSignatureReference,
+ },
+ {
+ name: "Invalid reference",
+ opts: []Option{WithKeyFingerprint(testKeyFingerprint)},
+ ref: invalidReference,
+ },
+ {
+ name: "Error signing",
+ opts: []Option{
+ WithKeyFingerprint("this fingerprint doesn't exist"),
+ },
+ ref: testImageSignatureReference,
+ },
+ } {
+ testFailure(c)
+ }
+}
diff --git a/signature/simplesigning/testdata/.gitignore b/signature/simplesigning/testdata/.gitignore
new file mode 100644
index 0000000..2772b97
--- /dev/null
+++ b/signature/simplesigning/testdata/.gitignore
@@ -0,0 +1,6 @@
+/*.gpg~
+/.gpg-v21-migrated
+/private-keys-v1.d
+/random_seed
+/gnupg_spawn_agent_sentinel.lock
+/.#*
diff --git a/signature/simplesigning/testdata/pubring.gpg b/signature/simplesigning/testdata/pubring.gpg
new file mode 120000
index 0000000..be16a53
--- /dev/null
+++ b/signature/simplesigning/testdata/pubring.gpg
@@ -0,0 +1 @@
+../../fixtures/pubring.gpg \ No newline at end of file
diff --git a/signature/simplesigning/testdata/secring.gpg b/signature/simplesigning/testdata/secring.gpg
new file mode 120000
index 0000000..f8f8c2b
--- /dev/null
+++ b/signature/simplesigning/testdata/secring.gpg
@@ -0,0 +1 @@
+../../fixtures/secring.gpg \ No newline at end of file
diff --git a/signature/simplesigning/testdata/trustdb.gpg b/signature/simplesigning/testdata/trustdb.gpg
new file mode 120000
index 0000000..e22f33a
--- /dev/null
+++ b/signature/simplesigning/testdata/trustdb.gpg
@@ -0,0 +1 @@
+../../fixtures/trustdb.gpg \ No newline at end of file
diff --git a/storage/storage_dest.go b/storage/storage_dest.go
new file mode 100644
index 0000000..07e1d5e
--- /dev/null
+++ b/storage/storage_dest.go
@@ -0,0 +1,958 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination/impl"
+ "github.com/containers/image/v5/internal/imagedestination/stubs"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/putblobdigest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ graphdriver "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+)
+
+var (
+ // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
+ // with a digest-based name that doesn't match its contents.
+ // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value),
+ // and there is no known user of this error.
+ ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+ // ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+ // with an expected size that doesn't match the reader.
+ ErrBlobSizeMismatch = errors.New("blob size mismatch")
+)
+
+type storageImageDestination struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.ImplementsPutBlobPartial
+ stubs.AlwaysSupportsSignatures
+
+ imageRef storageReference
+ directory string // Temporary directory where we store blobs until Commit() time
+ nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
+ manifest []byte // Manifest contents, temporary
+ manifestDigest digest.Digest // Valid if len(manifest) != 0
+ signatures []byte // Signature contents, temporary
+ signatureses map[digest.Digest][]byte // Instance signature contents, temporary
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
+
+ // A storage destination may be used concurrently. Accesses are
+ // serialized via a mutex. Please refer to the individual comments
+ // below for details.
+ lock sync.Mutex
+ // Mapping from layer (by index) to the associated ID in the storage.
+ // It's protected *implicitly* since `commitLayer()`, at any given
+ // time, can only be executed by *one* goroutine. Please refer to
+ // `queueOrCommit()` for further details on how the single-caller
+ // guarantee is implemented.
+ indexToStorageID map[int]*string
+ // All accesses to below data are protected by `lock` which is made
+ // *explicit* in the code.
+ blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+ fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
+ filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
+ currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
+ indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
+ blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
+ diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
+}
+
+// addedLayerInfo records data about a layer to use in this image.
+type addedLayerInfo struct {
+ digest digest.Digest
+ emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+ directory, err := tmpdir.MkDirBigFileTemp(sys, "storage")
+ if err != nil {
+ return nil, fmt.Errorf("creating a temporary directory: %w", err)
+ }
+ dest := &storageImageDestination{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ SupportedManifestMIMETypes: []string{
+ imgspecv1.MediaTypeImageManifest,
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+ },
+ // We ultimately have to decompress layers to populate trees on disk
+ // and need to explicitly ask for it here, so that the layers' MIME
+ // types can be set accordingly.
+ DesiredLayerCompression: types.PreserveOriginal,
+ AcceptsForeignLayerURLs: false,
+ MustMatchRuntimeOS: true,
+ IgnoresEmbeddedDockerReference: true, // Yes, we want the unmodified manifest
+ HasThreadSafePutBlob: true,
+ }),
+
+ imageRef: imageRef,
+ directory: directory,
+ signatureses: make(map[digest.Digest][]byte),
+ blobDiffIDs: make(map[digest.Digest]digest.Digest),
+ blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
+ fileSizes: make(map[digest.Digest]int64),
+ filenames: make(map[digest.Digest]string),
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
+ indexToStorageID: make(map[int]*string),
+ indexToAddedLayerInfo: make(map[int]addedLayerInfo),
+ diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
+ }
+ dest.Compat = impl.AddCompat(dest)
+ return dest, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (s *storageImageDestination) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up the temporary directory and additional layer store handlers.
+func (s *storageImageDestination) Close() error {
+ for _, al := range s.blobAdditionalLayer {
+ al.Release()
+ }
+ for _, v := range s.diffOutputs {
+ if v.Target != "" {
+ _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
+ }
+ }
+ return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+ return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
+ if err != nil {
+ return info, err
+ }
+
+ if options.IsConfig || options.LayerIndex == nil {
+ return info, nil
+ }
+
+ return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
+ digest: info.Digest,
+ emptyLayer: options.EmptyLayer,
+ })
+}
+
+// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) {
+ // Stores a layer or data blob in our temporary directory, checking that any information
+ // in the blobinfo matches the incoming data.
+ if blobinfo.Digest != "" {
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
+ }
+ }
+
+ // Set up to digest the blob if necessary, and count its size while saving it to a file.
+ filename := s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
+ }
+ defer file.Close()
+ counter := ioutils.NewWriteCounter(file)
+ stream = io.TeeReader(stream, counter)
+ digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
+ decompressed, err := archive.DecompressStream(stream)
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
+ }
+
+ diffID := digest.Canonical.Digester()
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using context.Context.
+ _, err = io.Copy(diffID.Hash(), decompressed)
+ decompressed.Close()
+ if err != nil {
+ return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
+ }
+
+ // Determine blob properties, and fail if information that we were given about the blob
+ // is known to be incorrect.
+ blobDigest := digester.Digest()
+ blobSize := blobinfo.Size
+ if blobSize < 0 {
+ blobSize = counter.Count
+ } else if blobinfo.Size != counter.Count {
+ return private.UploadedBlob{}, ErrBlobSizeMismatch
+ }
+
+ // Record information about the blob.
+ s.lock.Lock()
+ s.blobDiffIDs[blobDigest] = diffID.Digest()
+ s.fileSizes[blobDigest] = counter.Count
+ s.filenames[blobDigest] = filename
+ s.lock.Unlock()
+ // This is safe because we have just computed diffID, and blobDigest was either computed
+ // by us, or validated by the caller (usually copy.digestingReader).
+ options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+ return private.UploadedBlob{
+ Digest: blobDigest,
+ Size: blobSize,
+ }, nil
+}
+
+type zstdFetcher struct {
+ chunkAccessor private.BlobChunkAccessor
+ ctx context.Context
+ blobInfo types.BlobInfo
+}
+
+// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
+func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ newChunks := make([]private.ImageSourceChunk, 0, len(chunks))
+ for _, v := range chunks {
+ i := private.ImageSourceChunk{
+ Offset: v.Offset,
+ Length: v.Length,
+ }
+ newChunks = append(newChunks, i)
+ }
+ rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks)
+ if _, ok := err.(private.BadPartialRequestError); ok {
+ err = chunked.ErrBadRequest{}
+ }
+ return rc, errs, err
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
+ fetcher := zstdFetcher{
+ chunkAccessor: chunkAccessor,
+ ctx: ctx,
+ blobInfo: srcInfo,
+ }
+
+ differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ blobDigest := srcInfo.Digest
+
+ s.lock.Lock()
+ s.blobDiffIDs[blobDigest] = blobDigest
+ s.fileSizes[blobDigest] = 0
+ s.filenames[blobDigest] = ""
+ s.diffOutputs[blobDigest] = out
+ s.lock.Unlock()
+
+ return private.UploadedBlob{
+ Digest: blobDigest,
+ Size: srcInfo.Size,
+ }, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalBlobMatchesRequiredCompression(options) {
+ return false, private.ReusedBlob{}, nil
+ }
+ reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
+ if err != nil || !reused || options.LayerIndex == nil {
+ return reused, info, err
+ }
+
+ return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
+ digest: info.Digest,
+ emptyLayer: options.EmptyLayer,
+ })
+}
+
+// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ // lock the entire method as it executes fairly quickly
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if options.SrcRef != nil {
+ // Check if we have the layer in the underlying additional layer store.
+ aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String())
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
+ } else if err == nil {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
+ s.blobAdditionalLayer[digest] = aLayer
+ return true, private.ReusedBlob{
+ Digest: digest,
+ Size: aLayer.CompressedSize(),
+ }, nil
+ }
+ }
+
+ if digest == "" {
+ return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
+ }
+ if err := digest.Validate(); err != nil {
+ return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
+ }
+
+ // Check if we've already cached it in a file.
+ if size, ok := s.fileSizes[digest]; ok {
+ return true, private.ReusedBlob{
+ Digest: digest,
+ Size: size,
+ }, nil
+ }
+
+ // Check if we have a wasn't-compressed layer in storage that's based on that blob.
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err)
+ }
+ if len(layers) > 0 {
+ // Save this for completeness.
+ s.blobDiffIDs[digest] = layers[0].UncompressedDigest
+ return true, private.ReusedBlob{
+ Digest: digest,
+ Size: layers[0].UncompressedSize,
+ }, nil
+ }
+
+ // Check if we have a was-compressed layer in storage that's based on that blob.
+ layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err)
+ }
+ if len(layers) > 0 {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[digest] = layers[0].UncompressedDigest
+ return true, private.ReusedBlob{
+ Digest: digest,
+ Size: layers[0].CompressedSize,
+ }, nil
+ }
+
+ // Does the blob correspond to a known DiffID which we already have available?
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+ // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
+ if options.CanSubstitute || size != -1 {
+ if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest {
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+ return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
+ }
+ if len(layers) > 0 {
+ if size != -1 {
+ s.blobDiffIDs[digest] = layers[0].UncompressedDigest
+ return true, private.ReusedBlob{
+ Digest: digest,
+ Size: size,
+ }, nil
+ }
+ if !options.CanSubstitute {
+ return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
+ }
+ s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
+ return true, private.ReusedBlob{
+ Digest: uncompressedDigest,
+ Size: layers[0].UncompressedSize,
+ }, nil
+ }
+ }
+ }
+
+ // Nope, we don't have it.
+ return false, private.ReusedBlob{}, nil
+}
+
+// computeID computes a recommended image ID based on information we have so far. If
+// the manifest is not of a type that we recognize, we return an empty value, indicating
+// that since we don't have a recommendation, a random ID should be used if one needs
+// to be allocated.
+func (s *storageImageDestination) computeID(m manifest.Manifest) string {
+ // Build the diffID list. We need the decompressed sums that we've been calculating to
+ // fill in the DiffIDs. It's expected (but not enforced by us) that the number of
+ // diffIDs corresponds to the number of non-EmptyLayer entries in the history.
+ var diffIDs []digest.Digest
+ switch m := m.(type) {
+ case *manifest.Schema1:
+ // Build a list of the diffIDs we've generated for the non-throwaway FS layers,
+ // in reverse of the order in which they were originally listed.
+ for i, compat := range m.ExtractedV1Compatibility {
+ if compat.ThrowAway {
+ continue
+ }
+ blobSum := m.FSLayers[i].BlobSum
+ diffID, ok := s.blobDiffIDs[blobSum]
+ if !ok {
+ logrus.Infof("error looking up diffID for layer %q", blobSum.String())
+ return ""
+ }
+ diffIDs = append([]digest.Digest{diffID}, diffIDs...)
+ }
+ case *manifest.Schema2, *manifest.OCI1:
+ // We know the ID calculation for these formats doesn't actually use the diffIDs,
+ // so we don't need to populate the diffID list.
+ default:
+ return ""
+ }
+ id, err := m.ImageID(diffIDs)
+ if err != nil {
+ return ""
+ }
+ return id
+}
+
+// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
+// information out of it for Inspect().
+func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
+ if info.Digest == "" {
+ return nil, errors.New(`no digest supplied when reading blob`)
+ }
+ if err := info.Digest.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
+ }
+ // Assume it's a file, since we're only calling this from a place that expects to read files.
+ if filename, ok := s.filenames[info.Digest]; ok {
+ contents, err2 := os.ReadFile(filename)
+ if err2 != nil {
+ return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
+ }
+ return contents, nil
+ }
+ // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
+ return nil, errors.New("blob not found")
+}
+
+// queueOrCommit queues the specified layer to be committed to the storage.
+// If no other goroutine is already committing layers, the layer and all
+// subsequent layers (if already queued) will be committed to the storage.
+func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
+ // NOTE: whenever the code below is touched, make sure that all code
+ // paths unlock the lock and to unlock it exactly once.
+ //
+ // Conceptually, the code is divided in two stages:
+ //
+ // 1) Queue in work by marking the layer as ready to be committed.
+ // If at least one previous/parent layer with a lower index has
+ // not yet been committed, return early.
+ //
+ // 2) Process the queued-in work by committing the "ready" layers
+ // in sequence. Make sure that more items can be queued-in
+ // during the comparatively I/O expensive task of committing a
+ // layer.
+ //
+ // The conceptual benefit of this design is that caller can continue
+ // pulling layers after an early return. At any given time, only one
+ // caller is the "worker" routine committing layers. All other routines
+ // can continue pulling and queuing in layers.
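+ //
+ // For example (illustrative): with currentIndex == 2, a goroutine queuing layer 3 returns
+ // early; the goroutine that later queues layer 2 becomes the worker, commits layer 2, finds
+ // layer 3 already queued and commits it too, and finally sets currentIndex to 4.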
+ s.lock.Lock()
+ s.indexToAddedLayerInfo[index] = info
+
+ // We're still waiting for at least one previous/parent layer to be
+ // committed, so there's nothing to do.
+ if index != s.currentIndex {
+ s.lock.Unlock()
+ return nil
+ }
+
+ for {
+ info, ok := s.indexToAddedLayerInfo[index]
+ if !ok {
+ break
+ }
+ s.lock.Unlock()
+ // Note: commitLayer locks on-demand.
+ if err := s.commitLayer(index, info, -1); err != nil {
+ return err
+ }
+ s.lock.Lock()
+ index++
+ }
+
+ // Set the index at the very end to make sure that only one routine
+ // enters stage 2).
+ s.currentIndex = index
+ s.lock.Unlock()
+ return nil
+}
+
+// commitLayer commits the specified layer with the given index to the storage.
+// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
+//
+// Note that the previous layer is expected to already be committed.
+//
+// Caution: this function must be called without holding `s.lock`. Callers
+// must guarantee that, at any given time, at most one goroutine may execute
+// `commitLayer()`.
+func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
+ // Already committed? Return early.
+ if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
+ return nil
+ }
+
+ // Start with an empty string or the previous layer ID. Note that
+ // `s.indexToStorageID` can only be accessed by *one* goroutine at any
+ // given time. Hence, we don't need to lock accesses.
+ var lastLayer string
+ if prev := s.indexToStorageID[index-1]; prev != nil {
+ lastLayer = *prev
+ }
+
+ // Carry over the previous ID for empty non-base layers.
+ if info.emptyLayer {
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ // Check if there's already a layer with the ID that we'd give to the result of applying
+ // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+ s.lock.Lock()
+ diffID, haveDiffID := s.blobDiffIDs[info.digest]
+ s.lock.Unlock()
+ if !haveDiffID {
+ // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
+ // or to even check if we had it.
+ // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+ // that relies on using a blob digest that has never been seen by the store had better call
+ // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+ // so far we are going to accommodate that (if we should be doing that at all).
+ logrus.Debugf("looking for diffID for blob %+v", info.digest)
+ // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
+ has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
+ Cache: none.NoCache,
+ CanSubstitute: false,
+ })
+ if err != nil {
+ return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
+ }
+ if !has {
+ return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
+ }
+ diffID, haveDiffID = s.blobDiffIDs[info.digest]
+ if !haveDiffID {
+ return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
+ }
+ }
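+ // Compute the layer ID we will use: for a base layer it is simply the hex form of its diffID;
+ // otherwise it chains in the parent, digesting "<parent layer ID>+<diffID hex>", so that the
+ // same diff applied on top of different parents yields distinct layer IDs.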
+ id := diffID.Hex()
+ if lastLayer != "" {
+ id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+ }
+ if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+ // There's already a layer that should have the right contents, just reuse it.
+ lastLayer = layer.ID
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ s.lock.Lock()
+ diffOutput, ok := s.diffOutputs[info.digest]
+ s.lock.Unlock()
+ if ok {
+ layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
+ if err != nil {
+ return err
+ }
+
+ // FIXME: what to do with the uncompressed digest?
+ diffOutput.UncompressedDigest = info.digest
+
+ if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
+ _ = s.imageRef.transport.store.Delete(layer.ID)
+ return err
+ }
+
+ s.indexToStorageID[index] = &layer.ID
+ return nil
+ }
+
+ s.lock.Lock()
+ al, ok := s.blobAdditionalLayer[info.digest]
+ s.lock.Unlock()
+ if ok {
+ layer, err := al.PutAs(id, lastLayer, nil)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return fmt.Errorf("failed to put layer from digest and labels: %w", err)
+ }
+ lastLayer = layer.ID
+ s.indexToStorageID[index] = &lastLayer
+ return nil
+ }
+
+ // Check if we previously cached a file with that blob's contents. If we didn't,
+ // then we need to read the desired contents from a layer.
+ s.lock.Lock()
+ filename, ok := s.filenames[info.digest]
+ s.lock.Unlock()
+ if !ok {
+ // Try to find the layer with contents matching that blobsum.
+ layer := ""
+ layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ } else {
+ layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ }
+ }
+ if layer == "" {
+ return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
+ }
+ // Read the layer's contents.
+ noCompression := archive.Uncompressed
+ diffOptions := &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+ if err2 != nil {
+ return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
+ }
+ // Copy the layer diff to a file. Diff() takes a lock that it holds
+ // until the ReadCloser that it returns is closed, and PutLayer() wants
+ // the same lock, so the diff can't just be directly streamed from one
+ // to the other.
+ filename = s.computeNextBlobCacheFile()
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ diff.Close()
+ return fmt.Errorf("creating temporary file %q: %w", filename, err)
+ }
+ // Copy the data to the file.
+ // TODO: This can take quite some time, and should ideally be cancellable using
+ // ctx.Done().
+ _, err = io.Copy(file, diff)
+ diff.Close()
+ file.Close()
+ if err != nil {
+ return fmt.Errorf("storing blob to file %q: %w", filename, err)
+ }
+ // Make sure that we can find this file later, should we need the layer's
+ // contents again.
+ s.lock.Lock()
+ s.filenames[info.digest] = filename
+ s.lock.Unlock()
+ }
+ // Read the cached blob and use it as a diff.
+ file, err := os.Open(filename)
+ if err != nil {
+ return fmt.Errorf("opening file %q: %w", filename, err)
+ }
+ defer file.Close()
+ // Build the new layer using the diff, regardless of where it came from.
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
+ OriginalDigest: info.digest,
+ UncompressedDigest: diffID,
+ }, file)
+ if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+ return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
+ }
+
+ s.indexToStorageID[index] = &layer.ID
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+ if len(s.manifest) == 0 {
+ return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+ }
+ toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
+ if err != nil {
+ return fmt.Errorf("retrieving top-level manifest: %w", err)
+ }
+ // If the name we're saving to includes a digest, then check that the
+ // manifests that we're about to save all either match the one from the
+ // unparsedToplevel, or match the digest in the name that we're using.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ if !matches {
+ matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
+ if err != nil {
+ return err
+ }
+ }
+ if !matches {
+ return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
+ }
+ }
+ }
+ // Find the list of layer blobs.
+ man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+ if err != nil {
+ return fmt.Errorf("parsing manifest: %w", err)
+ }
+ layerBlobs := man.LayerInfos()
+
+ // Extract, commit, or find the layers.
+ for i, blob := range layerBlobs {
+ if err := s.commitLayer(i, addedLayerInfo{
+ digest: blob.Digest,
+ emptyLayer: blob.EmptyLayer,
+ }, blob.Size); err != nil {
+ return err
+ }
+ }
+ var lastLayer string
+ if len(layerBlobs) > 0 { // Can happen when using caches
+ prev := s.indexToStorageID[len(layerBlobs)-1]
+ if prev == nil {
+ return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
+ }
+ lastLayer = *prev
+ }
+
+ // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
+ // was originally created, in case we're just copying it. If not, no harm done.
+ options := &storage.ImageOptions{}
+ if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
+ logrus.Debugf("setting image creation date to %s", inspect.Created)
+ options.CreationDate = *inspect.Created
+ }
+
+ // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
+ // we just need to screen out the ones that are actually layers to get the list of non-layers.
+ dataBlobs := set.New[digest.Digest]()
+ for blob := range s.filenames {
+ dataBlobs.Add(blob)
+ }
+ for _, layerBlob := range layerBlobs {
+ dataBlobs.Delete(layerBlob.Digest)
+ }
+ for _, blob := range dataBlobs.Values() {
+ v, err := os.ReadFile(s.filenames[blob])
+ if err != nil {
+ return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
+ }
+ options.BigData = append(options.BigData, storage.ImageBigDataOption{
+ Key: blob.String(),
+ Data: v,
+ Digest: digest.Canonical.FromBytes(v),
+ })
+ }
+ // Set up to save the unparsedToplevel's manifest if it differs from
+ // the per-platform one, which is saved below.
+ if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
+ manifestDigest, err := manifest.Digest(toplevelManifest)
+ if err != nil {
+ return fmt.Errorf("digesting top-level manifest: %w", err)
+ }
+ options.BigData = append(options.BigData, storage.ImageBigDataOption{
+ Key: manifestBigDataKey(manifestDigest),
+ Data: toplevelManifest,
+ Digest: manifestDigest,
+ })
+ }
+ // Set up to save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
+ // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
+ // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
+ options.BigData = append(options.BigData, storage.ImageBigDataOption{
+ Key: manifestBigDataKey(s.manifestDigest),
+ Data: s.manifest,
+ Digest: s.manifestDigest,
+ })
+ options.BigData = append(options.BigData, storage.ImageBigDataOption{
+ Key: storage.ImageDigestBigDataKey,
+ Data: s.manifest,
+ Digest: s.manifestDigest,
+ })
+ // Set up to save the signatures, if we have any.
+ if len(s.signatures) > 0 {
+ options.BigData = append(options.BigData, storage.ImageBigDataOption{
+ Key: "signatures",
+ Data: s.signatures,
+ Digest: digest.Canonical.FromBytes(s.signatures),
+ })
+ }
+ for instanceDigest, signatures := range s.signatureses {
+ options.BigData = append(options.BigData, storage.ImageBigDataOption{
+ Key: signatureBigDataKey(instanceDigest),
+ Data: signatures,
+ Digest: digest.Canonical.FromBytes(signatures),
+ })
+ }
+
+ // Set up to save our metadata.
+ metadata, err := json.Marshal(s)
+ if err != nil {
+ return fmt.Errorf("encoding metadata for image: %w", err)
+ }
+ if len(metadata) != 0 {
+ options.Metadata = string(metadata)
+ }
+
+ // Create the image record, pointing to the most-recently added layer.
+ intendedID := s.imageRef.id
+ if intendedID == "" {
+ intendedID = s.computeID(man)
+ }
+ oldNames := []string{}
+ img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
+ if err != nil {
+ if !errors.Is(err, storage.ErrDuplicateID) {
+ logrus.Debugf("error creating image: %q", err)
+ return fmt.Errorf("creating image %q: %w", intendedID, err)
+ }
+ img, err = s.imageRef.transport.store.Image(intendedID)
+ if err != nil {
+ return fmt.Errorf("reading image %q: %w", intendedID, err)
+ }
+ if img.TopLayer != lastLayer {
+ logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
+ return fmt.Errorf("image with ID %q already exists, but uses a different top layer: %w", intendedID, storage.ErrDuplicateID)
+ }
+ logrus.Debugf("reusing image ID %q", img.ID)
+ oldNames = append(oldNames, img.Names...)
+ // set the data items and metadata on the already-present image
+ // FIXME: this _replaces_ any "signatures" blobs and their
+ // sizes (tracked in the metadata) which might have already
+ // been present with new values, when ideally we'd find a way
+ // to merge them since they all apply to the same image
+ for _, data := range options.BigData {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil {
+ logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err)
+ return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err)
+ }
+ }
+ if options.Metadata != "" {
+ if err := s.imageRef.transport.store.SetMetadata(img.ID, options.Metadata); err != nil {
+ logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
+ return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
+ }
+ logrus.Debugf("saved image metadata %q", options.Metadata)
+ }
+ } else {
+ logrus.Debugf("created new image ID %q with metadata %q", img.ID, options.Metadata)
+ }
+
+ // Clean up the unfinished image on any error.
+ // (Is this the right thing to do if the image has existed before?)
+ commitSucceeded := false
+ defer func() {
+ if !commitSucceeded {
+ logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames)
+ if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil {
+ logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err)
+ }
+ }
+ }()
+
+ // Add the reference's name on the image. We don't need to worry about avoiding duplicate
+ // values because AddNames() will deduplicate the list that we pass to it.
+ if name := s.imageRef.DockerReference(); name != nil {
+ if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil {
+ return fmt.Errorf("adding names %v to image %q: %w", name, img.ID, err)
+ }
+ logrus.Debugf("added name %q to image %q", name, img.ID)
+ }
+
+ commitSucceeded = true
+ return nil
+}
+
+// PutManifest writes the manifest to the destination.
+func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+ digest, err := manifest.Digest(manifestBlob)
+ if err != nil {
+ return err
+ }
+ s.manifest = slices.Clone(manifestBlob)
+ s.manifestDigest = digest
+ return nil
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+ sizes := []int{}
+ sigblob := []byte{}
+ for _, sigWithFormat := range signatures {
+ sig, err := signature.Blob(sigWithFormat)
+ if err != nil {
+ return err
+ }
+ sizes = append(sizes, len(sig))
+ sigblob = append(sigblob, sig...)
+ }
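+ // At this point sigblob is simply the concatenation of every signature payload,
+ // and sizes records each payload's length; e.g. (sizes assumed for illustration)
+ // two signatures of 100 and 250 bytes become one 350-byte blob with
+ // sizes == [100, 250], which readers later split apart again using those
+ // recorded lengths.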
+ if instanceDigest == nil {
+ s.signatures = sigblob
+ s.SignatureSizes = sizes
+ if len(s.manifest) > 0 {
+ manifestDigest := s.manifestDigest
+ instanceDigest = &manifestDigest
+ }
+ }
+ if instanceDigest != nil {
+ s.signatureses[*instanceDigest] = sigblob
+ s.SignaturesSizes[*instanceDigest] = sizes
+ }
+ return nil
+}
diff --git a/storage/storage_image.go b/storage/storage_image.go
new file mode 100644
index 0000000..ac09f3d
--- /dev/null
+++ b/storage/storage_image.go
@@ -0,0 +1,59 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "context"
+
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrNoSuchImage is returned when we attempt to access an image which
+ // doesn't exist in the storage area.
+ ErrNoSuchImage = storage.ErrNotAnImage
+)
+
+type storageImageCloser struct {
+ types.ImageCloser
+ size int64
+}
+
+// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
+func manifestBigDataKey(digest digest.Digest) string {
+ return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
+// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably.
+func signatureBigDataKey(digest digest.Digest) string {
+ return "signature-" + digest.Encoded()
+}
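+
+// As an illustration, for a digest sha256:abcd… manifestBigDataKey yields
+// "<ImageDigestManifestBigDataNamePrefix>-sha256:abcd…" (the prefix constant is
+// defined by containers/storage), while signatureBigDataKey yields
+// "signature-abcd…"; the two key families keep per-digest manifests and
+// per-digest signatures apart among an image's big-data items.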
+
+// Size() returns the previously-computed size of the image, with no error.
+func (s *storageImageCloser) Size() (int64, error) {
+ return s.size, nil
+}
+
+// newImage creates an image that also knows its size
+func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) {
+ src, err := newImageSource(sys, s)
+ if err != nil {
+ return nil, err
+ }
+ img, err := image.FromSource(ctx, sys, src)
+ if err != nil {
+ return nil, err
+ }
+ size, err := src.getSize()
+ if err != nil {
+ return nil, err
+ }
+ return &storageImageCloser{ImageCloser: img, size: size}, nil
+}
diff --git a/storage/storage_reference.go b/storage/storage_reference.go
new file mode 100644
index 0000000..a55e340
--- /dev/null
+++ b/storage/storage_reference.go
@@ -0,0 +1,316 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/slices"
+)
+
+// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
+// value hex-encoded into a 64-character string, and a reference to a Store
+// where an image is, or would be, kept.
+// Either "named" or "id" must be set.
+type storageReference struct {
+ transport storageTransport
+ named reference.Named // may include a tag and/or a digest
+ id string
+}
+
+func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) {
+ if named == nil && id == "" {
+ return nil, ErrInvalidReference
+ }
+ if named != nil && reference.IsNameOnly(named) {
+ return nil, fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference)
+ }
+ if id != "" {
+ if err := validateImageID(id); err != nil {
+ return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference)
+ }
+ }
+ // We take a copy of the transport, which contains a pointer to the
+ // store that it used for resolving this reference, so that the
+ // transport that we'll return from Transport() won't be affected by
+ // further calls to the original transport's SetStore() method.
+ return &storageReference{
+ transport: transport,
+ named: named,
+ id: id,
+ }, nil
+}
+
+// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
+func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
+ repo := ref.Name()
+ return slices.ContainsFunc(image.Names, func(name string) bool {
+ if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo {
+ return true
+ }
+ return false
+ })
+}
+
+// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
+// multi-arch manifest that matches the passed-in digest, and the image is the per-platform
+// image instance that matches sys.
+//
+// See the comment in storageReference.ResolveImage explaining why
+// this check is necessary.
+func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
+ // Load the manifest that matches the specified digest.
+ // We don't need to care about storage.ImageDigestBigDataKey because
+ // manifest lists are only stored into storage by c/image versions
+ // that know about manifestBigDataKey, and only using that key.
+ key := manifestBigDataKey(manifestDigest)
+ manifestBytes, err := store.ImageBigData(img.ID, key)
+ if err != nil {
+ return false
+ }
+ // The manifest is either a list, or not a list. If it's a list, find
+ // the digest of the instance that matches the current system, and try
+ // to load that manifest from the image record, and use it.
+ manifestType := manifest.GuessMIMEType(manifestBytes)
+ if !manifest.MIMETypeIsMultiImage(manifestType) {
+ // manifestDigest directly specifies a per-platform image, so we aren't
+ // choosing among different variants.
+ return false
+ }
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return false
+ }
+ chosenInstance, err := list.ChooseInstance(sys)
+ if err != nil {
+ return false
+ }
+ key = manifestBigDataKey(chosenInstance)
+ _, err = store.ImageBigData(img.ID, key)
+ return err == nil // true if img.ID is based on chosenInstance.
+}
+
+// Resolve the reference's name to an image ID in the store, if there's already
+// one present with the same name or ID, and return the image.
+//
+// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
+func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
+ var loadedImage *storage.Image
+ if s.id == "" && s.named != nil {
+ // Look for an image that has the expanded reference name as an explicit Name value.
+ image, err := s.transport.store.Image(s.named.String())
+ if image != nil && err == nil {
+ loadedImage = image
+ s.id = image.ID
+ }
+ }
+ if s.id == "" && s.named != nil {
+ if digested, ok := s.named.(reference.Digested); ok {
+ // Look for an image with the specified digest that has the same name,
+ // though possibly with a different tag or digest, as a Name value, so
+ // that the canonical reference can be implicitly resolved to the image.
+ //
+ // Typically there should be at most one such image, because the same
+ // manifest digest implies the same config, and we choose the storage ID
+ // based on the config (deduplicating images), except:
+ // - the user can explicitly specify an ID when creating the image.
+ // In this case we don't have a preference among the alternatives.
+ // - when pulling an image from a multi-platform manifest list, we also
+ // store the manifest list in the image; this allows referencing a
+ // per-platform image using the manifest list digest, but that also
+ // means that we can have multiple genuinely different images in the
+ // storage matching the same manifest list digest (if pulled using different
+ // SystemContext.{OS,Architecture,Variant}Choice to the same storage).
+ // In this case we prefer the image matching the current SystemContext.
+ images, err := s.transport.store.ImagesByDigest(digested.Digest())
+ if err == nil && len(images) > 0 {
+ for _, image := range images {
+ if imageMatchesRepo(image, s.named) {
+ if loadedImage == nil || multiArchImageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) {
+ loadedImage = image
+ s.id = image.ID
+ }
+ }
+ }
+ }
+ }
+ }
+ if s.id == "" {
+ logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
+ return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
+ }
+ if loadedImage == nil {
+ img, err := s.transport.store.Image(s.id)
+ if err != nil {
+ return nil, fmt.Errorf("reading image %q: %w", s.id, err)
+ }
+ loadedImage = img
+ }
+ if s.named != nil {
+ if !imageMatchesRepo(loadedImage, s.named) {
+ logrus.Errorf("no image matching reference %q found", s.StringWithinTransport())
+ return nil, ErrNoSuchImage
+ }
+ }
+ // Default to having the image digest that we hand back match the most recently
+ // added manifest...
+ if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok {
+ loadedImage.Digest = digest
+ }
+ // ... unless the named reference says otherwise, and it matches one of the digests
+ // in the image. For those cases, set the Digest field to that value, for the
+ // sake of older consumers that don't know there's a whole list in there now.
+ if s.named != nil {
+ if digested, ok := s.named.(reference.Digested); ok {
+ digest := digested.Digest()
+ if slices.Contains(loadedImage.Digests, digest) {
+ loadedImage.Digest = digest
+ }
+ }
+ }
+ return loadedImage, nil
+}
+
+// Return a Transport object that defaults to using the same store that we used
+// to build this reference object.
+func (s storageReference) Transport() types.ImageTransport {
+ return &storageTransport{
+ store: s.transport.store,
+ defaultUIDMap: s.transport.defaultUIDMap,
+ defaultGIDMap: s.transport.defaultGIDMap,
+ }
+}
+
+// Return a name with a tag or digest, if we have either, else return it bare.
+func (s storageReference) DockerReference() reference.Named {
+ return s.named
+}
+
+// Return a name with a tag, prefixed with the graph root and driver name, to
+// disambiguate between images which may be present in multiple stores and
+// share only their names.
+func (s storageReference) StringWithinTransport() string {
+ optionsList := ""
+ options := s.transport.store.GraphOptions()
+ if len(options) > 0 {
+ optionsList = ":" + strings.Join(options, ",")
+ }
+ res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
+ if s.named != nil {
+ res += s.named.String()
+ }
+ if s.id != "" {
+ res += "@" + s.id
+ }
+ return res
+}
+
+func (s storageReference) PolicyConfigurationIdentity() string {
+ res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
+ if s.named != nil {
+ res += s.named.String()
+ }
+ if s.id != "" {
+ res += "@" + s.id
+ }
+ return res
+}
+
+// Also accept policy that's tied to the combination of the graph root and
+// driver name, to apply to all images stored in the Store, and to just the
+// graph root, in case we're using multiple drivers in the same directory for
+// some reason.
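+//
+// For example, a reference parsed from "busybox@<id>" against an overlay store
+// rooted at /var/lib/containers/storage (an assumed path) yields, from most to
+// least specific:
+//
+//   [overlay@/var/lib/containers/storage]docker.io/library/busybox:latest
+//   [overlay@/var/lib/containers/storage]docker.io/library/busybox
+//   [overlay@/var/lib/containers/storage]docker.io/library
+//   [overlay@/var/lib/containers/storage]docker.io
+//   [overlay@/var/lib/containers/storage]
+//   [/var/lib/containers/storage]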
+func (s storageReference) PolicyConfigurationNamespaces() []string {
+ storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
+ driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
+ namespaces := []string{}
+ if s.named != nil {
+ if s.id != "" {
+ // The reference without the ID is also a valid namespace.
+ namespaces = append(namespaces, storeSpec+s.named.String())
+ }
+ tagged, isTagged := s.named.(reference.Tagged)
+ _, isDigested := s.named.(reference.Digested)
+ if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace.
+ namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag())
+ }
+ components := strings.Split(s.named.Name(), "/")
+ for len(components) > 0 {
+ namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
+ components = components[:len(components)-1]
+ }
+ }
+ namespaces = append(namespaces, storeSpec)
+ namespaces = append(namespaces, driverlessStoreSpec)
+ return namespaces
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return newImage(ctx, sys, s)
+}
+
+func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ img, err := s.resolveImage(sys)
+ if err != nil {
+ return err
+ }
+ layers, err := s.transport.store.DeleteImage(img.ID, true)
+ if err == nil {
+ logrus.Debugf("deleted image %q", img.ID)
+ for _, layer := range layers {
+ logrus.Debugf("deleted layer %q", layer)
+ }
+ }
+ return err
+}
+
+func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(sys, s)
+}
+
+func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(sys, s)
+}
+
+// ResolveReference finds the underlying storage image for a storage.Transport reference.
+// It returns that image, and an updated reference which can be used to refer back to the _same_
+// image again.
+//
+// This matters if the input reference contains a tagged name; the destination of the tag can
+// move in local storage. The updated reference returned by this function contains the resolved
+// image ID, so later uses of that updated reference will either continue to refer to the same
+// image, or fail.
+//
+// Note that it _is_ possible for the later uses to fail, either because the image was removed
+// completely, or because the name used in the reference was untaged (even if the underlying image
+// ID still exists in local storage).
+//
+// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
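+//
+// Illustrative usage (a sketch, not a prescribed pattern): pin a tag to the image
+// it currently resolves to, so that later operations keep targeting that image:
+//
+//	ref, err := Transport.ParseReference("busybox:latest")
+//	// handle err …
+//	resolved, img, err := ResolveReference(ref)
+//	// handle err; resolved embeds img.ID, so it keeps referring to this exact
+//	// image even if the busybox:latest tag is later moved or removed.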
+func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) {
+ sref, ok := ref.(*storageReference)
+ if !ok {
+ return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(),
+ transports.ImageName(ref))
+ }
+ clone := *sref // A shallow copy we can update
+ img, err := clone.resolveImage(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ return clone, img, nil
+}
diff --git a/storage/storage_reference_test.go b/storage/storage_reference_test.go
new file mode 100644
index 0000000..32590a0
--- /dev/null
+++ b/storage/storage_reference_test.go
@@ -0,0 +1,196 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewReference(t *testing.T) {
+ newStore(t)
+ st, ok := Transport.(*storageTransport)
+ require.True(t, ok)
+ // Success is tested throughout; test only the failure
+ _, err := newReference(*st, nil, "")
+ assert.Error(t, err)
+ _, err = newReference(*st, nil, "ab")
+ assert.Error(t, err)
+ ref, err := reference.ParseNormalizedNamed("busybox")
+ require.NoError(t, err)
+ _, err = newReference(*st, ref, "")
+ assert.Error(t, err)
+}
+
+func TestStorageReferenceTransport(t *testing.T) {
+ newStore(t)
+ ref, err := Transport.ParseReference("busybox")
+ require.NoError(t, err)
+ transport := ref.Transport()
+ st, ok := transport.(*storageTransport)
+ require.True(t, ok)
+ assert.Equal(t, *(Transport.(*storageTransport)), *st)
+}
+
+// A common list of reference formats to test for the various ImageReference methods.
+var validReferenceTestCases = []struct {
+ input, dockerRef, canonical string
+ namespaces []string
+}{
+ {
+ "busybox", "docker.io/library/busybox:latest", "docker.io/library/busybox:latest",
+ []string{"docker.io/library/busybox", "docker.io/library", "docker.io"},
+ },
+ {
+ "example.com/myns/ns2/busybox:notlatest", "example.com/myns/ns2/busybox:notlatest", "example.com/myns/ns2/busybox:notlatest",
+ []string{"example.com/myns/ns2/busybox", "example.com/myns/ns2", "example.com/myns", "example.com"},
+ },
+ {
+ "@" + sha256digestHex, "", "@" + sha256digestHex,
+ []string{},
+ },
+ {
+ "busybox@" + sha256digestHex, "docker.io/library/busybox:latest", "docker.io/library/busybox:latest@" + sha256digestHex,
+ []string{"docker.io/library/busybox:latest", "docker.io/library/busybox", "docker.io/library", "docker.io"},
+ },
+ {
+ "busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex,
+ []string{"docker.io/library/busybox", "docker.io/library", "docker.io"},
+ },
+ {
+ "busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", "docker.io/library/busybox:notlatest@" + sha256digestHex,
+ []string{"docker.io/library/busybox:notlatest", "docker.io/library/busybox", "docker.io/library", "docker.io"},
+ },
+ {
+ "busybox:notlatest@sha256:" + sha256digestHex, "docker.io/library/busybox:notlatest@sha256:" + sha256digestHex, "docker.io/library/busybox:notlatest@sha256:" + sha256digestHex,
+ []string{"docker.io/library/busybox:notlatest", "docker.io/library/busybox", "docker.io/library", "docker.io"},
+ },
+ {
+ "busybox@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox@" + sha256Digest2, "docker.io/library/busybox@" + sha256Digest2 + "@" + sha256digestHex,
+ []string{"docker.io/library/busybox@" + sha256Digest2, "docker.io/library/busybox", "docker.io/library", "docker.io"},
+ },
+ {
+ "busybox:notlatest@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox:notlatest@" + sha256Digest2, "docker.io/library/busybox:notlatest@" + sha256Digest2 + "@" + sha256digestHex,
+ []string{"docker.io/library/busybox:notlatest@" + sha256Digest2, "docker.io/library/busybox:notlatest", "docker.io/library/busybox", "docker.io/library", "docker.io"},
+ },
+}
+
+func TestStorageReferenceDockerReference(t *testing.T) {
+ newStore(t)
+ for _, c := range validReferenceTestCases {
+ ref, err := Transport.ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ if c.dockerRef != "" {
+ dr := ref.DockerReference()
+ require.NotNil(t, dr, c.input)
+ assert.Equal(t, c.dockerRef, dr.String(), c.input)
+ } else {
+ dr := ref.DockerReference()
+ assert.Nil(t, dr, c.input)
+ }
+ }
+}
+
+// The […] prefix of references created for the given store
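+// With the vfs-backed store created by newStore this is something like
+// "[vfs@<tmpdir>/root+<tmpdir>/run]", where <tmpdir> comes from t.TempDir() and
+// so differs on every run.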
+func storeSpecForStringWithinTransport(store storage.Store) string {
+ optionsList := ""
+ options := store.GraphOptions()
+ if len(options) > 0 {
+ optionsList = ":" + strings.Join(options, ",")
+ }
+ return fmt.Sprintf("[%s@%s+%s%s]", store.GraphDriverName(), store.GraphRoot(), store.RunRoot(), optionsList)
+}
+
+func TestStorageReferenceStringWithinTransport(t *testing.T) {
+ store := newStore(t)
+ storeSpec := storeSpecForStringWithinTransport(store)
+
+ for _, c := range validReferenceTestCases {
+ ref, err := Transport.ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ assert.Equal(t, storeSpec+c.canonical, ref.StringWithinTransport(), c.input)
+ }
+}
+
+func TestStorageReferencePolicyConfigurationIdentity(t *testing.T) {
+ store := newStore(t)
+ storeSpec := fmt.Sprintf("[%s@%s]", store.GraphDriverName(), store.GraphRoot())
+
+ for _, c := range validReferenceTestCases {
+ ref, err := Transport.ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ assert.Equal(t, storeSpec+c.canonical, ref.PolicyConfigurationIdentity(), c.input)
+ }
+}
+
+func TestStorageReferencePolicyConfigurationNamespaces(t *testing.T) {
+ store := newStore(t)
+ storeSpec := fmt.Sprintf("[%s@%s]", store.GraphDriverName(), store.GraphRoot())
+
+ for _, c := range validReferenceTestCases {
+ ref, err := Transport.ParseReference(c.input)
+ require.NoError(t, err, c.input)
+ expectedNS := []string{}
+ for _, ns := range c.namespaces {
+ expectedNS = append(expectedNS, storeSpec+ns)
+ }
+ expectedNS = append(expectedNS, storeSpec)
+ expectedNS = append(expectedNS, fmt.Sprintf("[%s]", store.GraphRoot()))
+ assert.Equal(t, expectedNS, ref.PolicyConfigurationNamespaces(), c.input)
+ }
+}
+
+// NewImage, NewImageSource, NewImageDestination, DeleteImage tested in storage_test.go
+
+func TestResolveReference(t *testing.T) {
+ // This is, so far, only a minimal smoke test
+
+ ensureTestCanCreateImages(t)
+
+ store := newStore(t)
+ storeSpec := storeSpecForStringWithinTransport(store)
+ cache := memory.New()
+
+ id := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+
+ // Create an image with a known name and ID
+ ref, err := Transport.ParseStoreReference(store, "test@"+id)
+ require.NoError(t, err)
+ createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Gzip)}, nil)
+
+ for _, c := range []struct {
+ input string
+ expected string // "" on error
+ }{
+ { // No ID match
+ "@bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "",
+ },
+ {"@" + id, "@" + id}, // ID-only lookup
+ {"test", "docker.io/library/test:latest@" + id}, // Name is resolved to include ID
+ {"nottest", ""}, // No name match
+ {"test@" + id, "docker.io/library/test:latest@" + id}, // Name+ID works, and is unchanged
+ {"nottest@" + id, ""}, // Name mismatch is rejected even with an ID
+ } {
+ input, err := Transport.ParseStoreReference(store, c.input)
+ require.NoError(t, err, c.input)
+ inputClone := *input
+ resolved, img, err := ResolveReference(input)
+ if c.expected == "" {
+ assert.Error(t, err, c.input)
+ } else {
+ require.NoError(t, err, c.input)
+ require.Equal(t, &inputClone, input) // input was not modified in-place
+ assert.Equal(t, id, img.ID, c.input)
+ assert.Equal(t, storeSpec+c.expected, resolved.StringWithinTransport(), c.input)
+ }
+ }
+}
diff --git a/storage/storage_src.go b/storage/storage_src.go
new file mode 100644
index 0000000..f1ce086
--- /dev/null
+++ b/storage/storage_src.go
@@ -0,0 +1,403 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/signature"
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+type storageImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ stubs.NoGetBlobAtInitialize
+
+ imageRef storageReference
+ image *storage.Image
+ systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
+ layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
+ cachedManifest []byte // A cached copy of the manifest, if already known, or nil
+ getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+ SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
+ // First, locate the image.
+ img, err := imageRef.resolveImage(sys)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build the reader object.
+ image := &storageImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef),
+
+ imageRef: imageRef,
+ systemContext: sys,
+ image: img,
+ layerPosition: make(map[digest.Digest]int),
+ SignatureSizes: []int{},
+ SignaturesSizes: make(map[digest.Digest][]int),
+ }
+ image.Compat = impl.AddCompat(image)
+ if img.Metadata != "" {
+ if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+ return nil, fmt.Errorf("decoding metadata for source image: %w", err)
+ }
+ }
+ return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+ return nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+ // We need a valid digest value.
+ digest := info.Digest
+ err = digest.Validate()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if digest == image.GzippedEmptyLayerDigest {
+ return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+ }
+
+ // Check if the blob corresponds to a diff that was used to initialize any layers. Our
+ // callers should try to retrieve layers using their uncompressed digests, so no need to
+ // check if they're using one of the compressed digests, which we can't reproduce anyway.
+ layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
+
+ // If it's not a layer, then it must be a data item.
+ if len(layers) == 0 {
+ b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, digest.String())
+ if err != nil {
+ return nil, 0, err
+ }
+ r := bytes.NewReader(b)
+ logrus.Debugf("exporting opaque data as blob %q", digest.String())
+ return io.NopCloser(r), int64(r.Len()), nil
+ }
+
+ // NOTE: the blob is first written to a temporary file and subsequently
+ // closed. The intention is to keep the time we own the storage lock
+ // as short as possible to allow other processes to access the storage.
+ rc, n, _, err = s.getBlobAndLayerID(digest, layers)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer rc.Close()
+
+ tmpFile, err := tmpdir.CreateBigFileTemp(s.systemContext, "")
+ if err != nil {
+ return nil, 0, err
+ }
+ success := false
+ tmpFileRemovePending := true
+ defer func() {
+ if !success {
+ tmpFile.Close()
+ if tmpFileRemovePending {
+ os.Remove(tmpFile.Name())
+ }
+ }
+ }()
+ // On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically
+ // cleaned up on process termination (or if the caller forgets to invoke Close())
+ // On older versions of Windows we will have to fall back to relying on the caller to invoke Close()
+ if err := os.Remove(tmpFile.Name()); err != nil {
+ tmpFileRemovePending = false
+ }
+
+ if _, err := io.Copy(tmpFile, rc); err != nil {
+ return nil, 0, err
+ }
+ if _, err := tmpFile.Seek(0, io.SeekStart); err != nil {
+ return nil, 0, err
+ }
+
+ success = true
+
+ if tmpFileRemovePending {
+ return ioutils.NewReadCloserWrapper(tmpFile, func() error {
+ tmpFile.Close()
+ return os.Remove(tmpFile.Name())
+ }), n, nil
+ }
+
+ return tmpFile, n, nil
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []storage.Layer) (rc io.ReadCloser, n int64, layerID string, err error) {
+ var layer storage.Layer
+ var diffOptions *storage.DiffOptions
+
+ // Step through the list of matching layers. Tests may want to verify that, if we have multiple layers
+ // which claim to have the same contents, we actually do have multiple layers, rather than just
+ // going ahead and using the first one every time.
+ s.getBlobMutex.Lock()
+ i := s.layerPosition[digest]
+ s.layerPosition[digest] = i + 1
+ s.getBlobMutex.Unlock()
+ if len(layers) > 0 {
+ layer = layers[i%len(layers)]
+ }
+ // Force the storage layer to not try to match any compression that was used when the layer was first
+ // handed to it.
+ noCompression := archive.Uncompressed
+ diffOptions = &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ if layer.UncompressedSize < 0 {
+ n = -1
+ } else {
+ n = layer.UncompressedSize
+ }
+ logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, digest)
+ rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+ if err != nil {
+ return nil, -1, "", err
+ }
+ return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
+ if instanceDigest != nil {
+ key := manifestBigDataKey(*instanceDigest)
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
+ }
+ return blob, manifest.GuessMIMEType(blob), err
+ }
+ if len(s.cachedManifest) == 0 {
+ // The manifest is stored as a big data item.
+ // Prefer the manifest corresponding to the user-specified digest, if available.
+ if s.imageRef.named != nil {
+ if digested, ok := s.imageRef.named.(reference.Digested); ok {
+ key := manifestBigDataKey(digested.Digest())
+ blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+ return nil, "", err
+ }
+ if err == nil {
+ s.cachedManifest = blob
+ }
+ }
+ }
+ // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+ // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
+ if len(s.cachedManifest) == 0 {
+ cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
+ if err != nil {
+ return nil, "", err
+ }
+ s.cachedManifest = cachedBlob
+ }
+ }
+ return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+}
+
+// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
+// the image, after they've been decompressed.
+func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("reading image manifest for %q: %w", s.image.ID, err)
+ }
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ return nil, errors.New("can't copy layers for a manifest list (shouldn't be attempted)")
+ }
+ man, err := manifest.FromBlob(manifestBlob, manifestType)
+ if err != nil {
+ return nil, fmt.Errorf("parsing image manifest for %q: %w", s.image.ID, err)
+ }
+
+ uncompressedLayerType := ""
+ switch manifestType {
+ case imgspecv1.MediaTypeImageManifest:
+ uncompressedLayerType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+ }
+
+ physicalBlobInfos := []types.BlobInfo{}
+ layerID := s.image.TopLayer
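+ // Walk from the image's top layer down to the base via Parent links; prepending
+ // at each step leaves physicalBlobInfos ordered base-first, matching the
+ // bottom-to-top layer order used in manifests.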
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
+ }
+ if layer.UncompressedDigest == "" {
+ return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID)
+ }
+ if layer.UncompressedSize < 0 {
+ return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
+ }
+ blobInfo := types.BlobInfo{
+ Digest: layer.UncompressedDigest,
+ Size: layer.UncompressedSize,
+ MediaType: uncompressedLayerType,
+ }
+ physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
+ layerID = layer.Parent
+ }
+
+ res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
+ if err != nil {
+ return nil, fmt.Errorf("creating LayerInfosForCopy of image %q: %w", s.image.ID, err)
+ }
+ return res, nil
+}
+
+// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
+// but using layer data which we can actually produce — physicalInfos for non-empty layers,
+// and image.GzippedEmptyLayer for empty ones.
+// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
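+// For instance, manifest entries [non-empty, empty, empty, non-empty, empty] combined
+// with physical infos [P1, P2] produce [P1, empty, empty, P2, empty], consuming the
+// physical infos strictly in order (TestBuildLayerInfosForCopy exercises exactly this
+// shape).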
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+ nextPhysical := 0
+ res := make([]types.BlobInfo, len(manifestInfos))
+ for i, mi := range manifestInfos {
+ if mi.EmptyLayer {
+ res[i] = types.BlobInfo{
+ Digest: image.GzippedEmptyLayerDigest,
+ Size: int64(len(image.GzippedEmptyLayer)),
+ MediaType: mi.MediaType,
+ }
+ } else {
+ if nextPhysical >= len(physicalInfos) {
+ return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+ }
+ res[i] = physicalInfos[nextPhysical] // FIXME? Should we preserve more data in manifestInfos? Notably the current approach correctly removes zstd:chunked metadata annotations.
+ nextPhysical++
+ }
+ }
+ if nextPhysical != len(physicalInfos) {
+ return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+ }
+ return res, nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+ var offset int
+ signatureBlobs := []byte{}
+ signatureSizes := s.SignatureSizes
+ key := "signatures"
+ instance := "default instance"
+ if instanceDigest != nil {
+ signatureSizes = s.SignaturesSizes[*instanceDigest]
+ key = signatureBigDataKey(*instanceDigest)
+ instance = instanceDigest.Encoded()
+ }
+ if len(signatureSizes) > 0 {
+ data, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+ if err != nil {
+ return nil, fmt.Errorf("looking up signatures data for image %q (%s): %w", s.image.ID, instance, err)
+ }
+ signatureBlobs = data
+ }
+ res := []signature.Signature{}
+ for _, length := range signatureSizes {
+ if offset+length > len(signatureBlobs) {
+ return nil, fmt.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signatureBlobs), offset+length)
+ }
+ sig, err := signature.FromBlob(signatureBlobs[offset : offset+length])
+ if err != nil {
+ return nil, fmt.Errorf("parsing signature at (%d, %d): %w", offset, length, err)
+ }
+ res = append(res, sig)
+ offset += length
+ }
+ if offset != len(signatureBlobs) {
+ return nil, fmt.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatureBlobs)-offset)
+ }
+ return res, nil
+}
+
+// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) getSize() (int64, error) {
+ var sum int64
+ // Size up the data blobs.
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
+ if err != nil {
+ return -1, fmt.Errorf("reading image %q: %w", s.image.ID, err)
+ }
+ for _, dataName := range dataNames {
+ bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
+ if err != nil {
+ return -1, fmt.Errorf("reading data blob size %q for %q: %w", dataName, s.image.ID, err)
+ }
+ sum += bigSize
+ }
+ // Add the signature sizes.
+ for _, sigSize := range s.SignatureSizes {
+ sum += int64(sigSize)
+ }
+ // Walk the layer list.
+ layerID := s.image.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return -1, err
+ }
+ if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
+ return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+ }
+ sum += layer.UncompressedSize
+ if layer.Parent == "" {
+ break
+ }
+ layerID = layer.Parent
+ }
+ return sum, nil
+}
+
+// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) Size() (int64, error) {
+ return s.getSize()
+}
diff --git a/storage/storage_src_test.go b/storage/storage_src_test.go
new file mode 100644
index 0000000..adbcfe9
--- /dev/null
+++ b/storage/storage_src_test.go
@@ -0,0 +1,43 @@
+package storage
+
+import (
+ "testing"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBuildLayerInfosForCopy(t *testing.T) {
+ manifestInfos := []manifest.LayerInfo{
+ {BlobInfo: types.BlobInfo{Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: -1}, EmptyLayer: false},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Size: -1}, EmptyLayer: true},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: -1}, EmptyLayer: false},
+ {BlobInfo: types.BlobInfo{Digest: "sha256:5555555555555555555555555555555555555555555555555555555555555555", Size: -1}, EmptyLayer: true},
+ }
+ physicalInfos := []types.BlobInfo{
+ {Digest: "sha256:1111111111111111111111111111111111111111111111111111111111111111", Size: 111, MediaType: manifest.DockerV2Schema2LayerMediaType},
+ {Digest: "sha256:2222222222222222222222222222222222222222222222222222222222222222", Size: 222, MediaType: manifest.DockerV2Schema2LayerMediaType},
+ }
+
+ // Success
+ res, err := buildLayerInfosForCopy(manifestInfos, physicalInfos)
+ require.NoError(t, err)
+ assert.Equal(t, []types.BlobInfo{
+ {Digest: "sha256:1111111111111111111111111111111111111111111111111111111111111111", Size: 111, MediaType: manifest.DockerV2Schema2LayerMediaType},
+ {Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32},
+ {Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32},
+ {Digest: "sha256:2222222222222222222222222222222222222222222222222222222222222222", Size: 222, MediaType: manifest.DockerV2Schema2LayerMediaType},
+ {Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32},
+ }, res)
+
+ // PhysicalInfos too short
+ _, err = buildLayerInfosForCopy(manifestInfos, physicalInfos[:len(physicalInfos)-1])
+ assert.Error(t, err)
+
+ // PhysicalInfos too long
+ _, err = buildLayerInfosForCopy(manifestInfos, append(physicalInfos, physicalInfos[0]))
+ assert.Error(t, err)
+}
diff --git a/storage/storage_test.go b/storage/storage_test.go
new file mode 100644
index 0000000..fb5158e
--- /dev/null
+++ b/storage/storage_test.go
@@ -0,0 +1,722 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ imanifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ _ types.ImageDestination = &storageImageDestination{}
+ _ private.ImageDestination = (*storageImageDestination)(nil)
+ _ types.ImageSource = &storageImageSource{}
+ _ private.ImageSource = (*storageImageSource)(nil)
+ _ types.ImageReference = &storageReference{}
+ _ types.ImageTransport = &storageTransport{}
+)
+
+const (
+ layerSize = 12345
+)
+
+func TestMain(m *testing.M) {
+ if reexec.Init() {
+ return
+ }
+ debug := false
+ flag.BoolVar(&debug, "debug", false, "print debug statements")
+ flag.Parse()
+ if debug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ os.Exit(m.Run())
+}
+
+func newStoreWithGraphDriverOptions(t *testing.T, options []string) storage.Store {
+ wd := t.TempDir()
+ run := filepath.Join(wd, "run")
+ root := filepath.Join(wd, "root")
+ Transport.SetDefaultUIDMap([]idtools.IDMap{{
+ ContainerID: 0,
+ HostID: os.Getuid(),
+ Size: 1,
+ }})
+ Transport.SetDefaultGIDMap([]idtools.IDMap{{
+ ContainerID: 0,
+ HostID: os.Getgid(),
+ Size: 1,
+ }})
+ store, err := storage.GetStore(storage.StoreOptions{
+ RunRoot: run,
+ GraphRoot: root,
+ GraphDriverName: "vfs",
+ GraphDriverOptions: options,
+ UIDMap: Transport.DefaultUIDMap(),
+ GIDMap: Transport.DefaultGIDMap(),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ Transport.SetStore(store)
+ return store
+}
+
+func newStore(t *testing.T) storage.Store {
+ return newStoreWithGraphDriverOptions(t, []string{})
+}
+
+func TestParse(t *testing.T) {
+ store := newStore(t)
+
+ ref, err := Transport.ParseReference("test")
+ if err != nil {
+ t.Fatalf("ParseReference(%q) returned error %v", "test", err)
+ }
+ if ref == nil {
+ t.Fatalf("ParseReference returned nil reference")
+ }
+
+ ref, err = Transport.ParseStoreReference(store, "test")
+ if err != nil {
+ t.Fatalf("ParseStoreReference(%q) returned error %v", "test", err)
+ }
+
+ strRef := ref.StringWithinTransport()
+ ref, err = Transport.ParseReference(strRef)
+ if err != nil {
+ t.Fatalf("ParseReference(%q) returned error: %v", strRef, err)
+ }
+ if ref == nil {
+ t.Fatalf("ParseReference(%q) returned nil reference", strRef)
+ }
+
+ transport := storageTransport{
+ store: store,
+ defaultUIDMap: Transport.(*storageTransport).defaultUIDMap,
+ defaultGIDMap: Transport.(*storageTransport).defaultGIDMap,
+ }
+	references := []storageReference{
+ {
+ named: ref.(*storageReference).named,
+ id: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ transport: transport,
+ },
+ {
+ named: ref.(*storageReference).named,
+ transport: transport,
+ },
+ {
+ id: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ transport: transport,
+ },
+ {
+ named: ref.DockerReference(),
+ transport: transport,
+ },
+ }
+	for _, reference := range references {
+ s := reference.StringWithinTransport()
+ ref, err := Transport.ParseStoreReference(store, s)
+ if err != nil {
+			t.Fatalf("ParseReference(%q) returned error: %v", s, err)
+ }
+ if ref.id != reference.id {
+ t.Fatalf("ParseReference(%q) failed to extract ID", s)
+ }
+ if reference.named == nil {
+ if ref.named != nil {
+ t.Fatalf("ParseReference(%q) set non-nil named", s)
+ }
+ } else {
+ if ref.named.String() != reference.named.String() {
+ t.Fatalf("ParseReference(%q) failed to extract reference (%q!=%q)", s, ref.named.String(), reference.named.String())
+ }
+ }
+ }
+}
+
+func TestParseWithGraphDriverOptions(t *testing.T) {
+ optionLists := [][]string{
+ {},
+ {"vfs.ignore_chown_errors=true"},
+ {"vfs.ignore_chown_errors=false"},
+ }
+ for _, optionList := range optionLists {
+ store := newStoreWithGraphDriverOptions(t, optionList)
+ ref, err := Transport.ParseStoreReference(store, "test")
+ require.NoError(t, err, optionList)
+ require.NotNil(t, ref)
+ spec := ref.StringWithinTransport()
+ ref2, err := Transport.ParseReference(spec)
+ require.NoError(t, err)
+		require.NotNil(t, ref2)
+ sref, ok := ref2.(*storageReference)
+ require.True(t, ok, "transport %s", ref2.Transport().Name())
+ parsedOptions := sref.transport.store.GraphOptions()
+ assert.Equal(t, optionList, parsedOptions)
+ }
+}
+
+// makeLayerGoroutine writes to pwriter, and on success, updates uncompressedCount
+// before it terminates.
+func makeLayerGoroutine(pwriter io.Writer, uncompressedCount *int64, compression archive.Compression) error {
+ var uncompressed *ioutils.WriteCounter
+ if compression != archive.Uncompressed {
+ compressor, err := archive.CompressStream(pwriter, compression)
+ if err != nil {
+ return fmt.Errorf("compressing layer: %w", err)
+ }
+ defer compressor.Close()
+ uncompressed = ioutils.NewWriteCounter(compressor)
+ } else {
+ uncompressed = ioutils.NewWriteCounter(pwriter)
+ }
+ twriter := tar.NewWriter(uncompressed)
+ // defer twriter.Close()
+ // should be called here to correctly terminate the archive.
+	// We do not do that, to work around https://github.com/containers/storage/issues/1729 :
+ // tar-split runs a goroutine that consumes/forwards tar content and might access
+ // concurrently-freed objects if it sees a valid EOF marker.
+	// Instead, rely on raw EOF to terminate the goroutine.
+ // This depends on implementation details of tar.Writer (that it does not do any
+ // internal buffering).
+
+ buf := make([]byte, layerSize)
+ n, err := rand.Read(buf)
+ if err != nil {
+ return fmt.Errorf("reading tar data: %w", err)
+ }
+ if n != len(buf) {
+ return fmt.Errorf("short read reading tar data: %d < %d", n, len(buf))
+ }
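+	// Zero out a range of bytes so that the layer data is not entirely incompressible.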
+ for i := 1024; i < 2048; i++ {
+ buf[i] = 0
+ }
+
+ if err := twriter.WriteHeader(&tar.Header{
+ Name: "/random-single-file",
+ Mode: 0600,
+ Size: int64(len(buf)),
+ ModTime: time.Now(),
+ AccessTime: time.Now(),
+ ChangeTime: time.Now(),
+ Typeflag: tar.TypeReg,
+ }); err != nil {
+ return fmt.Errorf("Error writing tar header: %w", err)
+ }
+ n, err = twriter.Write(buf)
+ if err != nil {
+		return fmt.Errorf("Error writing tar data: %w", err)
+ }
+ if n != len(buf) {
+		return fmt.Errorf("Short write writing tar data: %d < %d", n, len(buf))
+ }
+ if err := twriter.Flush(); err != nil {
+ return fmt.Errorf("Error flushing output to tar archive: %w", err)
+ }
+ *uncompressedCount = uncompressed.Count
+ return nil
+}
+
+type testBlob struct {
+ compressedDigest digest.Digest
+ uncompressedSize int64
+ compressedSize int64
+ data []byte
+}
+
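+// makeLayer generates a layer tarball containing a single random file, using the requested
+// compression, and returns its digest, sizes, and raw contents.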
+func makeLayer(t *testing.T, compression archive.Compression) testBlob {
+ preader, pwriter := io.Pipe()
+ var uncompressedCount int64
+ go func() {
+ err := errors.New("Internal error: unexpected panic in makeLayer")
+		defer func() { // Note that this is not the same as {defer pwriter.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = pwriter.CloseWithError(err)
+ }()
+ err = makeLayerGoroutine(pwriter, &uncompressedCount, compression)
+ }()
+
+ tbuffer := bytes.Buffer{}
+ _, err := io.Copy(&tbuffer, preader)
+ require.NoError(t, err)
+ return testBlob{
+ compressedDigest: digest.SHA256.FromBytes(tbuffer.Bytes()),
+ uncompressedSize: uncompressedCount,
+ compressedSize: int64(tbuffer.Len()),
+ data: tbuffer.Bytes(),
+ }
+}
+
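+// storeBlob writes the blob to dest and returns a Schema2 descriptor for it with the given MIME type.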
+func (l testBlob) storeBlob(t *testing.T, dest types.ImageDestination, cache types.BlobInfoCache, mimeType string) manifest.Schema2Descriptor {
+ _, err := dest.PutBlob(context.Background(), bytes.NewReader(l.data), types.BlobInfo{
+ Size: l.compressedSize,
+ Digest: l.compressedDigest,
+ }, cache, false)
+ require.NoError(t, err)
+ return manifest.Schema2Descriptor{
+ MediaType: mimeType,
+ Size: l.compressedSize,
+ Digest: l.compressedDigest,
+ }
+}
+
+// ensureTestCanCreateImages skips the current test if it is not possible to create layers and images in a private store.
+func ensureTestCanCreateImages(t *testing.T) {
+ t.Helper()
+ switch runtime.GOOS {
+ case "darwin":
+ return // Due to https://github.com/containers/storage/pull/811 , c/storage can be used on macOS unprivileged.
+ case "linux":
+ if os.Geteuid() != 0 {
+ t.Skip("test requires root privileges on Linux")
+ }
+ default:
+ // Unknown, let’s leave the tests enabled so that this can be investigated when working on that architecture.
+ }
+}
+
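+// createUncommittedImageDest stores the layers and config (if any) into a new image destination
+// for ref, records a matching schema2 manifest, and returns the still-uncommitted destination
+// together with an unparsed image for that manifest.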
+func createUncommittedImageDest(t *testing.T, ref types.ImageReference, cache types.BlobInfoCache,
+ layers []testBlob, config *testBlob) (types.ImageDestination, types.UnparsedImage) {
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+
+ layerDescriptors := []manifest.Schema2Descriptor{}
+ for _, layer := range layers {
+ desc := layer.storeBlob(t, dest, cache, manifest.DockerV2Schema2LayerMediaType)
+ layerDescriptors = append(layerDescriptors, desc)
+ }
+	configDescriptor := manifest.Schema2Descriptor{} // a zero descriptor is used when no config blob is provided
+ if config != nil {
+ configDescriptor = config.storeBlob(t, dest, cache, manifest.DockerV2Schema2ConfigMediaType)
+ }
+
+ manifest := manifest.Schema2FromComponents(configDescriptor, layerDescriptors)
+ manifestBytes, err := manifest.Serialize()
+ require.NoError(t, err)
+ err = dest.PutManifest(context.Background(), manifestBytes, nil)
+ require.NoError(t, err)
+ unparsedToplevel := unparsedImage{
+ imageReference: nil,
+ manifestBytes: manifestBytes,
+ manifestType: manifest.MediaType,
+ signatures: nil,
+ }
+ return dest, &unparsedToplevel
+}
+
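+// createImage creates and commits an image at ref consisting of the given layers and optional config.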
+func createImage(t *testing.T, ref types.ImageReference, cache types.BlobInfoCache,
+ layers []testBlob, config *testBlob) {
+ dest, unparsedToplevel := createUncommittedImageDest(t, ref, cache, layers, config)
+ err := dest.Commit(context.Background(), unparsedToplevel)
+ require.NoError(t, err)
+ err = dest.Close()
+ require.NoError(t, err)
+}
+
+func TestWriteRead(t *testing.T) {
+ ensureTestCanCreateImages(t)
+
+ configBytes := []byte(`{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}`)
+ config := testBlob{
+ compressedDigest: digest.SHA256.FromBytes(configBytes),
+ uncompressedSize: int64(len(configBytes)),
+ compressedSize: int64(len(configBytes)),
+ data: configBytes,
+ }
+
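+	// The %lh/%ls/%li and %ch/%cs/%ci placeholders below are replaced with the layer/config
+	// digest, size, and hex ID, respectively, before each manifest is used.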
+ manifests := []string{
+ //`{
+ // "schemaVersion": 2,
+ // "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ // "config": {
+ // "mediaType": "application/vnd.oci.image.serialization.config.v1+json",
+ // "size": %cs,
+ // "digest": "%ch"
+ // },
+ // "layers": [
+ // {
+ // "mediaType": "application/vnd.oci.image.serialization.rootfs.tar.gzip",
+ // "digest": "%lh",
+ // "size": %ls
+ // }
+ // ]
+ //}`,
+ `{
+ "schemaVersion": 1,
+ "name": "test",
+ "tag": "latest",
+ "architecture": "amd64",
+ "fsLayers": [
+ {
+ "blobSum": "%lh"
+ }
+ ],
+ "history": [
+ {
+ "v1Compatibility": "{\"id\":\"%li\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":%ls}"
+ }
+ ]
+ }`,
+ `{
+ "schemaVersion": 2,
+ "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+ "config": {
+ "mediaType": "application/vnd.docker.container.image.v1+json",
+ "size": %cs,
+ "digest": "%ch"
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ "digest": "%lh",
+ "size": %ls
+ }
+ ]
+ }`,
+ }
+ // Start signatures with 0xA0 to fool internal/signature.FromBlob into thinking it is valid GPG
+ signatures := [][]byte{
+ []byte("\xA0Signature A"),
+ []byte("\xA0Signature B"),
+ }
+
+ newStore(t)
+ cache := memory.New()
+
+ ref, err := Transport.ParseReference("test")
+ require.NoError(t, err)
+
+ for _, manifestFmt := range manifests {
+ dest, err := ref.NewImageDestination(context.Background(), nil)
+ require.NoError(t, err)
+ require.Equal(t, ref.StringWithinTransport(), dest.Reference().StringWithinTransport())
+ t.Logf("supported manifest MIME types: %v", dest.SupportedManifestMIMETypes())
+ err = dest.SupportsSignatures(context.Background())
+ require.NoError(t, err)
+ t.Logf("compress layers: %v", dest.DesiredLayerCompression())
+ compression := archive.Uncompressed
+ if dest.DesiredLayerCompression() == types.Compress {
+ compression = archive.Gzip
+ }
+ layer := makeLayer(t, compression)
+ _ = layer.storeBlob(t, dest, cache, manifest.DockerV2Schema2LayerMediaType)
+ t.Logf("Wrote randomly-generated layer %q (%d/%d bytes) to destination", layer.compressedDigest, layer.compressedSize, layer.uncompressedSize)
+ _ = config.storeBlob(t, dest, cache, manifest.DockerV2Schema2ConfigMediaType)
+
+ manifest := strings.ReplaceAll(manifestFmt, "%lh", layer.compressedDigest.String())
+ manifest = strings.ReplaceAll(manifest, "%ch", config.compressedDigest.String())
+ manifest = strings.ReplaceAll(manifest, "%ls", fmt.Sprintf("%d", layer.compressedSize))
+ manifest = strings.ReplaceAll(manifest, "%cs", fmt.Sprintf("%d", config.compressedSize))
+ manifest = strings.ReplaceAll(manifest, "%li", layer.compressedDigest.Hex())
+ manifest = strings.ReplaceAll(manifest, "%ci", config.compressedDigest.Hex())
+ t.Logf("this manifest is %q", manifest)
+ err = dest.PutManifest(context.Background(), []byte(manifest), nil)
+ require.NoError(t, err)
+ err = dest.PutSignatures(context.Background(), signatures, nil)
+ require.NoError(t, err)
+ unparsedToplevel := unparsedImage{
+ imageReference: nil,
+ manifestBytes: []byte(manifest),
+ manifestType: imanifest.GuessMIMEType([]byte(manifest)),
+ signatures: signatures,
+ }
+ err = dest.Commit(context.Background(), &unparsedToplevel)
+ require.NoError(t, err)
+ err = dest.Close()
+ require.NoError(t, err)
+
+ img, err := ref.NewImage(context.Background(), nil)
+ require.NoError(t, err)
+ imageConfigInfo := img.ConfigInfo()
+ if imageConfigInfo.Digest != "" {
+ blob, err := img.ConfigBlob(context.Background())
+ require.NoError(t, err)
+ sum := digest.SHA256.FromBytes(blob)
+ assert.Equal(t, config.compressedDigest, sum)
+ assert.Len(t, blob, int(config.compressedSize))
+ }
+ layerInfos := img.LayerInfos()
+ assert.NotNil(t, layerInfos)
+ imageInfo, err := img.Inspect(context.Background())
+ require.NoError(t, err)
+ assert.False(t, imageInfo.Created.IsZero())
+
+ src, err := ref.NewImageSource(context.Background(), nil)
+ require.NoError(t, err)
+ if src.Reference().StringWithinTransport() != ref.StringWithinTransport() {
+ // As long as it's only the addition of an ID suffix, that's okay.
+ assert.True(t, strings.HasPrefix(src.Reference().StringWithinTransport(), ref.StringWithinTransport()+"@"))
+ }
+ _, manifestType, err := src.GetManifest(context.Background(), nil)
+ require.NoError(t, err)
+ t.Logf("this manifest's type appears to be %q", manifestType)
+ instanceDigest, err := imanifest.Digest([]byte(manifest))
+ require.NoError(t, err)
+ retrieved, _, err := src.GetManifest(context.Background(), &instanceDigest)
+ require.NoError(t, err)
+ assert.Equal(t, manifest, string(retrieved))
+ sigs, err := src.GetSignatures(context.Background(), nil)
+ require.NoError(t, err)
+ assert.Equal(t, signatures, sigs)
+ sigs2, err := src.GetSignatures(context.Background(), &instanceDigest)
+ require.NoError(t, err)
+ assert.Equal(t, sigs, sigs2)
+ for _, layerInfo := range layerInfos {
+ buf := bytes.Buffer{}
+ layer, size, err := src.GetBlob(context.Background(), layerInfo, cache)
+ require.NoError(t, err)
+ t.Logf("Decompressing blob %q, blob size = %d, layerInfo.Size = %d bytes", layerInfo.Digest, size, layerInfo.Size)
+ hasher := sha256.New()
+ compressed := ioutils.NewWriteCounter(hasher)
+ countedLayer := io.TeeReader(layer, compressed)
+ decompressed, err := archive.DecompressStream(countedLayer)
+ require.NoError(t, err)
+ n, err := io.Copy(&buf, decompressed)
+ require.NoError(t, err)
+ layer.Close()
+ if layerInfo.Size >= 0 {
+ assert.Equal(t, layerInfo.Size, compressed.Count)
+ assert.Equal(t, layerInfo.Size, n)
+ }
+ if size >= 0 {
+ assert.Equal(t, size, compressed.Count)
+ }
+ sum := hasher.Sum(nil)
+ assert.Equal(t, layerInfo.Digest, digest.NewDigestFromBytes(digest.SHA256, sum))
+ }
+ err = src.Close()
+ require.NoError(t, err)
+ err = img.Close()
+ require.NoError(t, err)
+ err = ref.DeleteImage(context.Background(), nil)
+ require.NoError(t, err)
+ }
+}
+
+func TestDuplicateName(t *testing.T) {
+ ensureTestCanCreateImages(t)
+
+ newStore(t)
+ cache := memory.New()
+
+ ref, err := Transport.ParseReference("test")
+ require.NoError(t, err)
+
+ createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Uncompressed)}, nil)
+ createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Gzip)}, nil)
+}
+
+func TestDuplicateID(t *testing.T) {
+ ensureTestCanCreateImages(t)
+
+ newStore(t)
+ cache := memory.New()
+
+ ref, err := Transport.ParseReference("@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
+ require.NoError(t, err)
+
+ createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Gzip)}, nil)
+
+ dest, unparsedToplevel := createUncommittedImageDest(t, ref, cache,
+ []testBlob{makeLayer(t, archive.Gzip)}, nil)
+ err = dest.Commit(context.Background(), unparsedToplevel)
+ require.Error(t, err)
+ assert.ErrorIs(t, err, storage.ErrDuplicateID)
+ err = dest.Close()
+ require.NoError(t, err)
+}
+
+func TestDuplicateNameID(t *testing.T) {
+ ensureTestCanCreateImages(t)
+
+ newStore(t)
+ cache := memory.New()
+
+ ref, err := Transport.ParseReference("test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
+ require.NoError(t, err)
+
+ createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Gzip)}, nil)
+
+ dest, unparsedToplevel := createUncommittedImageDest(t, ref, cache,
+ []testBlob{makeLayer(t, archive.Gzip)}, nil)
+ err = dest.Commit(context.Background(), unparsedToplevel)
+ require.Error(t, err)
+ assert.ErrorIs(t, err, storage.ErrDuplicateID)
+ err = dest.Close()
+ require.NoError(t, err)
+}
+
+func TestNamespaces(t *testing.T) {
+ newStore(t)
+
+ ref, err := Transport.ParseReference("test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
+ if err != nil {
+ t.Fatalf("ParseReference(%q) returned error %v", "test", err)
+ }
+ if ref == nil {
+ t.Fatalf("ParseReference returned nil reference")
+ }
+
+ namespaces := ref.PolicyConfigurationNamespaces()
+ for _, namespace := range namespaces {
+ t.Logf("namespace: %q", namespace)
+ err = Transport.ValidatePolicyConfigurationScope(namespace)
+		if err != nil {
+ t.Fatalf("ValidatePolicyConfigurationScope(%q) returned error: %v", namespace, err)
+ }
+ }
+ namespace := ref.StringWithinTransport()
+ t.Logf("ref: %q", namespace)
+ err = Transport.ValidatePolicyConfigurationScope(namespace)
+ if err != nil {
+ t.Fatalf("ValidatePolicyConfigurationScope(%q) returned error: %v", namespace, err)
+ }
+ for _, namespace := range []string{
+ "@beefee",
+ ":miracle",
+ ":miracle@beefee",
+ "@beefee:miracle",
+ } {
+ t.Logf("invalid ref: %q", namespace)
+ err = Transport.ValidatePolicyConfigurationScope(namespace)
+ if err == nil {
+ t.Fatalf("ValidatePolicyConfigurationScope(%q) should have failed", namespace)
+ }
+ }
+}
+
+func TestSize(t *testing.T) {
+ ensureTestCanCreateImages(t)
+
+ newStore(t)
+ cache := memory.New()
+
+ layer1 := makeLayer(t, archive.Gzip)
+ layer2 := makeLayer(t, archive.Gzip)
+ configBytes := []byte(`{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}`)
+ config := testBlob{
+ compressedDigest: digest.SHA256.FromBytes(configBytes),
+ uncompressedSize: int64(len(configBytes)),
+ compressedSize: int64(len(configBytes)),
+ data: configBytes,
+ }
+
+ ref, err := Transport.ParseReference("test")
+ require.NoError(t, err)
+
+ createImage(t, ref, cache, []testBlob{layer1, layer2}, &config)
+
+ img, err := ref.NewImage(context.Background(), nil)
+ require.NoError(t, err)
+ manifest, _, err := img.Manifest(context.Background())
+ require.NoError(t, err)
+
+ usize, err := img.Size()
+ require.NoError(t, err)
+	require.NotEqual(t, int64(-1), usize)
+
+ assert.Equal(t, config.compressedSize+layer1.uncompressedSize+layer2.uncompressedSize+2*int64(len(manifest)), usize)
+ err = img.Close()
+ require.NoError(t, err)
+}
+
+func TestDuplicateBlob(t *testing.T) {
+ ensureTestCanCreateImages(t)
+
+ newStore(t)
+ cache := memory.New()
+
+ ref, err := Transport.ParseReference("test")
+ require.NoError(t, err)
+
+ layer1 := makeLayer(t, archive.Gzip)
+ layer2 := makeLayer(t, archive.Gzip)
+ configBytes := []byte(`{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}`)
+ config := testBlob{
+ compressedDigest: digest.SHA256.FromBytes(configBytes),
+ uncompressedSize: int64(len(configBytes)),
+ compressedSize: int64(len(configBytes)),
+ data: configBytes,
+ }
+
+ createImage(t, ref, cache, []testBlob{layer1, layer2, layer1, layer2}, &config)
+
+ img, err := ref.NewImage(context.Background(), nil)
+ require.NoError(t, err)
+ src, err := ref.NewImageSource(context.Background(), nil)
+ require.NoError(t, err)
+ source, ok := src.(*storageImageSource)
+ require.True(t, ok)
+
+ layers := []string{}
+ layersInfo, err := img.LayerInfosForCopy(context.Background())
+ require.NoError(t, err)
+ for _, layerInfo := range layersInfo {
+ digestLayers, _ := source.imageRef.transport.store.LayersByUncompressedDigest(layerInfo.Digest)
+ rc, _, layerID, err := source.getBlobAndLayerID(layerInfo.Digest, digestLayers)
+ require.NoError(t, err)
+ _, err = io.Copy(io.Discard, rc)
+ require.NoError(t, err)
+ rc.Close()
+ layers = append(layers, layerID)
+ }
+ assert.Len(t, layers, 4)
+ for i, layerID := range layers {
+ for j, otherID := range layers {
+ if i != j && layerID == otherID {
+ t.Fatalf("Layer IDs are not unique: %v", layers)
+ }
+ }
+ }
+ err = src.Close()
+ require.NoError(t, err)
+ err = img.Close()
+ require.NoError(t, err)
+}
+
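+// unparsedImage is a minimal types.UnparsedImage implementation used by the tests above.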
+type unparsedImage struct {
+ imageReference types.ImageReference
+ manifestBytes []byte
+ manifestType string
+ signatures [][]byte
+}
+
+func (u *unparsedImage) Reference() types.ImageReference {
+ return u.imageReference
+}
+func (u *unparsedImage) Manifest(context.Context) ([]byte, string, error) {
+ return u.manifestBytes, u.manifestType, nil
+}
+func (u *unparsedImage) Signatures(context.Context) ([][]byte, error) {
+ return u.signatures, nil
+}
diff --git a/storage/storage_transport.go b/storage/storage_transport.go
new file mode 100644
index 0000000..deb500b
--- /dev/null
+++ b/storage/storage_transport.go
@@ -0,0 +1,416 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const (
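+	// minimumTruncatedIDLength is the shortest image-ID prefix that is resolved against the
+	// store; shorter values are treated as names (see ParseStoreReference).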
+ minimumTruncatedIDLength = 3
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+var (
+ // Transport is an ImageTransport that uses either a default
+	// storage.Store or one that it's explicitly told to use.
+ Transport StoreTransport = &storageTransport{}
+ // ErrInvalidReference is returned when ParseReference() is passed an
+ // empty reference.
+ ErrInvalidReference = errors.New("invalid reference")
+ // ErrPathNotAbsolute is returned when a graph root is not an absolute
+ // path name.
+ ErrPathNotAbsolute = errors.New("path name is not absolute")
+)
+
+// StoreTransport is an ImageTransport that uses a storage.Store to parse
+// references, either its own default or one that it's told to use.
+type StoreTransport interface {
+ types.ImageTransport
+ // SetStore sets the default store for this transport.
+ SetStore(storage.Store)
+ // GetStoreIfSet returns the default store for this transport, or nil if not set/determined yet.
+ GetStoreIfSet() storage.Store
+ // GetImage retrieves the image from the transport's store that's named
+ // by the reference.
+ // Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+ // this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+ // can return different images, with no way for the caller to "freeze" the storage.Image identity
+ // without discarding the name entirely.
+ //
+ // Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+ // c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
+ GetImage(types.ImageReference) (*storage.Image, error)
+ // GetStoreImage retrieves the image from a specified store that's named
+ // by the reference.
+ //
+ // Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+ // this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+ // can return different images, with no way for the caller to "freeze" the storage.Image identity
+ // without discarding the name entirely.
+ //
+ // Also, a StoreTransport reference already contains a store, so providing another one is redundant.
+ //
+ // Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+ // c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
+ GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
+ // ParseStoreReference parses a reference, overriding any store
+ // specification that it may contain.
+ ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
+ // NewStoreReference creates a reference for (named@ID) in store.
+ // either of name or ID can be unset; named must not be a reference.IsNameOnly.
+	// Either name or ID can be unset; named must not be a reference.IsNameOnly.
+ // SetDefaultUIDMap sets the default UID map to use when opening stores.
+ SetDefaultUIDMap(idmap []idtools.IDMap)
+ // SetDefaultGIDMap sets the default GID map to use when opening stores.
+ SetDefaultGIDMap(idmap []idtools.IDMap)
+ // DefaultUIDMap returns the default UID map used when opening stores.
+ DefaultUIDMap() []idtools.IDMap
+ // DefaultGIDMap returns the default GID map used when opening stores.
+ DefaultGIDMap() []idtools.IDMap
+}
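+
+// A minimal, illustrative sketch of how a caller typically uses this transport (assuming a
+// default store can be initialized; error handling elided):
+//
+//	ref, _ := storage.Transport.ParseReference("busybox:latest")
+//	img, _ := ref.NewImage(ctx, nil)
+//	defer img.Close()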
+
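+// storageTransport is the StoreTransport implementation; it carries the default store and
+// the default UID/GID mappings used when opening stores.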
+type storageTransport struct {
+ store storage.Store
+ defaultUIDMap []idtools.IDMap
+ defaultGIDMap []idtools.IDMap
+}
+
+func (s *storageTransport) Name() string {
+ // Still haven't really settled on a name.
+ return "containers-storage"
+}
+
+// SetStore sets the Store object which the Transport will use for parsing
+// references when information about a Store is not directly specified as part
+// of the reference. If one is not set, the library will attempt to initialize
+// one with default settings when a reference needs to be parsed. Calling
+// SetStore does not affect previously parsed references.
+func (s *storageTransport) SetStore(store storage.Store) {
+ s.store = store
+}
+
+// GetStoreIfSet returns the default store for this transport, as set using SetStore() or initialized by default, or nil if not set/determined yet.
+func (s *storageTransport) GetStoreIfSet() storage.Store {
+ return s.store
+}
+
+// SetDefaultUIDMap sets the default UID map to use when opening stores.
+func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) {
+ s.defaultUIDMap = idmap
+}
+
+// SetDefaultGIDMap sets the default GID map to use when opening stores.
+func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) {
+ s.defaultGIDMap = idmap
+}
+
+// DefaultUIDMap returns the default UID map used when opening stores.
+func (s *storageTransport) DefaultUIDMap() []idtools.IDMap {
+ return s.defaultUIDMap
+}
+
+// DefaultGIDMap returns the default GID map used when opening stores.
+func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
+ return s.defaultGIDMap
+}
+
+// ParseStoreReference takes a name or an ID, tries to figure out which it is
+// relative to the given store, and returns it in a reference object.
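+// For example (illustrative values only), "busybox:latest" parses as a name, "@" followed by a
+// full 64-hex-digit image ID parses as an ID, and "busybox@" followed by such an ID as both.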
+func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
+ if ref == "" {
+ return nil, fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference)
+ }
+ if ref[0] == '[' {
+ // Ignore the store specifier.
+ closeIndex := strings.IndexRune(ref, ']')
+ if closeIndex < 1 {
+ return nil, fmt.Errorf("store specifier in %q did not end: %w", ref, ErrInvalidReference)
+ }
+ ref = ref[closeIndex+1:]
+ }
+
+ // The reference may end with an image ID. Image IDs and digests use the same "@" separator;
+ // here we only peel away an image ID, and leave digests alone.
+ split := strings.LastIndex(ref, "@")
+ id := ""
+ if split != -1 {
+ possibleID := ref[split+1:]
+ if possibleID == "" {
+ return nil, fmt.Errorf("empty trailing digest or ID in %q: %w", ref, ErrInvalidReference)
+ }
+ // If it looks like a digest, leave it alone for now.
+ if _, err := digest.Parse(possibleID); err != nil {
+ // Otherwise…
+ if err := validateImageID(possibleID); err == nil {
+ id = possibleID // … it is a full ID
+ } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
+ // … it is a truncated version of the ID of an image that's present in local storage,
+ // so we might as well use the expanded value.
+ id = img.ID
+ } else {
+ return nil, fmt.Errorf("%q does not look like an image ID or digest: %w", possibleID, ErrInvalidReference)
+ }
+ // We have recognized an image ID; peel it off.
+ ref = ref[:split]
+ }
+ }
+
+ // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's
+ // at least of what we guess is a reasonable minimum length, because we don't want a really short value
+ // like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
+ if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") {
+ if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
+ // It's a truncated version of the ID of an image that's present in local storage;
+ // we need to expand it.
+ id = img.ID
+ ref = ""
+ }
+ }
+
+ var named reference.Named
+ // Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been
+	// completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.
+ if ref != "" {
+ var err error
+ named, err = reference.ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, fmt.Errorf("parsing named reference %q: %w", ref, err)
+ }
+ named = reference.TagNameOnly(named)
+ }
+
+ result, err := s.NewStoreReference(store, named, id)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("parsed reference into %q", result.StringWithinTransport())
+ return result, nil
+}
+
+// NewStoreReference creates a reference for (named@ID) in store.
+// Either name or ID can be unset; named must not be a reference.IsNameOnly.
+func (s *storageTransport) NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) {
+ return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
+}
+
+func (s *storageTransport) GetStore() (storage.Store, error) {
+ // Return the transport's previously-set store. If we don't have one
+ // of those, initialize one now.
+ if s.store == nil {
+ options, err := storage.DefaultStoreOptionsAutoDetectUID()
+ if err != nil {
+ return nil, err
+ }
+ options.UIDMap = s.defaultUIDMap
+ options.GIDMap = s.defaultGIDMap
+ store, err := storage.GetStore(options)
+ if err != nil {
+ return nil, err
+ }
+ s.store = store
+ }
+ return s.store, nil
+}
+
+// ParseReference takes a name and a tag or digest and/or ID
+// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"),
+// possibly prefixed with a store specifier in the form "[_graphroot_]" or
+// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
+// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
+// tries to figure out which it is, and returns it in a reference object.
+// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
+// even be specified as if it were a _name_ value.
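+// For example (illustrative values only), "busybox:latest", "@" followed by a full 64-hex-digit
+// image ID, or "[overlay@/var/lib/containers/storage]busybox:latest" are all acceptable forms,
+// provided the referenced store and graph driver exist.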
+func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
+ var store storage.Store
+ // Check if there's a store location prefix. If there is, then it
+ // needs to match a store that was previously initialized using
+ // storage.GetStore(), or be enough to let the storage library fill out
+ // the rest using knowledge that it has from elsewhere.
+ if len(reference) > 0 && reference[0] == '[' {
+ closeIndex := strings.IndexRune(reference, ']')
+ if closeIndex < 1 {
+ return nil, ErrInvalidReference
+ }
+ storeSpec := reference[1:closeIndex]
+ reference = reference[closeIndex+1:]
+ // Peel off a "driver@" from the start.
+ driverInfo := ""
+ driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@")
+ if !gotDriver {
+ storeSpec = driverPart1
+ if storeSpec == "" {
+ return nil, ErrInvalidReference
+ }
+ } else {
+ driverInfo = driverPart1
+ if driverInfo == "" {
+ return nil, ErrInvalidReference
+ }
+ storeSpec = driverPart2
+ if storeSpec == "" {
+ return nil, ErrInvalidReference
+ }
+ }
+ // Peel off a ":options" from the end.
+ var options []string
+ storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":")
+ if gotOptions {
+ options = strings.Split(optionsPart, ",")
+ }
+ // Peel off a "+runroot" from the new end.
+ storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+"
+ // The rest is our graph root.
+ rootInfo := storeSpec
+ // Check that any paths are absolute paths.
+ if rootInfo != "" && !filepath.IsAbs(rootInfo) {
+ return nil, ErrPathNotAbsolute
+ }
+ if runRootInfo != "" && !filepath.IsAbs(runRootInfo) {
+ return nil, ErrPathNotAbsolute
+ }
+ store2, err := storage.GetStore(storage.StoreOptions{
+ GraphDriverName: driverInfo,
+ GraphRoot: rootInfo,
+ RunRoot: runRootInfo,
+ GraphDriverOptions: options,
+ UIDMap: s.defaultUIDMap,
+ GIDMap: s.defaultGIDMap,
+ })
+ if err != nil {
+ return nil, err
+ }
+ store = store2
+ } else {
+ // We didn't have a store spec, so use the default.
+ store2, err := s.GetStore()
+ if err != nil {
+ return nil, err
+ }
+ store = store2
+ }
+ return s.ParseStoreReference(store, reference)
+}
+
+// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+// can return different images, with no way for the caller to "freeze" the storage.Image identity
+// without discarding the name entirely.
+//
+// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
+//
+// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
+func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
+ dref := ref.DockerReference()
+ if dref != nil {
+ if img, err := store.Image(dref.String()); err == nil {
+ return img, nil
+ }
+ }
+ if sref, ok := ref.(*storageReference); ok {
+ tmpRef := *sref
+ if img, err := tmpRef.resolveImage(nil); err == nil {
+ return img, nil
+ }
+ }
+ return nil, storage.ErrImageUnknown
+}
+
+// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
+// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
+// can return different images, with no way for the caller to "freeze" the storage.Image identity
+// without discarding the name entirely.
+//
+// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
+// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
+func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
+ store, err := s.GetStore()
+ if err != nil {
+ return nil, err
+ }
+ return s.GetStoreImage(store, ref)
+}
+
+func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // Check that there's a store location prefix. Values we're passed are
+ // expected to come from PolicyConfigurationIdentity or
+ // PolicyConfigurationNamespaces, so if there's no store location,
+ // something's wrong.
+ if scope[0] != '[' {
+ return ErrInvalidReference
+ }
+ // Parse the store location prefix.
+ closeIndex := strings.IndexRune(scope, ']')
+ if closeIndex < 1 {
+ return ErrInvalidReference
+ }
+ storeSpec := scope[1:closeIndex]
+ scope = scope[closeIndex+1:]
+ storeInfo := strings.SplitN(storeSpec, "@", 2)
+ if len(storeInfo) == 1 && storeInfo[0] != "" {
+ // One component: the graph root.
+ if !filepath.IsAbs(storeInfo[0]) {
+ return ErrPathNotAbsolute
+ }
+ } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
+ // Two components: the driver type and the graph root.
+ if !filepath.IsAbs(storeInfo[1]) {
+ return ErrPathNotAbsolute
+ }
+ } else {
+ // Anything else: scope specified in a form we don't
+ // recognize.
+ return ErrInvalidReference
+ }
+ // That might be all of it, and that's okay.
+ if scope == "" {
+ return nil
+ }
+
+ fields := strings.SplitN(scope, "@", 3)
+ switch len(fields) {
+ case 1: // name only
+ case 2: // name:tag@ID or name[:tag]@digest
+ if idErr := validateImageID(fields[1]); idErr != nil {
+ if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
+ return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
+ }
+ }
+ case 3: // name[:tag]@digest@ID
+ if _, err := digest.Parse(fields[1]); err != nil {
+ return err
+ }
+ if err := validateImageID(fields[2]); err != nil {
+ return err
+ }
+ default: // Coverage: This should never happen
+		return errors.New("Internal error: unexpected number of fields from strings.SplitN")
+ }
+ // As for field[0], if it is non-empty at all:
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// validateImageID returns nil if id is a valid (full) image ID, or an error
+func validateImageID(id string) error {
+ _, err := digest.Parse("sha256:" + id)
+ return err
+}
diff --git a/storage/storage_transport_test.go b/storage/storage_transport_test.go
new file mode 100644
index 0000000..091806c
--- /dev/null
+++ b/storage/storage_transport_test.go
@@ -0,0 +1,187 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ sha256Digest2 = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+)
+
+func TestTransportName(t *testing.T) {
+ assert.Equal(t, "containers-storage", Transport.Name())
+}
+
+func TestTransportSetGetStore(t *testing.T) {
+ Transport.SetStore(nil)
+ res := Transport.GetStoreIfSet()
+ assert.Nil(t, res)
+ store := newStore(t) // Calls SetStore
+ res = Transport.GetStoreIfSet()
+ assert.Equal(t, store, res)
+ Transport.SetStore(nil)
+}
+
+func TestTransportParseStoreReference(t *testing.T) {
+ const digest3 = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
+
+ store := newStore(t)
+
+ Transport.SetStore(nil)
+ for _, c := range []struct{ input, expectedRef, expectedID string }{
+ {"", "", ""}, // Empty input
+ // Handling of the store prefix
+ // FIXME? Should we be silently discarding input like this?
+ {"[unterminated", "", ""}, // Unterminated store specifier
+ {"[garbage]busybox", "docker.io/library/busybox:latest", ""}, // Store specifier is overridden by the store we pass to ParseStoreReference
+
+ {"UPPERCASEISINVALID", "", ""}, // Invalid single-component name
+ {"sha256:" + sha256digestHex, "docker.io/library/sha256:" + sha256digestHex, ""}, // Valid single-component name; the hex part is not an ID unless it has a "@" prefix, so it looks like a tag
+		// FIXME: This test is now incorrect; this should not fail _if the image ID matches_
+ {sha256digestHex, "", ""}, // Invalid single-component ID; not an ID without a "@" prefix, so it's parsed as a name, but names aren't allowed to look like IDs
+ {"@" + sha256digestHex, "", sha256digestHex}, // Valid single-component ID
+ {"@sha256:" + sha256digestHex, "", ""}, // Invalid un-named @digest
+ // "aaaa", either a valid image ID prefix, or a short form of docker.io/library/aaaa, untested
+ {"sha256:ab", "docker.io/library/sha256:ab", ""}, // Valid single-component name, explicit tag
+ {"busybox", "docker.io/library/busybox:latest", ""}, // Valid single-component name, implicit tag
+ {"busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, explicit tag
+ {"docker.io/library/busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, everything explicit
+
+ {"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@digestOrID
+ {"busybox@ab", "", ""}, // Invalid ID in name@digestOrID
+ {"busybox@", "", ""}, // Empty ID in name@digestOrID
+ {"busybox@sha256:ab", "", ""}, // Invalid digest in name@digestOrID
+ {"busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex, ""}, // Valid name@digest, no tag
+ {"busybox@" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid name@ID, implicit tag
+ // "busybox@aaaa", a valid image ID prefix, untested
+ {"busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid name@ID, explicit tag
+ {"docker.io/library/busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid name@ID, everything explicit
+ {"docker.io/library/busybox:notlatest@" + sha256Digest2, "docker.io/library/busybox:notlatest@" + sha256Digest2, ""}, // Valid name:tag@digest, everything explicit
+
+ {"busybox@sha256:" + sha256digestHex + "@ab", "", ""}, // Invalid ID in name@digest@ID
+ {"busybox@ab@" + sha256digestHex, "", ""}, // Invalid digest in name@digest@ID
+ {"busybox@@" + sha256digestHex, "", ""}, // Invalid digest in name@digest@ID
+ {"busybox@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox@" + sha256Digest2, sha256digestHex}, // name@digest@ID
+ {"docker.io/library/busybox@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox@" + sha256Digest2, sha256digestHex}, // name@digest@ID, everything explicit
+ {"docker.io/library/busybox:notlatest@sha256:" + sha256digestHex + "@" + sha256digestHex, "docker.io/library/busybox:notlatest@sha256:" + sha256digestHex, sha256digestHex}, // name:tag@digest@ID, everything explicit
+ // "busybox@sha256:"+sha256digestHex+"@aaaa", a valid image ID prefix, untested
+ {"busybox:notlatest@" + sha256Digest2 + "@" + digest3 + "@" + sha256digestHex, "", ""}, // name@digest@ID, with name containing another digest
+ } {
+ storageRef, err := Transport.ParseStoreReference(store, c.input)
+ if c.expectedRef == "" && c.expectedID == "" {
+ assert.Error(t, err, c.input)
+ } else {
+ require.NoError(t, err, c.input)
+ assert.Equal(t, store, storageRef.transport.store, c.input)
+ if c.expectedRef == "" {
+ assert.Nil(t, storageRef.named, c.input)
+ } else {
+ dockerRef, err := reference.ParseNormalizedNamed(c.expectedRef)
+ require.NoError(t, err)
+ require.NotNil(t, storageRef.named, c.input)
+ assert.Equal(t, dockerRef.String(), storageRef.named.String())
+ }
+ assert.Equal(t, c.expectedID, storageRef.id, c.input)
+ }
+ }
+}
+
+func TestTransportParseReference(t *testing.T) {
+ store := newStore(t)
+ driver := store.GraphDriverName()
+ root := store.GraphRoot()
+
+ for _, c := range []struct{ prefix, expectedDriver, expectedRoot, expectedRunRoot string }{
+ {"", driver, root, ""}, // Implicit store location prefix
+ {"[unterminated", "", "", ""}, // Unterminated store specifier
+ {"[]", "", "", ""}, // Empty store specifier
+ {"[relative/path]", "", "", ""}, // Non-absolute graph root path
+ {"[" + driver + "@relative/path]", "", "", ""}, // Non-absolute graph root path
+ {"[@" + root + "suffix2]", "", "", ""}, // Empty graph driver
+ {"[" + driver + "@]", "", "", ""}, // Empty root path
+ {"[thisisunknown@" + root + "suffix2]", "", "", ""}, // Unknown graph driver
+ {"[" + root + "suffix1]", "", "", ""}, // A valid root path, but no run dir
+ {"[" + driver + "@" + root + "suffix3+relative/path]", "", "", ""}, // Non-absolute run dir
+ {"[" + driver + "@" + root + "suffix3+" + root + "suffix4]",
+ driver,
+ root + "suffix3",
+ root + "suffix4"}, // A valid root@graph+run set
+ {"[" + driver + "@" + root + "suffix3+" + root + "suffix4:options,options,options]",
+ driver,
+ root + "suffix3",
+ root + "suffix4"}, // A valid root@graph+run+options set
+ } {
+ t.Logf("parsing %q", c.prefix+"busybox")
+ ref, err := Transport.ParseReference(c.prefix + "busybox")
+ if c.expectedDriver == "" {
+ assert.Error(t, err, c.prefix)
+ } else {
+ require.NoError(t, err, c.prefix)
+ storageRef, ok := ref.(*storageReference)
+ require.True(t, ok, c.prefix)
+ assert.Equal(t, c.expectedDriver, storageRef.transport.store.GraphDriverName(), c.prefix)
+ assert.Equal(t, c.expectedRoot, storageRef.transport.store.GraphRoot(), c.prefix)
+ if c.expectedRunRoot != "" {
+ assert.Equal(t, c.expectedRunRoot, storageRef.transport.store.RunRoot(), c.prefix)
+ }
+ }
+ }
+}
+
+func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
+ store := newStore(t)
+ driver := store.GraphDriverName()
+ root := store.GraphRoot()
+ storeSpec := fmt.Sprintf("[%s@%s]", driver, root) // As computed in PolicyConfigurationNamespaces
+
+ // Valid inputs
+ for _, scope := range []string{
+ "[" + root + "suffix1]", // driverlessStoreSpec in PolicyConfigurationNamespaces
+ "[" + driver + "@" + root + "suffix3]", // storeSpec in PolicyConfigurationNamespaces
+ storeSpec + "@" + sha256digestHex, // ID only
+ storeSpec + "docker.io", // Host name only
+ storeSpec + "docker.io/library", // A repository namespace
+ storeSpec + "docker.io/library/busybox", // A repository name
+ storeSpec + "docker.io/library/busybox:notlatest", // name:tag
+ storeSpec + "docker.io/library/busybox:notlatest@" + sha256digestHex, // name@ID
+ storeSpec + "docker.io/library/busybox@" + sha256Digest2, // name@digest
+ storeSpec + "docker.io/library/busybox@" + sha256Digest2 + "@" + sha256digestHex, // name@digest@ID
+ storeSpec + "docker.io/library/busybox:notlatest@" + sha256Digest2, // name:tag@digest
+ storeSpec + "docker.io/library/busybox:notlatest@" + sha256Digest2 + "@" + sha256digestHex, // name:tag@digest@ID
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.NoError(t, err, scope)
+ }
+
+ // Invalid inputs
+ for _, scope := range []string{
+ "busybox", // Unprefixed reference
+ "[unterminated", // Unterminated store specifier
+ "[]", // Empty store specifier
+ "[relative/path]", // Non-absolute graph root path
+ "[" + driver + "@relative/path]", // Non-absolute graph root path
+ // "[thisisunknown@" + root + "suffix2]", // Unknown graph driver FIXME: validate against storage.ListGraphDrivers() once that's available
+ storeSpec + "@", // An incomplete two-component name
+
+ storeSpec + "docker.io/library/busybox@sha256:ab", // Invalid digest in name@digest
+ storeSpec + "docker.io/library/busybox@ab", // Invalid ID in name@ID
+ storeSpec + "docker.io/library/busybox@", // Empty ID/digest in name@ID
+ storeSpec + "docker.io/library/busybox@@" + sha256digestHex, // Empty digest in name@digest@ID
+ storeSpec + "docker.io/library/busybox@ab@" + sha256digestHex, // Invalid digest in name@digest@ID
+ storeSpec + "docker.io/library/busybox@sha256:ab@" + sha256digestHex, // Invalid digest in name@digest@ID
+ storeSpec + "docker.io/library/busybox@" + sha256Digest2 + "@", // Empty ID in name@digest@ID
+ storeSpec + "docker.io/library/busybox@" + sha256Digest2 + "@ab", // Invalid ID in name@digest@ID
+ } {
+ err := Transport.ValidatePolicyConfigurationScope(scope)
+ assert.Error(t, err, scope)
+ }
+}
diff --git a/tarball/doc.go b/tarball/doc.go
new file mode 100644
index 0000000..064c78b
--- /dev/null
+++ b/tarball/doc.go
@@ -0,0 +1,61 @@
+// Package tarball provides a way to generate images using one or more layer
+// tarballs and an optional template configuration.
+//
+// An example:
+//
+// package main
+//
+// import (
+// "context"
+//
+// cp "github.com/containers/image/v5/copy"
+// "github.com/containers/image/v5/signature"
+// "github.com/containers/image/v5/tarball"
+// "github.com/containers/image/v5/transports/alltransports"
+// "github.com/containers/image/v5/types"
+// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+// )
+//
+// func imageFromTarball() {
+// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+// // - or -
+// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+// if err != nil {
+// panic(err)
+// }
+// updater, ok := src.(tarball.ConfigUpdater)
+// if !ok {
+// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
+// }
+// config := imgspecv1.Image{
+// Config: imgspecv1.ImageConfig{
+// Cmd: []string{"/bin/bash"},
+// },
+// }
+// annotations := make(map[string]string)
+// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
+// err = updater.ConfigUpdate(config, annotations)
+// if err != nil {
+// panic(err)
+// }
+// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
+// if err != nil {
+// panic(err)
+// }
+//
+// policy, err := signature.DefaultPolicy(nil)
+// if err != nil {
+// panic(err)
+// }
+//
+// pc, err := signature.NewPolicyContext(policy)
+// if err != nil {
+// panic(err)
+// }
+// defer pc.Destroy()
+// _, err = cp.Image(context.TODO(), pc, dest, src, nil)
+// if err != nil {
+// panic(err)
+// }
+// }
+package tarball
diff --git a/tarball/tarball_reference.go b/tarball/tarball_reference.go
new file mode 100644
index 0000000..d5578d9
--- /dev/null
+++ b/tarball/tarball_reference.go
@@ -0,0 +1,82 @@
+package tarball
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/maps"
+)
+
+// ConfigUpdater is an interface that ImageReferences for "tarball" images also
+// implement. It can be used to set values for a configuration, and to set
+// image annotations which will be present in the images returned by the
+// reference's NewImage() or NewImageSource() methods.
+type ConfigUpdater interface {
+ ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
+}
+
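+// tarballReference is an ImageReference pointing at one or more local layer tarballs;
+// a filename of "-" denotes data that was read from standard input.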
+type tarballReference struct {
+ config imgspecv1.Image
+ annotations map[string]string
+ filenames []string
+ stdin []byte
+}
+
+// ConfigUpdate updates the image's default configuration and adds annotations
+// which will be visible in source images created using this reference.
+func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error {
+ r.config = config
+ if r.annotations == nil {
+ r.annotations = make(map[string]string)
+ }
+ maps.Copy(r.annotations, annotations)
+ return nil
+}
+
+func (r *tarballReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+func (r *tarballReference) StringWithinTransport() string {
+ return strings.Join(r.filenames, ":")
+}
+
+func (r *tarballReference) DockerReference() reference.Named {
+ return nil
+}
+
+func (r *tarballReference) PolicyConfigurationIdentity() string {
+ return ""
+}
+
+func (r *tarballReference) PolicyConfigurationNamespaces() []string {
+ return nil
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, r)
+}
+
+func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ for _, filename := range r.filenames {
+ if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("error removing %q: %w", filename, err)
+ }
+ }
+ return nil
+}
+
+func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`)
+}
diff --git a/tarball/tarball_src.go b/tarball/tarball_src.go
new file mode 100644
index 0000000..6f9bfaf
--- /dev/null
+++ b/tarball/tarball_src.go
@@ -0,0 +1,234 @@
+package tarball
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/types"
+ "github.com/klauspost/pgzip"
+ digest "github.com/opencontainers/go-digest"
+ imgspecs "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/maps"
+)
+
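+// tarballImageSource serves the manifest, config, and layer blobs synthesized by
+// tarballReference.NewImageSource.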
+type tarballImageSource struct {
+ impl.Compat
+ impl.PropertyMethodsInitialize
+ impl.NoSignatures
+ impl.DoesNotAffectLayerInfosForCopy
+ stubs.NoGetBlobAtInitialize
+
+ reference tarballReference
+ blobs map[digest.Digest]tarballBlob
+ manifest []byte
+}
+
+// tarballBlob is a blob that tarballImageSource can return from GetBlob.
+type tarballBlob struct {
+ contents []byte // or nil to read from filename below
+ filename string // valid if contents == nil
+ size int64
+}
+
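+// NewImageSource synthesizes an image from the referenced tarballs: each file is digested
+// (gzip-compressed data is also decompressed to compute its diffID), an OCI config and
+// manifest are generated from the results, and an ImageSource serving those blobs is returned.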
+func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ // Pick up the layer comment from the configuration's history list, if one is set.
+ comment := "imported from tarball"
+ if len(r.config.History) > 0 && r.config.History[0].Comment != "" {
+ comment = r.config.History[0].Comment
+ }
+
+ // Gather up the digests, sizes, and history information for all of the files.
+ blobs := map[digest.Digest]tarballBlob{}
+ diffIDs := []digest.Digest{}
+ created := time.Time{}
+ history := []imgspecv1.History{}
+ layerDescriptors := []imgspecv1.Descriptor{}
+ for _, filename := range r.filenames {
+ var reader io.Reader
+ var blobTime time.Time
+ var blob tarballBlob
+ if filename == "-" {
+ reader = bytes.NewReader(r.stdin)
+ blobTime = time.Now()
+ blob = tarballBlob{
+ contents: r.stdin,
+ size: int64(len(r.stdin)),
+ }
+ } else {
+ file, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ reader = file
+ fileinfo, err := file.Stat()
+ if err != nil {
+ return nil, fmt.Errorf("error reading size of %q: %w", filename, err)
+ }
+ blobTime = fileinfo.ModTime()
+ blob = tarballBlob{
+ filename: filename,
+ size: fileinfo.Size(),
+ }
+ }
+
+ // Default to assuming the layer is compressed.
+ layerType := imgspecv1.MediaTypeImageLayerGzip
+
+ // Set up to digest the file as it is.
+ blobIDdigester := digest.Canonical.Digester()
+ reader = io.TeeReader(reader, blobIDdigester.Hash())
+
+ // Set up to digest the file after we maybe decompress it.
+ diffIDdigester := digest.Canonical.Digester()
+ uncompressed, err := pgzip.NewReader(reader)
+ if err == nil {
+ // It is compressed, so the diffID is the digest of the uncompressed version
+ reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
+ } else {
+ // It is not compressed, so the diffID and the blobID are going to be the same
+ diffIDdigester = blobIDdigester
+ layerType = imgspecv1.MediaTypeImageLayer
+ uncompressed = nil
+ }
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ if _, err := io.Copy(io.Discard, reader); err != nil {
+ return nil, fmt.Errorf("error reading %q: %v", filename, err)
+ }
+ if uncompressed != nil {
+ uncompressed.Close()
+ }
+
+ // Grab our uncompressed and possibly-compressed digests and sizes.
+ diffID := diffIDdigester.Digest()
+ blobID := blobIDdigester.Digest()
+ diffIDs = append(diffIDs, diffID)
+ blobs[blobID] = blob
+
+ history = append(history, imgspecv1.History{
+ Created: &blobTime,
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator),
+ Comment: comment,
+ })
+ // Use the mtime of the most recently modified file as the image's creation time.
+ if created.Before(blobTime) {
+ created = blobTime
+ }
+
+ layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{
+ Digest: blobID,
+ Size: blob.size,
+ MediaType: layerType,
+ })
+ }
+
+ // Pick up other defaults from the config in the reference.
+ config := r.config
+ if config.Created == nil {
+ config.Created = &created
+ }
+ if config.Architecture == "" {
+ config.Architecture = runtime.GOARCH
+ }
+ if config.OS == "" {
+ config.OS = runtime.GOOS
+ }
+ config.RootFS = imgspecv1.RootFS{
+ Type: "layers",
+ DiffIDs: diffIDs,
+ }
+ config.History = history
+
+ // Encode and digest the image configuration blob.
+ configBytes, err := json.Marshal(&config)
+ if err != nil {
+ return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err)
+ }
+ configID := digest.Canonical.FromBytes(configBytes)
+ blobs[configID] = tarballBlob{
+ contents: configBytes,
+ size: int64(len(configBytes)),
+ }
+
+ // Populate a manifest with the configuration blob and the layers.
+ manifest := imgspecv1.Manifest{
+ Versioned: imgspecs.Versioned{
+ SchemaVersion: 2,
+ },
+ Config: imgspecv1.Descriptor{
+ Digest: configID,
+ Size: int64(len(configBytes)),
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ },
+ Layers: layerDescriptors,
+ Annotations: maps.Clone(r.annotations),
+ }
+
+ // Encode the manifest.
+ manifestBytes, err := json.Marshal(&manifest)
+ if err != nil {
+ return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err)
+ }
+
+ // Return the image.
+ src := &tarballImageSource{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: false,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAt(r),
+
+ reference: *r,
+ blobs: blobs,
+ manifest: manifestBytes,
+ }
+ src.Compat = impl.AddCompat(src)
+
+ return src, nil
+}
+
+func (is *tarballImageSource) Close() error {
+ return nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ blob, ok := is.blobs[blobinfo.Digest]
+ if !ok {
+ return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
+ }
+ if blob.contents != nil {
+ return io.NopCloser(bytes.NewReader(blob.contents)), int64(len(blob.contents)), nil
+ }
+ reader, err := os.Open(blob.filename)
+ if err != nil {
+ return nil, -1, err
+ }
+ return reader, blob.size, nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
+ }
+ return is.manifest, imgspecv1.MediaTypeImageManifest, nil
+}
+
+func (is *tarballImageSource) Reference() types.ImageReference {
+ return &is.reference
+}
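+
+// Editorial sketch, not part of the upstream file: a minimal, hypothetical
+// program showing how a caller might obtain a manifest from a local layer
+// archive via this transport. The file name "layer.tar.gz" is an assumption
+// for illustration (the archive must exist); error handling is abbreviated.
+//
+//	package main
+//
+//	import (
+//		"context"
+//		"fmt"
+//
+//		"github.com/containers/image/v5/tarball"
+//	)
+//
+//	func main() {
+//		ctx := context.Background()
+//		ref, err := tarball.Transport.ParseReference("layer.tar.gz")
+//		if err != nil {
+//			panic(err)
+//		}
+//		src, err := ref.NewImageSource(ctx, nil)
+//		if err != nil {
+//			panic(err)
+//		}
+//		defer src.Close()
+//		manifestBytes, mimeType, err := src.GetManifest(ctx, nil)
+//		if err != nil {
+//			panic(err)
+//		}
+//		fmt.Printf("%s: %d bytes\n", mimeType, len(manifestBytes))
+//	}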
diff --git a/tarball/tarball_src_test.go b/tarball/tarball_src_test.go
new file mode 100644
index 0000000..5d9f48c
--- /dev/null
+++ b/tarball/tarball_src_test.go
@@ -0,0 +1,5 @@
+package tarball
+
+import "github.com/containers/image/v5/internal/private"
+
+var _ private.ImageSource = (*tarballImageSource)(nil)
diff --git a/tarball/tarball_transport.go b/tarball/tarball_transport.go
new file mode 100644
index 0000000..63d8355
--- /dev/null
+++ b/tarball/tarball_transport.go
@@ -0,0 +1,75 @@
+package tarball
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+const (
+ transportName = "tarball"
+ separator = ":"
+)
+
+var (
+ // Transport implements the types.ImageTransport interface for "tarball:" images,
+ // which are makeshift images constructed using one or more possibly-compressed tar
+ // archives.
+ Transport = &tarballTransport{}
+)
+
+type tarballTransport struct {
+}
+
+func (t *tarballTransport) Name() string {
+ return transportName
+}
+
+func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) {
+ var stdin []byte
+ var err error
+ filenames := strings.Split(reference, separator)
+ for _, filename := range filenames {
+ if filename == "-" {
+ stdin, err = io.ReadAll(os.Stdin)
+ if err != nil {
+ return nil, fmt.Errorf("error buffering stdin: %v", err)
+ }
+ continue
+ }
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, fmt.Errorf("error opening %q: %v", filename, err)
+ }
+ f.Close()
+ }
+ return NewReference(filenames, stdin)
+}
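+
+// Editorial note, not upstream documentation: the string passed to ParseReference
+// is whatever follows the "tarball:" prefix of an image name, so
+// "tarball:base.tar.gz:app.tar.gz" names two colon-separated archives and
+// "tarball:-" reads a single archive from standard input. A hypothetical call
+// (made-up file names):
+//
+//	ref, err := tarball.Transport.ParseReference("base.tar.gz:app.tar.gz")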
+
+// NewReference creates a new "tarball:" reference for the listed fileNames.
+// If any of the fileNames is "-", the contents of stdin are used instead.
+func NewReference(fileNames []string, stdin []byte) (types.ImageReference, error) {
+ for _, path := range fileNames {
+ if strings.Contains(path, separator) {
+ return nil, fmt.Errorf("Invalid path %q: paths including the separator %q are not supported", path, separator)
+ }
+ }
+ return &tarballReference{
+ filenames: fileNames,
+ stdin: stdin,
+ }, nil
+}
+
+func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // See the explanation in daemonReference.PolicyConfigurationIdentity.
+ return errors.New(`tarball: does not support any scopes except the default "" one`)
+}
+
+func init() {
+ transports.Register(Transport)
+}
diff --git a/transports/alltransports/alltransports.go b/transports/alltransports/alltransports.go
new file mode 100644
index 0000000..a8f1c13
--- /dev/null
+++ b/transports/alltransports/alltransports.go
@@ -0,0 +1,49 @@
+package alltransports
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+
+ // Register all known transports.
+ // NOTE: Make sure docs/containers-transports.5.md and docs/containers-policy.json.5.md are updated when adding or updating
+ // a transport.
+ _ "github.com/containers/image/v5/directory"
+ _ "github.com/containers/image/v5/docker"
+ _ "github.com/containers/image/v5/docker/archive"
+ _ "github.com/containers/image/v5/oci/archive"
+ _ "github.com/containers/image/v5/oci/layout"
+ _ "github.com/containers/image/v5/openshift"
+ _ "github.com/containers/image/v5/sif"
+ _ "github.com/containers/image/v5/tarball"
+	// The docker-daemon transport is registered by docker_daemon*.go
+ // The ostree transport is registered by ostree*.go
+ // The storage transport is registered by storage*.go
+)
+
+// ParseImageName converts a URL-like image name to a types.ImageReference.
+func ParseImageName(imgName string) (types.ImageReference, error) {
+ // Keep this in sync with TransportFromImageName!
+ transportName, withinTransport, valid := strings.Cut(imgName, ":")
+ if !valid {
+ return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+ }
+ transport := transports.Get(transportName)
+ if transport == nil {
+ return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName)
+ }
+ return transport.ParseReference(withinTransport)
+}
+
+// TransportFromImageName converts a URL-like image name to a types.ImageTransport or nil when
+// the transport is unknown or when the input is invalid.
+func TransportFromImageName(imageName string) types.ImageTransport {
+ // Keep this in sync with ParseImageName!
+ transportName, _, valid := strings.Cut(imageName, ":")
+ if valid {
+ return transports.Get(transportName)
+ }
+ return nil
+}
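+
+// Editorial sketch, not part of the upstream file: a hedged round trip through
+// ParseImageName and transports.ImageName, using the "dir" transport and a
+// made-up path.
+//
+//	ref, err := alltransports.ParseImageName("dir:/tmp/someimage")
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(ref.Transport().Name())    // "dir"
+//	fmt.Println(transports.ImageName(ref)) // "dir:/tmp/someimage"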
diff --git a/transports/alltransports/alltransports_test.go b/transports/alltransports/alltransports_test.go
new file mode 100644
index 0000000..1a5d177
--- /dev/null
+++ b/transports/alltransports/alltransports_test.go
@@ -0,0 +1,63 @@
+package alltransports
+
+import (
+ "testing"
+
+ "github.com/containers/image/v5/directory"
+ "github.com/containers/image/v5/transports"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseImageName(t *testing.T) {
+ // This primarily tests error handling, TestImageNameHandling is a table-driven
+ // test for the expected values.
+ for _, name := range []string{
+ "", // Empty
+ "busybox", // No transport name
+ ":busybox", // Empty transport name
+ "docker:", // Empty transport reference
+ } {
+ _, err := ParseImageName(name)
+ assert.Error(t, err, name)
+ }
+}
+
+// A table-driven test summarizing the various transports' behavior.
+func TestImageNameHandling(t *testing.T) {
+ // Always registered transports
+ for _, c := range []struct{ transport, input, roundtrip string }{
+ {"dir", "/etc", "/etc"},
+ {"docker", "//busybox", "//busybox:latest"},
+ {"docker", "//busybox:notlatest", "//busybox:notlatest"}, // This also tests handling of multiple ":" characters
+ {"docker-archive", "/var/lib/oci/busybox.tar:busybox:latest", "/var/lib/oci/busybox.tar:docker.io/library/busybox:latest"},
+ {"docker-archive", "busybox.tar:busybox:latest", "busybox.tar:docker.io/library/busybox:latest"},
+ {"oci", "/etc:someimage", "/etc:someimage"},
+ {"oci", "/etc:someimage:mytag", "/etc:someimage:mytag"},
+ {"oci-archive", "/etc:someimage", "/etc:someimage"},
+ {"oci-archive", "/etc:someimage:mytag", "/etc:someimage:mytag"},
+ // "atomic" not tested here because it depends on per-user configuration for the default cluster.
+ // "containers-storage" not tested here because it needs to initialize various directories on the fs.
+ } {
+ fullInput := c.transport + ":" + c.input
+ ref, err := ParseImageName(fullInput)
+ require.NoError(t, err, fullInput)
+ s := transports.ImageName(ref)
+ assert.Equal(t, c.transport+":"+c.roundtrip, s, fullInput)
+ }
+
+ // Possibly stubbed-out transports: Only verify that something is registered.
+ for _, c := range []string{"docker-daemon", "ostree"} {
+ transport := transports.Get(c)
+ assert.NotNil(t, transport, c)
+ }
+}
+
+func TestTransportFromImageName(t *testing.T) {
+ dirTransport := TransportFromImageName("dir:/tmp/test")
+ assert.Equal(t, dirTransport.Name(), directory.Transport.Name())
+ unknownTransport := TransportFromImageName("unknown:ref:test")
+ assert.Equal(t, unknownTransport, nil)
+ invalidName := TransportFromImageName("unknown")
+ assert.Equal(t, invalidName, nil)
+}
diff --git a/transports/alltransports/docker_daemon.go b/transports/alltransports/docker_daemon.go
new file mode 100644
index 0000000..ffac6e0
--- /dev/null
+++ b/transports/alltransports/docker_daemon.go
@@ -0,0 +1,9 @@
+//go:build !containers_image_docker_daemon_stub
+// +build !containers_image_docker_daemon_stub
+
+package alltransports
+
+import (
+ // Register the docker-daemon transport
+ _ "github.com/containers/image/v5/docker/daemon"
+)
diff --git a/transports/alltransports/docker_daemon_stub.go b/transports/alltransports/docker_daemon_stub.go
new file mode 100644
index 0000000..ddc347b
--- /dev/null
+++ b/transports/alltransports/docker_daemon_stub.go
@@ -0,0 +1,10 @@
+//go:build containers_image_docker_daemon_stub
+// +build containers_image_docker_daemon_stub
+
+package alltransports
+
+import "github.com/containers/image/v5/transports"
+
+func init() {
+ transports.Register(transports.NewStubTransport("docker-daemon"))
+}
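+
+// Editorial note: whether the real docker-daemon transport or this stub is
+// compiled in is decided by the build tags shown above; a consuming tool could,
+// for example, be built without Docker daemon support via:
+//
+//	go build -tags containers_image_docker_daemon_stub ./...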
diff --git a/transports/alltransports/ostree.go b/transports/alltransports/ostree.go
new file mode 100644
index 0000000..2340702
--- /dev/null
+++ b/transports/alltransports/ostree.go
@@ -0,0 +1,9 @@
+//go:build containers_image_ostree && linux
+// +build containers_image_ostree,linux
+
+package alltransports
+
+import (
+ // Register the ostree transport
+ _ "github.com/containers/image/v5/ostree"
+)
diff --git a/transports/alltransports/ostree_stub.go b/transports/alltransports/ostree_stub.go
new file mode 100644
index 0000000..8c41751
--- /dev/null
+++ b/transports/alltransports/ostree_stub.go
@@ -0,0 +1,10 @@
+//go:build !containers_image_ostree || !linux
+// +build !containers_image_ostree !linux
+
+package alltransports
+
+import "github.com/containers/image/v5/transports"
+
+func init() {
+ transports.Register(transports.NewStubTransport("ostree"))
+}
diff --git a/transports/alltransports/storage.go b/transports/alltransports/storage.go
new file mode 100644
index 0000000..1e399cd
--- /dev/null
+++ b/transports/alltransports/storage.go
@@ -0,0 +1,9 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package alltransports
+
+import (
+ // Register the storage transport
+ _ "github.com/containers/image/v5/storage"
+)
diff --git a/transports/alltransports/storage_stub.go b/transports/alltransports/storage_stub.go
new file mode 100644
index 0000000..3080266
--- /dev/null
+++ b/transports/alltransports/storage_stub.go
@@ -0,0 +1,10 @@
+//go:build containers_image_storage_stub
+// +build containers_image_storage_stub
+
+package alltransports
+
+import "github.com/containers/image/v5/transports"
+
+func init() {
+ transports.Register(transports.NewStubTransport("containers-storage"))
+}
diff --git a/transports/stub.go b/transports/stub.go
new file mode 100644
index 0000000..2c186a9
--- /dev/null
+++ b/transports/stub.go
@@ -0,0 +1,36 @@
+package transports
+
+import (
+ "fmt"
+
+ "github.com/containers/image/v5/types"
+)
+
+// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+type stubTransport string
+
+// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+func NewStubTransport(name string) types.ImageTransport {
+ return stubTransport(name)
+}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (s stubTransport) Name() string {
+ return string(s)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // Allowing any reference in here allows tools with some transports stubbed-out to still
+ // use signature verification policies which refer to these stubbed-out transports.
+ // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON .
+ return nil
+}
diff --git a/transports/stub_test.go b/transports/stub_test.go
new file mode 100644
index 0000000..f181a1a
--- /dev/null
+++ b/transports/stub_test.go
@@ -0,0 +1,18 @@
+package transports
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStubTransport(t *testing.T) {
+ const name = "whatever"
+
+ s := NewStubTransport(name)
+ assert.Equal(t, name, s.Name())
+ _, err := s.ParseReference("this is rejected regardless of content")
+ assert.Error(t, err)
+ err = s.ValidatePolicyConfigurationScope("this is accepted regardless of content")
+ assert.NoError(t, err)
+}
diff --git a/transports/transports.go b/transports/transports.go
new file mode 100644
index 0000000..834f33b
--- /dev/null
+++ b/transports/transports.go
@@ -0,0 +1,90 @@
+package transports
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/types"
+)
+
+// knownTransports is a registry of known ImageTransport instances.
+type knownTransports struct {
+ transports map[string]types.ImageTransport
+ mu sync.Mutex
+}
+
+func (kt *knownTransports) Get(k string) types.ImageTransport {
+ kt.mu.Lock()
+ t := kt.transports[k]
+ kt.mu.Unlock()
+ return t
+}
+
+func (kt *knownTransports) Remove(k string) {
+ kt.mu.Lock()
+ delete(kt.transports, k)
+ kt.mu.Unlock()
+}
+
+func (kt *knownTransports) Add(t types.ImageTransport) {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ name := t.Name()
+ if t := kt.transports[name]; t != nil {
+ panic(fmt.Sprintf("Duplicate image transport name %s", name))
+ }
+ kt.transports[name] = t
+}
+
+var kt *knownTransports
+
+func init() {
+ kt = &knownTransports{
+ transports: make(map[string]types.ImageTransport),
+ }
+}
+
+// Get returns the transport specified by name or nil when unavailable.
+func Get(name string) types.ImageTransport {
+ return kt.Get(name)
+}
+
+// Delete deletes a transport from the registered transports.
+func Delete(name string) {
+ kt.Remove(name)
+}
+
+// Register registers a transport.
+func Register(t types.ImageTransport) {
+ kt.Add(t)
+}
+
+// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
+// ParseImageName(ImageName(reference)) returns an equivalent reference.
+//
+// This is the generally recommended way to refer to images in the UI.
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
+
+var deprecatedTransports = set.NewWithValues("atomic")
+
+// ListNames returns a list of non-deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
+func ListNames() []string {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ var names []string
+ for _, transport := range kt.transports {
+ if !deprecatedTransports.Contains(transport.Name()) {
+ names = append(names, transport.Name())
+ }
+ }
+ sort.Strings(names)
+ return names
+}
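+
+// Editorial sketch, not part of the upstream file: enumerating and looking up
+// registered transports from a consuming program.
+//
+//	for _, name := range transports.ListNames() {
+//		fmt.Println(name, transports.Get(name) != nil)
+//	}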
diff --git a/types/types.go b/types/types.go
new file mode 100644
index 0000000..180a98c
--- /dev/null
+++ b/types/types.go
@@ -0,0 +1,717 @@
+package types
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should (apart from versions of the image format), be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+//
+// See also transports.KnownTransports.
+type ImageTransport interface {
+ // Name returns the name of the transport, which must be unique among other transports.
+ Name() string
+ // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+ ParseReference(reference string) (ImageReference, error)
+ // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+ // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+ // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+ // scope passed to this function will not be "", that value is always allowed.
+ ValidatePolicyConfigurationScope(scope string) error
+}
+
+// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
+//
+// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
+// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
+// That's also why the various identification/formatting methods of this type do not support returning errors.
+//
+// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside
+// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on.
+type ImageReference interface {
+ Transport() ImageTransport
+ // StringWithinTransport returns a string representation of the reference, which MUST be such that
+ // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+ // NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+ // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+ // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+ // instead, see transports.ImageName().
+ StringWithinTransport() string
+
+ // DockerReference returns a Docker reference associated with this reference
+ // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+ // not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+ DockerReference() reference.Named
+
+ // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+ // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+ // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+ // (i.e. various references with exactly the same semantics should return the same configuration identity)
+ // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+ // not required/guaranteed that it will be a valid input to Transport().ParseReference().
+ // Returns "" if configuration identities for these references are not supported.
+ PolicyConfigurationIdentity() string
+
+ // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+ // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+ // in order, terminating on first match, and an implicit "" is always checked at the end.
+ // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+ // and each following element to be a prefix of the element preceding it.
+ PolicyConfigurationNamespaces() []string
+
+ // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+ // The caller must call .Close() on the returned ImageCloser.
+ // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+ // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+ // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+ NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error)
+ // NewImageSource returns a types.ImageSource for this reference.
+ // The caller must call .Close() on the returned ImageSource.
+ NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error)
+ // NewImageDestination returns a types.ImageDestination for this reference.
+ // The caller must call .Close() on the returned ImageDestination.
+ NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
+
+ // DeleteImage deletes the named image from the registry, if supported.
+ DeleteImage(ctx context.Context, sys *SystemContext) error
+}
+
+// LayerCompression indicates if layers must be compressed, decompressed or preserved
+type LayerCompression int
+
+const (
+ // PreserveOriginal indicates the layer must be preserved, ie
+ // no compression or decompression.
+ PreserveOriginal LayerCompression = iota
+ // Decompress indicates the layer must be decompressed
+ Decompress
+ // Compress indicates the layer must be compressed
+ Compress
+)
+
+// LayerCrypto indicates if layers have been encrypted or decrypted or none
+type LayerCrypto int
+
+const (
+ // PreserveOriginalCrypto indicates the layer must be preserved, ie
+ // no encryption/decryption
+ PreserveOriginalCrypto LayerCrypto = iota
+ // Encrypt indicates the layer is encrypted
+ Encrypt
+ // Decrypt indicates the layer is decrypted
+ Decrypt
+)
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+ Digest digest.Digest // "" if unknown.
+ Size int64 // -1 if unknown
+ URLs []string
+ Annotations map[string]string
+ MediaType string
+
+ // NOTE: The following fields contain desired _edits_ to blob infos.
+	// Conceptually they don't belong in the BlobInfo object at all;
+ // the edits should be provided specifically as parameters to the edit implementation.
+ // We can’t remove the fields without breaking compatibility, but don’t
+ // add any more.
+
+ // CompressionOperation is used in Image.UpdateLayerInfos to instruct
+ // whether the original layer's "compressed or not" should be preserved,
+ // possibly while changing the compression algorithm from one to another,
+ // or if it should be compressed or decompressed. The field defaults to
+ // preserve the original layer's compressedness.
+ // TODO: To remove together with CryptoOperation in re-design to remove
+ // field out of BlobInfo.
+ CompressionOperation LayerCompression
+ // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
+ // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
+ // set when `CompressionOperation == Compress` and MAY be set when
+ // `CompressionOperation == PreserveOriginal` and the compression type is
+ // being changed for an already-compressed layer.
+ CompressionAlgorithm *compression.Algorithm
+ // CryptoOperation is used in Image.UpdateLayerInfos to instruct
+ // whether the original layer was encrypted/decrypted
+ // TODO: To remove together with CompressionOperation in re-design to
+ // remove field out of BlobInfo.
+ CryptoOperation LayerCrypto
+ // Before adding any fields to this struct, read the NOTE above.
+}
+
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+ Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+ Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+ Digest digest.Digest
+ Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+//
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors when reading from, or writing to, the cache should never be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+//
+// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
+// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
+type BlobInfoCache interface {
+ // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+ // May return anyDigest if it is known to be uncompressed.
+ // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+ UncompressedDigest(anyDigest digest.Digest) digest.Digest
+ // RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+ // It’s allowed for anyDigest == uncompressed.
+ // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+ // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+ // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+ RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+ // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+ // and can be reused given the opaque location data.
+ RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+	// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+ //
+ // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+ // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+ // uncompressed digest.
+ CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
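+
+// Editorial note: as the comment above says, callers normally do not implement
+// BlobInfoCache themselves but use an implementation from pkg/blobinfocache.
+// A plausible (hedged) usage sketch, assuming that package's default cache:
+//
+//	cache := blobinfocache.DefaultCache(sys) // github.com/containers/image/v5/pkg/blobinfocache
+//	rc, size, err := imageSource.GetBlob(ctx, blobInfo, cache)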
+
+// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
+// This is primarily useful for copying images around; for examining their properties, Image (below)
+// is usually more useful.
+// Each ImageSource should eventually be closed by calling Close().
+//
+// WARNING: Various methods which return an object identified by digest generally do not
+// validate that the returned data actually matches that digest; this is the caller’s responsibility.
+type ImageSource interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized ImageSource, if any.
+ Close() error
+ // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+ // It may use a remote (= slow) service.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+ // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+ GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
+ // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+ // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+ // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+ GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
+ // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+ HasThreadSafeGetBlob() bool
+ // GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+ // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+ // (e.g. if the source never returns manifest lists).
+ GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
+ // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+ // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+ // to read the image's layers.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+ // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+ // (e.g. if the source never returns manifest lists).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error)
+}
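+
+// Editorial sketch, not part of the upstream file: how a caller might honor
+// LayerInfosForCopy when reading layers. src and cache are placeholders; error
+// handling is abbreviated.
+//
+//	layers, err := src.LayerInfosForCopy(ctx, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	if layers == nil {
+//		// fall back to the BlobInfos listed in the manifest
+//	}
+//	for _, info := range layers {
+//		rc, _, err := src.GetBlob(ctx, info, cache)
+//		if err != nil {
+//			// handle the error
+//		}
+//		// consume rc, then rc.Close()
+//	}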
+
+// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
+//
+// There is a specific required order for some of the calls:
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
+// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
+// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
+//
+// Each ImageDestination should eventually be closed by calling Close().
+type ImageDestination interface {
+ // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+ // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized ImageDestination, if any.
+ Close() error
+
+ // SupportedManifestMIMETypes tells which manifest mime types the destination supports
+	// If an empty slice or nil is returned, then any MIME type can be tried for upload.
+ SupportedManifestMIMETypes() []string
+ // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+ // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+ SupportsSignatures(ctx context.Context) error
+ // DesiredLayerCompression indicates the kind of compression to apply on layers
+ DesiredLayerCompression() LayerCompression
+ // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+ // uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs() bool
+ // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+ MustMatchRuntimeOS() bool
+ // IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+ // and would prefer to receive an unmodified manifest instead of one modified for the destination.
+ // Does not make a difference if Reference().DockerReference() is nil.
+ IgnoresEmbeddedDockerReference() bool
+
+ // PutBlob writes contents of stream and returns data representing the result.
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // May update cache.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+ PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+ // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+ HasThreadSafePutBlob() bool
+ // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+ // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+ // reflected in the manifest that will be written.
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ // May use and/or update cache.
+ TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
+ // PutManifest writes manifest to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+ // by `manifest.Digest()`.
+ // FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+ // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+	// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+ PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
+ // PutSignatures writes a set of signatures to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // MUST be called after PutManifest (signatures may reference manifest contents).
+ PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
+ // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+ // unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+ // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+ // original manifest list digest, if desired.
+ // WARNING: This does not have any transactional semantics:
+ // - Uploaded data MAY be visible to others before Commit() is called
+ // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+ Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
+}
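+
+// Editorial sketch, not part of the upstream file: the required call order
+// described above (blob uploads, then PutManifest, then PutSignatures, then
+// Commit) in schematic form. dest, layers, layerStream, cache, manifestBytes,
+// sigs and unparsedToplevel are placeholders; errors are discarded for brevity.
+//
+//	for _, layer := range layers {
+//		if reused, _, err := dest.TryReusingBlob(ctx, layer, cache, true); err == nil && reused {
+//			continue
+//		}
+//		_, _ = dest.PutBlob(ctx, layerStream(layer), layer, cache, false)
+//	}
+//	_ = dest.PutManifest(ctx, manifestBytes, nil)
+//	_ = dest.PutSignatures(ctx, sigs, nil)
+//	_ = dest.Commit(ctx, unparsedToplevel)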
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// refuses specifically this manifest type, but may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+ Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+ return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+type UnparsedImage interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+ Reference() ImageReference
+ // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+ Manifest(ctx context.Context) ([]byte, string, error)
+ // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+ Signatures(ctx context.Context) ([][]byte, error)
+}
+
+// Image is the primary API for inspecting properties of images.
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+type Image interface {
+ // Note that Reference may return nil in the return value of UpdatedImage!
+ UnparsedImage
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+ ConfigInfo() BlobInfo
+ // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
+ // The result is cached; it is OK to call this however often you need.
+ ConfigBlob(context.Context) ([]byte, error)
+ // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+ // old image manifests work (docker v2s1 especially).
+ OCIConfig(context.Context) (*v1.Image, error)
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []BlobInfo
+ // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+ // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfosForCopy(context.Context) ([]BlobInfo, error)
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
+ // It returns false if the manifest does not embed a Docker reference.
+ // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+ Inspect(context.Context) (*ImageInspectInfo, error)
+ // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+ // (most importantly it forces us to download the full layers even if they are already present at the destination).
+ UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+ // UpdatedImage returns a types.Image modified according to options.
+ // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
+ // This does not change the state of the original Image object.
+ // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
+ // manifests of type options.ManifestMIMEType can not include layers that are compressed
+ // in accordance with the CompressionOperation and CompressionAlgorithm specified in one
+ // or more options.LayerInfos items, though retrying with a different
+ // options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
+ // values might succeed.
+ UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
+ // SupportsEncryption returns an indicator that the image supports encryption
+ //
+ // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
+ // the process of updating a manifest between different manifest types was to update then convert.
+ // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
+ SupportsEncryption(ctx context.Context) bool
+ // Size returns an approximation of the amount of disk space which is consumed by the image in its current
+ // location. If the size is not known, -1 will be returned.
+ Size() (int64, error)
+}
+
+// ImageCloser is an Image with a Close() method which must be called by the user.
+// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
+// to ensure that the ImageSource is closed.
+type ImageCloser interface {
+ Image
+ // Close removes resources associated with an initialized ImageCloser.
+ Close() error
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
+type ManifestUpdateOptions struct {
+ LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
+ EmbeddedDockerReference reference.Named
+ ManifestMIMEType string
+ // The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+ InformationOnly ManifestUpdateInformation
+}
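+
+// Editorial sketch, not part of the upstream file: a hedged example of using
+// ManifestUpdateOptions to request a manifest MIME type conversion; img and ctx
+// are placeholders.
+//
+//	updated, err := img.UpdatedImage(ctx, ManifestUpdateOptions{
+//		ManifestMIMEType: v1.MediaTypeImageManifest, // convert to an OCI manifest
+//	})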
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+ Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+ LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
+ LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+ Tag string
+ Created *time.Time
+ DockerVersion string
+ Labels map[string]string
+ Architecture string
+ Variant string
+ Os string
+ Layers []string
+ LayersData []ImageInspectLayer
+ Env []string
+ Author string
+}
+
+// ImageInspectLayer is a set of metadata describing an image layer's details
+type ImageInspectLayer struct {
+ MIMEType string // "" if unknown.
+ Digest digest.Digest
+ Size int64 // -1 if unknown.
+ Annotations map[string]string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+// the value of Username and Password can be empty for accessing the registry anonymously
+type DockerAuthConfig struct {
+ Username string
+ Password string
+	// IdentityToken can be used as a refresh_token in place of username and
+ // password to obtain the bearer/access token in oauth2 flow. If identity
+ // token is set, password should not be set.
+ // Ref: https://docs.docker.com/registry/spec/auth/oauth/
+ IdentityToken string
+}
+
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+	// OptionalBoolUndefined indicates that the OptionalBool value hasn't been set.
+ OptionalBoolUndefined OptionalBool = iota
+ // OptionalBoolTrue represents the boolean true.
+ OptionalBoolTrue
+ // OptionalBoolFalse represents the boolean false.
+ OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to avoid boilerplate code of users.
+func NewOptionalBool(b bool) OptionalBool {
+ o := OptionalBoolFalse
+ if b {
+ o = OptionalBoolTrue
+ }
+ return o
+}
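+
+// Editorial sketch, not part of the upstream file: OptionalBool is typically
+// used for SystemContext fields such as DockerInsecureSkipTLSVerify (defined
+// below), where "unset" must stay distinguishable from an explicit false.
+//
+//	sys := &SystemContext{
+//		DockerInsecureSkipTLSVerify: NewOptionalBool(true),
+//	}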
+
+// ShortNameMode defines the mode of short-name resolution.
+//
+// The use of unqualified-search registries entails an ambiguity, as it's
+// unclear from which registry a given image, referenced by a short name, may
+// be pulled.
+//
+// The ShortNameMode type defines how short names should resolve.
+type ShortNameMode int
+
+const (
+ ShortNameModeInvalid ShortNameMode = iota
+ // Use all configured unqualified-search registries without prompting
+ // the user.
+ ShortNameModeDisabled
+ // If stdout and stdin are a TTY, prompt the user to select a configured
+ // unqualified-search registry. Otherwise, use all configured
+ // unqualified-search registries.
+ //
+ // Note that if only one unqualified-search registry is set, it will be
+ // used without prompting.
+ ShortNameModePermissive
+ // Always prompt the user to select a configured unqualified-search
+ // registry. Throw an error if stdout or stdin is not a TTY as
+ // prompting isn't possible.
+ //
+ // Note that if only one unqualified-search registry is set, it will be
+ // used without prompting.
+ ShortNameModeEnforcing
+)
+
+// SystemContext allows parameterizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics is exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+ // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+ // Not used for any of the more specific path overrides available in this struct.
+ // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+	// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths,
+	// and there is no need to worry about the environment.)
+ // NOTE: This does NOT affect paths starting by $HOME.
+ RootForImplicitAbsolutePaths string
+
+ // === Global configuration overrides ===
+ // If not "", overrides the system's default path for signature.Policy configuration.
+ SignaturePolicyPath string
+ // If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
+ RegistriesDirPath string
+ // Path to the system-wide registries configuration file
+ SystemRegistriesConfPath string
+ // Path to the system-wide registries configuration directory
+ SystemRegistriesConfDirPath string
+ // Path to the user-specific short-names configuration file
+ UserShortNameAliasConfPath string
+ // If set, short-name resolution in pkg/shortnames must follow the specified mode
+ ShortNameMode *ShortNameMode
+ // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
+ // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
+ // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
+ // specific context.
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
+ // If not "", overrides the default path for the registry authentication file, but only new format files
+ AuthFilePath string
+ // if not "", overrides the default path for the registry authentication file, but with the legacy format;
+ // the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
+ // but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
+ // in locations other than the home dir; openshift components should then set this field in those cases;
+ // this field is ignored if `AuthFilePath` is set (we favor the newer format);
+ // only reading of this data is supported;
+ LegacyFormatAuthFilePath string
+ // If set, a path to a Docker-compatible "config.json" file containing credentials; and no other files are processed.
+ // This must not be set if AuthFilePath is set.
+	// Only credentials and credential helpers in this file are processed, not any other configuration in this file.
+ DockerCompatAuthFilePath string
+ // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
+ ArchitectureChoice string
+ // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
+ OSChoice string
+ // If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match.
+ VariantChoice string
+ // If not "", overrides the system's default directory containing a blob info cache.
+ BlobInfoCacheDir string
+ // Additional tags when creating or copying a docker-archive.
+ DockerArchiveAdditionalTags []reference.NamedTagged
+ // If not "", overrides the temporary directory to use for storing big files
+ BigFilesTemporaryDir string
+
+ // === OCI.Transport overrides ===
+ // If not "", a directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when downloading OCI image layers.
+ OCICertPath string
+ // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ OCIInsecureSkipTLSVerify bool
+ // If not "", use a shared directory for storing blobs rather than within OCI layouts
+ OCISharedBlobDirPath string
+ // Allow uncompressed image layers when writing OCI image layouts (instead of compressing them)
+ OCIAcceptUncompressedLayers bool
+
+ // === docker.Transport overrides ===
+ // If not "", a directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when talking to a container registry.
+ DockerCertPath string
+ // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
+ // Ignored if DockerCertPath is non-empty.
+ DockerPerHostCertDirPath string
+ // Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ DockerInsecureSkipTLSVerify OptionalBool
+ // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
+ // Ignored if DockerBearerRegistryToken is non-empty.
+ DockerAuthConfig *DockerAuthConfig
+ // if not "", the library uses this registry token to authenticate to the registry
+ DockerBearerRegistryToken string
+ // if not "", an User-Agent header is added to each request when contacting a registry.
+ DockerRegistryUserAgent string
+ // if true, the V1 ping attempt (normally made to give users a better error message) is skipped. Default is false.
+ // Note that this field is used mainly to integrate containers/image into projectatomic/docker
+ // in order to not break any existing docker's integration tests.
+ DockerDisableV1Ping bool
+ // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
+ DockerDisableDestSchema1MIMETypes bool
+ // If true, the physical pull source of docker transport images is logged at the info level
+ DockerLogMirrorChoice bool
+ // Directory to use for OSTree temporary files
+ OSTreeTmpDirPath string
+ // If true, all blobs will have precomputed digests, to ensure that layers which already exist on the registry are not uploaded.
+ // Note that when the digest of a blob is not already known, this requires writing blobs to temporary files
+ // and takes more time than the default behavior.
+ DockerRegistryPushPrecomputeDigests bool
+
+ // === docker/daemon.Transport overrides ===
+ // A directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when talking to a Docker daemon.
+ DockerDaemonCertPath string
+ // The hostname or IP address of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
+ DockerDaemonHost string
+ // Used to skip TLS verification; off by default. To take effect, DockerDaemonCertPath must also be specified.
+ DockerDaemonInsecureSkipTLSVerify bool
+
+ // === dir.Transport overrides ===
+ // DirForceCompress compresses the image layers if set to true
+ DirForceCompress bool
+ // DirForceDecompress decompresses the image layers if set to true
+ DirForceDecompress bool
+
+ // CompressionFormat is the format to use for the compression of the blobs
+ CompressionFormat *compression.Algorithm
+ // CompressionLevel specifies what compression level is used
+ CompressionLevel *int
+}
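As an illustration of how callers typically consume these fields, here is a minimal sketch (not part of this patch) of building a SystemContext for a pull. The package name, function name, auth-file path, and platform values are illustrative assumptions; the field names and the OptionalBool constant come from this file.

package example

import "github.com/containers/image/v5/types"

// newPullContext builds a SystemContext for pulling an arm64/linux image
// with credentials read from a non-default auth file. All concrete values
// shown here are illustrative placeholders.
func newPullContext() *types.SystemContext {
	return &types.SystemContext{
		// Read registry credentials from an explicit (new-format) auth file.
		AuthFilePath: "/run/containers/auth.json",
		// Select the arm64/linux variant regardless of the local platform.
		ArchitectureChoice: "arm64",
		OSChoice:           "linux",
		// Keep TLS verification enabled for registry connections.
		DockerInsecureSkipTLSVerify: types.OptionalBoolFalse,
	}
}

Passing nil instead of such a context is always valid, as noted above; a non-nil context is only needed when one of the defaults has to be overridden.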
+
+// ProgressEvent is the type of events a progress reader can produce
+// Warning: new event types may be added any time.
+type ProgressEvent uint
+
+const (
+ // ProgressEventNewArtifact will be fired on progress reader setup
+ ProgressEventNewArtifact ProgressEvent = iota
+
+ // ProgressEventRead indicates that the artifact download is currently in
+ // progress
+ ProgressEventRead
+
+ // ProgressEventDone is fired when the data transfer has been finished for
+ // the specific artifact
+ ProgressEventDone
+
+ // ProgressEventSkipped is fired when the artifact has been skipped because
+ // it is already available at the destination
+ ProgressEventSkipped
+)
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
+type ProgressProperties struct {
+ // The event type, indicating what happened to the artifact
+ Event ProgressEvent
+
+ // The artifact which has been updated in this interval
+ Artifact BlobInfo
+
+ // The currently downloaded size in bytes
+ // Increases from 0 to the final Artifact size
+ Offset uint64
+
+ // The additional offset which has been downloaded inside the last update
+ // interval. Will be reset after each ProgressEventRead event.
+ OffsetUpdate uint64
+}
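To make the event/offset semantics concrete, here is a hedged sketch of a monitor draining a channel of ProgressProperties; the helper name reportProgress and the output format are assumptions, while the event constants and the Artifact, Offset, and OffsetUpdate fields are those defined above. Such a channel would typically be wired up by the copy code.

package example

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

// reportProgress prints one line per progress update received on the channel.
func reportProgress(updates <-chan types.ProgressProperties) {
	for p := range updates {
		switch p.Event {
		case types.ProgressEventNewArtifact:
			fmt.Printf("starting %s (%d bytes)\n", p.Artifact.Digest, p.Artifact.Size)
		case types.ProgressEventRead:
			fmt.Printf("%s: %d/%d bytes (+%d this interval)\n", p.Artifact.Digest, p.Offset, p.Artifact.Size, p.OffsetUpdate)
		case types.ProgressEventDone:
			fmt.Printf("finished %s\n", p.Artifact.Digest)
		case types.ProgressEventSkipped:
			fmt.Printf("skipped %s (already present at the destination)\n", p.Artifact.Digest)
		}
	}
}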
diff --git a/version/version.go b/version/version.go
new file mode 100644
index 0000000..b24ee88
--- /dev/null
+++ b/version/version.go
@@ -0,0 +1,18 @@
+package version
+
+import "fmt"
+
+const (
+ // VersionMajor is for API-incompatible changes
+ VersionMajor = 5
+ // VersionMinor is for functionality added in a backwards-compatible manner
+ VersionMinor = 29
+ // VersionPatch is for backwards-compatible bug fixes
+ VersionPatch = 2
+
+ // VersionDev indicates a development branch. It is the empty string for releases.
+ VersionDev = ""
+)
+
+// Version is the version of the containers/image library.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
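With the constants above, the computed string is "5.29.2"; a hypothetical development build that set VersionDev to "-dev" would yield "5.29.2-dev". A trivial sketch of a consumer reading it (the function name printLibraryVersion is illustrative; the exported version.Version variable is the one defined above):

package example

import (
	"fmt"

	"github.com/containers/image/v5/version"
)

// printLibraryVersion logs which containers/image version was compiled in.
func printLibraryVersion() {
	fmt.Println("containers/image version:", version.Version)
}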